diff --git "a/dask__dask-6801/docstore.json" "b/dask__dask-6801/docstore.json" new file mode 100644--- /dev/null +++ "b/dask__dask-6801/docstore.json" @@ -0,0 +1 @@ +{"docstore/metadata": {"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/conftest.py_pytest_": {"doc_hash": "d6ee0502ada0f7929069cd62f1a9f6a03750a14455d587624ce8c18451d7bc7b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/__init__.py__": {"doc_hash": "d19ac4f3b8c8b37e9701d2f022cf04a37c29f62e1f34c584daa8e8bccd78d572"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py__This_file_helps_to_comp_register_vcs_handler.return.decorate": {"doc_hash": "0b24b40892d431110da35ba7b6a6006a11adeadffd523811e0a8c14eca48c264"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_run_command_run_command.return.stdout": {"doc_hash": "9bb6406f4fb972751e5c6364db03cde017bbfe9756c5a54f3f9fc07652056c9c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_versions_from_parentdir_versions_from_parentdir.return._": {"doc_hash": "7b907cfcb72231e1177b03b38d56920da01861d319db6838294b786846021aed"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_git_get_keywords_git_get_keywords.return.keywords": {"doc_hash": "21829cd66b2953bba4fd1c01617450ce13b49454cdd63be9a77154cd0da5c308"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_git_versions_from_keywords_git_versions_from_keywords.return._": {"doc_hash": "6a3a6dd7180b831e2e34ed360173218eb258928dec069e879ea4e6303af34a52"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_git_pieces_from_vcs_git_pieces_from_vcs.return.pieces": {"doc_hash": "2cf80e73e4efcd2ec0a3674445e82ecdc474d1e0188bee4561f214b559e5d14d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_plus_or_dot_render_pep440.return.rendered": {"doc_hash": "e1361e9e7f31292c8fad2b4c78dd5d7ddab63fe3edf6b2d21efd350057f9605b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_render_pep440_pre_render_pep440_post.return.rendered": {"doc_hash": "f736ff4dfea0243dfac2ff609c0191dbec7db316734bbc4bdf6b9efb3a5a7b2e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_render_pep440_old_render_pep440_old.return.rendered": {"doc_hash": "af126064d117fdaf3a0c47991d6e68ef56304322e9b41c39523dd2abe0d2e44c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_render_git_describe_render_git_describe.return.rendered": {"doc_hash": "4f8656d406540ec09d958a32cc7ecc81f7f5de84721313016b0e228499a1f49c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_render_git_describe_long_render_git_describe_long.return.rendered": {"doc_hash": "9d44c78548b4e9d91f54839e47594fa0bdc73dc7197d4bf6e7085ba0ddf78294"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_render_render.return._": {"doc_hash": "804fc381aff372ee4cc53a4951345e67223e3259693c49bd34fef69e048787ee"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_get_versions_": {"doc_hash": "81d079ebbe56db63a6cc14fca7ecf6573363f7b663d1906af58a0aa7cfa01a3d"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/__init__.py_try__": {"doc_hash": "87d1ff52236bbd113599aede77bd7d5896e3ae0168e1019b1652d6333d53b862"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/backends.py_tensordot_lookup_register_cupy._cupy_einsum.return.cupy_einsum_args_kwar": {"doc_hash": "4e0e84effbeff9d8a803e0d66e1c1019720b346b724adfa1719ea584a36f9d10"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/backends.py_register_cupyx_register_cupyx.concatenate_lookup_regist": {"doc_hash": "848c8c243b12829bc9eb718bbcc0af8548fb2b6a49d3c85998d8f72dcfb26e14"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/backends.py_register_sparse_": {"doc_hash": "6d0d1021359a64faea6becef5e91bc42771ddc3b005a88eac27c18f848bdd02e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/blockwise.py_numbers_blockwise._Tensor_operation_Gene": {"doc_hash": "72dbe95669d696820c2e67b7a5aa7038c2037bde76380d5f4510e4cd62acb593"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/blockwise.py_blockwise.out_blockwise.chunks._chunkss_i_for_i_in_out_": {"doc_hash": "ca7d3b80eed21ffdee81dca179beb48e27a745694aef6b1619f37928fd19f289"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/blockwise.py_blockwise.if_adjust_chunks__": {"doc_hash": "3e7f5412e76730e73f960daef7fecf17bd037b5ee51a0eb21971f3199ddf6fdb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py__A_set_of_NumPy_functi_keepdims_wrapper.return.keepdims_wrapped_callable": {"doc_hash": "267aa7db9d15e673d9e2c81ca025ebfc778c2e678fb24d3eba9134fd39cf25bb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py__Wrap_NumPy_functions_to_None_2.nanstd.np_nanstd": {"doc_hash": "6dfd7a13783be8c5c1ecdf5e49ca4980a3836f11e9df714c8d234f002240458e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_coarsen_coarsen.return.reduction_x_reshape_newsh": {"doc_hash": "5094630765e2c1c4291d28ce90e73f6243d02cf692c2853e11fdcbaebbf01d9d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_trim_trim.return.x_tuple_slice_ax_ax_if_": {"doc_hash": "84535f708ca9434cbf0cd87b3b498e25a7e4936fc2946d0ffd5d81dc937f217b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_topk_topk.return.a_tuple_k_slice_if_i_a": {"doc_hash": "71afaa6839afc79a5051bd7f081c355d321a0ac0c5ead5c400435787117752a6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_topk_aggregate_argtopk_preprocess.return.a_idx": {"doc_hash": "aa30a20d30cc3a044a3dc089547b61afd7c21887439e31b18138b2496da488b4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_argtopk_argtopk.return.take_along_axis_a_idx2_": {"doc_hash": "2e85665ea9e958cbe742a6e19972dfbc8e9d5da1f52e9a47f5120160d2fe7175"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_argtopk_aggregate_argtopk_aggregate.return.idx_": {"doc_hash": "93e6a6c59a7387d8ff5d5d7bc26d141bb399ac966055e5f73699d8e9e8e3ec64"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_arange_view.if_order_C_.else_.return.x_T_view_dtype_T": {"doc_hash": "bb5a26e78b0a71951c5d95e963ba607652aba660cad0a2a0788376ef7165bdf4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_slice_with_int_dask_array_slice_with_int_dask_array.return.x_tuple_idx_if_i_axis_": {"doc_hash": "4c218837556b0de0b99a55b119babfe7f3329f8c9ac33af60950512e020e9659"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_slice_with_int_dask_array_aggregate_": {"doc_hash": "2d9eaea891f0e914f638686c930ef9d119b008bb2388dba7a17d0335fdb3a32d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk_types.py_try__": {"doc_hash": "a4488a5028afa5d0cf74bcdf8ee9da29207b9998b4a12ba9ad696c14bbe9c92b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_math_PerformanceWarning._A_warning_given_when_": {"doc_hash": "d71359114b7e377b5eb8eca8b32cad3460f4726a52bd0cd544551f94d31ce79f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_getter_getter_nofancy.return.getter_a_b_asarray_asar": {"doc_hash": "a79162ebc4c737ea85c7332b316c96120a03c88c63031c040fb4c35bc607db9a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_getter_inline_getter_inline.return.getter_a_b_asarray_asar": {"doc_hash": "8ceeea910a037542256084112c909b8eac9f9c87503d18f63ab7657a5f75efda"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_optimize_implements.return.decorator": {"doc_hash": "a7d579c93a125650c65288cec34b8bd5c8bc2e8c44c82908fb67844a78032591"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_slices_from_chunks_slices_from_chunks.return.list_product_slices_": {"doc_hash": "18afbf357bc75cd9f280170142b2655557936c8f27692ec888b638434ac21fc0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_getem_getem.return.dict_zip_keys_values_": {"doc_hash": "1a69153fef4d2bda2cd8a633dd2c4260c2302724fd0cdcef98f396d1725f1b15"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_dotmany_dotmany.return.sum_map_partial_np_dot_": {"doc_hash": "1b11393f1fc976c7fabff4fa5fb3e956cbb496694fd4b6e09a15cc4655c84b4c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__concatenate2__concatenate2.return.concatenate_arrays_axis_": {"doc_hash": "fdabd448b1c5f4d68dc95702aa47eacba53d01fca36874a5f3dd81592c38b33c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_apply_infer_dtype_apply_infer_dtype.return.o_dtype_if_nout_is_None_e": {"doc_hash": "38abaabdd2b9afb74263e66750fc1b9fe05c0439d980257f1eef09ace9efea7f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_map_blocks.if_extra_argpairs__map_blocks.return.out": {"doc_hash": "4fa4969bc26460a93b279b7b708a9824ab5a60be97ca25b79769fd81f1406e74"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_broadcast_chunks_broadcast_chunks.return.tuple_result_": {"doc_hash": "9b48897b89882baf297c720626a5398d74caa4f6fa35a00ef6efbe9bc9e38c4c"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_store_store.sources_dsk_1.Array___dask_optimize___": {"doc_hash": "8840dc2c8962d4a1a17a870da02dc46892afa41cf75fe11d5b1a531ce0e25225"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_store.sources2_store.if_return_stored_.else_.if_compute_.else_.return.result": {"doc_hash": "0e9db3c404210b1d37adc7a9996277d8d9a9880fe0d3ba431b8c64882902ec45"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_blockdims_from_blockshape_blockdims_from_blockshape.return.tuple_": {"doc_hash": "46adcc3b2b73f7050e0e2836fb3ea893c13aac0d4b13bac0ba9d254951ff32ab"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_finalize_CHUNKS_NONE_ERROR_MESSAGE._": {"doc_hash": "8a0d5a38efce4ebd53929fe422709e05dde56c8b0fcb4ad8556f5c2da4209f3a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array_Array.__slots__._dask__name__cached": {"doc_hash": "ed4dde42bc0ec66ea6f8df18576baae85377e2f941f8fc6581a1e9345fd59bb6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__new___Array.__new__.return.self": {"doc_hash": "d1ce788626ad90efd97de74f1afeaf38bca63c6c86bde061732ef70740b088e0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__reduce___Array.__dask_keys__.return.result": {"doc_hash": "69e932fd86068971612192f0bf0871d8dd6158cacdb41ba9b46a1f3092e24335"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__dask_tokenize___Array.npartitions.return.reduce_mul_self_numblock": {"doc_hash": "587e2b78649cb33d86568b24a6cd0e0bec7630aae1d5b73d2ec900b243a3878f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.compute_chunk_sizes_Array.compute_chunk_sizes.return.x": {"doc_hash": "fdb4e30ee33a86f623ea004a78ebc3d1adc99d3f8491befd406bcb724b3d6506"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.shape_Array.__len__.return.sum_self_chunks_0_": {"doc_hash": "82982dc1c565367a42f9d59baed832a7399c68b479b65e0275ac3950d9a99dfa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__array_ufunc___Array.__array_ufunc__.if_method___call___.else_.return.NotImplemented": {"doc_hash": "697e4c23ea68e9f86712dffd00af0fe3bd09463eb9af871dca0fc4618de5dc3c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__repr___Array._repr_html_.return._n_join_both_": {"doc_hash": "e350456709a92ec5a082fa7fa71afa1185d4895d251ce53b558c787dff0d89f5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array._repr_html_table_Array._repr_html_table.return._n_join_table_": {"doc_hash": "e3a2d21ac38a90bd70e3f4ac92588736a1b0e5a437a42c371ad300f3e8b332e5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.ndim_Array.__array__.return.x": {"doc_hash": "c21cf7509e7c8ae5c5504bf2dd9e0f1110ca7eb0480cec53c69adae96e99993b"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__array_function___Array.__array_function__.handle_nonmatching_names.return._HANDLED_FUNCTIONS_func_": {"doc_hash": "cbfdf4355d89bc71bccf8979bb7530fcbd73c8f977f28383dcbf464813b4fab7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__array_function__._First_verify_that_all__Array.__array_function__.return.da_func_args_kwargs_": {"doc_hash": "2c8c36f0219d2b2ff0698a3a539ee8f296e43506ba3d6ab12f02dfc9a9c0e3e1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array._elemwise_Array.to_svg.return.svg_self_chunks_size_siz": {"doc_hash": "5f9047fc96a4fd7a4173d4030a04e0cae87158752eac73bc29663f770130287f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.to_hdf5_Array.to_hdf5.return.to_hdf5_filename_datapat": {"doc_hash": "7a4b1f6f1dca4965f42c0943e1e41d58cabcc3327ae6856eedee7f9d2a7ad178"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.to_dask_dataframe_Array.to_dask_dataframe.return.from_dask_array_self_col": {"doc_hash": "eb41ea798375e7649dd5b4b1a703744197109a6d88af956785e4857ee8e0690f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__bool___Array.__complex__.return.self__scalarfunc_complex_": {"doc_hash": "a62286c8dd9a68a3979fdbc9f44ed075c384ef8e741ad72af27da2e3b89600ca"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__setitem___Array.__setitem__.if_isinstance_key_Array_.else_.raise_NotImplementedError": {"doc_hash": "e02d0c53d567d7e2c941927868bdbca9082d9b0d075a93dc400601a29b401287"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__getitem___Array.__getitem__.return.Array_graph_out_chunks_": {"doc_hash": "2cc08915fb397b2fffc554e9d79b92f20e2d43ea44422db023d931ca04c01caa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array._vindex_Array._vindex.return._vindex_self_key_": {"doc_hash": "becb90a50266ea6383e3132a507bf19b51f41deaee54aa92e525cb6eb35cecb8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.vindex_Array.vindex.return.IndexCallable_self__vinde": {"doc_hash": "3d8f85e1aea07f5f36e186ef8f848cac68ff176a1b43c2ffaf6243b163a58d0a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array._blocks_Array._blocks.return.Array_graph_name_chunks": {"doc_hash": "83d485803d89d4362095f05c11fceb0b4515e5cd512968d96382f35ec3778ed2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.blocks_Array.blocks.return.IndexCallable_self__block": {"doc_hash": "5c530a7a60c764f792e32cac107bb30cd969f894106e843614516acf65800901"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.partitions_Array.partitions.return.self_blocks": {"doc_hash": "2a90708e8db75df32013465f41e28a3e17c720aa66a243c25bc644eef20c43b2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.dot_Array.argtopk.return.argtopk_self_k_axis_axi": {"doc_hash": "cab7be299997cb924bc28eb60726ae74c90311aeef295e3103185bd2e88e11d7"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.astype_Array.astype.return.self_map_blocks_chunk_ast": {"doc_hash": "3d45fad9171a4c45d9f1bb4ba11a3717de6e36755dd239e1ee5ba176bde27133"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__abs___Array.__sub__.return.elemwise_operator_sub_se": {"doc_hash": "b7ae628ef4abf6d37c1838c74476974d7e757fa95ae6c90c895bc18ebd1975e0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__rsub___Array.sum.return.sum_": {"doc_hash": "e65e83a5bd120314dbe554f05fdb3e7566bbc66901eeef1e42816ca593b8c1bb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.trace_Array.var.return.var_": {"doc_hash": "25bada816b1ee2c4c88f7b07df4e857926240efd2412081fc50119438736a90f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.moment_Array.map_blocks.return.map_blocks_func_self_a": {"doc_hash": "2c7595af891dbccf4cb83ae113a854aaa76f3a79ea6b899dc11528624d1a0c2a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.map_overlap_Array.map_overlap.return.map_overlap_": {"doc_hash": "e198eb13afbb1acdd5b5ea5f68bffcd4ba804cfa82a3f4dcd3ca203d454b69ca"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.view_Array.view.return.self_map_blocks_": {"doc_hash": "5aa2687e75b00cd93c3a52afe86ce0402492335dd844369c5d270f12b76f1349"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.swapaxes_Array.__deepcopy__.return.c": {"doc_hash": "a56294942f1efd0fbf1b0cf0e424d76db487ab2c009f3e8ee84b2e383bd691a5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.to_delayed_Array.to_delayed.return.np_array_L_dtype_object_": {"doc_hash": "a8085bf664f1ef0987ba329df745a129f9e3f3e8a8b8e6a4dfaf0afbd5411f81"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.repeat_ensure_int.return.i": {"doc_hash": "6ccbd0af0cf4cbf403aec0bc73a702515bf61797f77c055f8f23dc03f91bd656"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_normalize_chunks_normalize_chunks.if_isinstance_chunks_lis.chunks.tuple_chunks_": {"doc_hash": "3e017137f89d00e0c4f5216a117c547f08ae4e17415ca30507b768bef79f3831"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_normalize_chunks.if_isinstance_chunks_Nu_normalize_chunks.return.tuple_tuple_int_x_if_not": {"doc_hash": "e5ffc23fc30076b0660e3cb447523a8f3ba9d6a63642b48387931c5b27e5d246"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__compute_multiplier_auto_chunks.largest_block.np_prod_": {"doc_hash": "a5543b17831c56a31815aba44f78c0d83c443633ef83ae0f59c72dea0e1226d5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_auto_chunks.if_previous_chunks__auto_chunks.if_previous_chunks_.else_.return.tuple_chunks_": {"doc_hash": "1005b2bfd9620564268d787afe125008250fc52bc792544b88282ea3c7fd5504"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_round_to__get_chunk_shape.return.s_len_s_None_sli": {"doc_hash": 
"8df2e5387efc50be37cef71d706bdfac11e00ae1e171606e1f1a6b41ff7e1d41"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from_array_from_array._Create_dask_array_from": {"doc_hash": "8f507a59c9613415b7e2b993b9775daf5b30ca179a84fbae9c001905f2b0fe85"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from_array.if_isinstance_x_Array__from_array.return.Array_dsk_name_chunks_": {"doc_hash": "04160fb7d38cc48bc7d3ec87fd246d07f3c0de5f91bf1026f82c5390505d8712"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from_zarr_from_zarr.return.from_array_z_chunks_nam": {"doc_hash": "80b4799bab173f2ce28ea2758086cd3e6b586eb1df6f027fe60e64cd6276b7c3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_to_zarr_to_zarr.return.arr_store_z_lock_False_": {"doc_hash": "d45ee52fb49562e0c5269680e685113f027cb19f585b95044b5ac8a50c256343"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__check_regular_chunks__check_regular_chunks.return.True": {"doc_hash": "62e09a0117a1209e5c4f2feb69b55cd4f82c0d9bfcfd36bc0a8bd1afe3b9902e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from_delayed_from_delayed.return.Array_graph_name_chunks": {"doc_hash": "8eefa22184d98309d2165e3117c2a6ac1527a1228748ef1784ada99fb019e814"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from_func_from_func.return.Array_dsk_name_chunks_": {"doc_hash": "51ec22c7c6a1859e7b202dbc6717c0e0c8ecf6985df46bc6a8980e8de3143c20"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_common_blockdim_common_blockdim.return.tuple_out_": {"doc_hash": "2668de515adcb9f3fd946355f6ebd67041ab550be1878ab9f2e123252a3e33e3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_unify_chunks_unify_chunks.arrays._": {"doc_hash": "4a204f5ec9a6f128aadb57a42a1e7ec08441637904eabcc9689db0c1a43c9ebb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_unify_chunks.for_a_i_in_arginds__unpack_singleton.return.x": {"doc_hash": "4cf0cc1d7841b4e7575fc9f1489aef646dc26b297f8957a891c391779757a475"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_block_block._": {"doc_hash": "91289085cb4d44868b2a0c0999760f5455c617c5712b01dfa534b3cb02e57272"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_block._This_was_copied_almost__block.return.rec_map_reduce_": {"doc_hash": "5f14a3f601929aacf4404c5534cc99ac7b6afca7f36dd0cb41457e456806339f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_concatenate_concatenate.inds._list_range_ndim_for_i_": {"doc_hash": "a7ea51b17a72c9cf876feae853ec8d3742da7fb03a3e46b7e288bd607ce3b3fd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_concatenate.for_i_ind_in_enumerate_i_concatenate.return.Array_graph_name_chunks": {"doc_hash": "8993d22159af728dfaa5b2ab43333a0ec6677e9ad5acb26065373469e7ffa254"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_load_store_chunk_load_chunk.return.load_store_chunk_None_ou": {"doc_hash": 
"d9505c65021b7b60237f1db6d077d1a8b82b83b06f960888af758d4e9a26fd57"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_insert_to_ooc_insert_to_ooc.return.dsk": {"doc_hash": "4e47d33c928c4da8d123fb38a4789d76920c2f2866e21c4fc6f998937e45d428"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_retrieve_from_ooc_retrieve_from_ooc.return.load_dsk": {"doc_hash": "ba1ee239b8b3f29f52077f12734016c706a06ae5a47dce8a68f0d6dfe329d890"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_asarray_asarray.return.from_array_a_getitem_get": {"doc_hash": "491e6a84e40505d3ce05265bb2d070ddb49bd38b572340e32738d1dad312c3cd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_asanyarray_asanyarray.return.from_array_a_chunks_a_sh": {"doc_hash": "c1c8dc3eecf1caf5bdd6908b5f8bfd558b9698b2ccc757ac3b360e6d0d4e53e2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_is_scalar_for_elemwise_is_scalar_for_elemwise.return._": {"doc_hash": "ec02f31babf9b764705384928aff761c9f8f4646a26190a8b7c96ec3ece211b1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_broadcast_shapes_broadcast_shapes.return.tuple_reversed_out_": {"doc_hash": "63427393841c883e93de15cd7f988cae12eaf924309f6a21f72df939807a46a2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_elemwise_elemwise.return.handle_out_out_result_": {"doc_hash": "f4342bf2e749b6e9b2bf29f74b2e4784921481509c0c8e23b4b1e2b12382b776"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_handle_out_handle_out.if_isinstance_out_Array_.else_.return.result": {"doc_hash": "3c9fc8d9d8351e46ab66993611541160eab573f78b0b52d99c703f3485c7c207"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__enforce_dtype__enforce_dtype.return.result": {"doc_hash": "69be4a9567201ba6151024d8de494d0eeb5195f64459455450e0591a12d4a8cc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_broadcast_to_broadcast_to.return.Array_graph_name_chunks": {"doc_hash": "3c28139d7e1ece51169d9820ed100ac7f7115e92a72496bd5f91710a791a7ab8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_broadcast_arrays_broadcast_arrays.return.result": {"doc_hash": "d3ccaaa996454c61a8b836285a4fc9340a7d038f8efa885a120ba6933f0f6d82"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_offset_func_offset_func.return._offset": {"doc_hash": "f89799f38f3329ba6a3dd966e1c811ec4a8902e1bdf07f257cf51f255000689b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_chunks_from_arrays_chunks_from_arrays.return.tuple_result_": {"doc_hash": "fbc922cecf55dd87e0689f02509351a911984db673dcf0c06621934098cb7ae8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_deepfirst_reshapelist.if_len_shape_1_.else_.return._reshapelist_shape_1_p": {"doc_hash": "c39912614f970f83f76354b333bdc46cd4e197a5f5c5155b12b2f247e8b34935"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_transposelist_transposelist.return.reshapelist_newshape_res": 
{"doc_hash": "88350e3382500f29b8081db2f1dfc9660f5b297fd0d63230250d114e88c5c11b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_stack_stack.keys.list_product_name_ra": {"doc_hash": "7c06dd382f8288bc1300b677d866f33d727f09589d1e81c583c31e0e7672fa38"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_stack.inputs_stack.return.Array_graph_name_chunks": {"doc_hash": "a2fc20ccade92de61012a9e79e72bafc37ebbc3dc7c3e59671ef7c512db2d242"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_concatenate3_concatenate3.return.result": {"doc_hash": "e105497ac68f2de75f5c801115f140cd18b6b9988d11b196f1c3a99e09a6f53f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_concatenate_axes_to_hdf5.with_h5py_File_filename_.store_list_data_values_": {"doc_hash": "42995d6e258404c6817d9ee94c6e6003fc7bd5136eaf12e4ed8d3d16a6edd1ec"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_interleave_none_keyname.return._name_i_tuple_k_for_k": {"doc_hash": "e222a54e46cb41333517d313287b1b95c148946fb732102cfc19cc0664f5b2df"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__vindex__vindex.return.x": {"doc_hash": "71cddccbc557f4f4468f2b0c39e7de41073ed31d90f579194d37ca401b092089"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__vindex_array__vindex_array.return.result_1d_reshape_broadca": {"doc_hash": "4daed95c690645feda964beaa3b6ea5dccaaf49d968b345d6671502d75f42074"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__get_axis__vindex_transpose.return.block_transpose_axes_": {"doc_hash": "256cb095bc5f9b491cf2d5c2553e0091811726aecc32d32b0ee1adae9ee3f466"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__vindex_merge__vindex_merge.return.x": {"doc_hash": "a7b883138cc1c7d5b624073a84622b4aab543bbdf06f19dcb5d9abc391ce45b2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_to_npy_stack_to_npy_stack.compute_as_if_collection_": {"doc_hash": "696287e7b306b4829b73a31acca9f78d92d445b49bb747d5a851c3f5ec294dc5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_from_collections_abc_impo_AxisError": {"doc_hash": "3441624dcd6bc7e268a0c677eb05b87568db399d18a69ffc8c2b3f8f017cfa47"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_full_like__get_like_function_shapes_chunks.return.shape_chunks": {"doc_hash": "1e61ccae8215c97887dd67eadc1997a8ea575182d8e1b2633886bd9271510fc1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_linspace_linspace.if_retstep_.else_.return.Array_dsk_name_chunks_": {"doc_hash": "286301d5730d9bc8507063a3c148d50ffcb6e053fe9651ace88d2cfb74548e16"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_arange_arange.return.Array_dsk_name_chunks_": {"doc_hash": "6fe9fd65b75c694984fd348b8b07258387aee93c3712097874a9bd9bfce4ef1f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_meshgrid_meshgrid.return.grid": {"doc_hash": 
"78679de020ae216c9dd861c515c5cb5f3d8dc8991d88df4662d3e2568e4b8782"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_indices_indices.return.grid": {"doc_hash": "d6d45a6e39744ad5b138d3dcd8c0987e34b8b8afe86e7aeb7dcbb7d6cd8ed922"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_eye_eye.return.Array_eye_name_eye_shap": {"doc_hash": "c64385f726f1ee986bd8739b425b4b54b83164ec762b3daa271830e594a2aca5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_diag_diag.return.Array_graph_name_chunk": {"doc_hash": "3b51ccc7e9ec660d7a65914af51f6dd9603a4f0a1faa9e7fa3bffb5cb52e07f2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_diagonal_diagonal.return.Array_graph_name_shape_": {"doc_hash": "cc5dc64b38b073bf8cefce60fdcfb7a1f99baa6f1dd7f1c2db855859fc830850"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_triu_triu.return.Array_graph_name_shape_": {"doc_hash": "62cd9998680e447fe727f7cc982c677dcd7e48ba588fe8bd7ac2b83582089e89"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_tril_tril.return.Array_graph_name_shape_": {"doc_hash": "04ab416ad2b32f2c0635daa6caa6da1286cbde43a930cadab6d96dba5c8d0573"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py__np_fromfunction_fromfunction.return.Array_dsk_name_chunks_": {"doc_hash": "766455ffac82b15944d5284ec79bc4b75f32e16927d34ec32b37f0322050fb33"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_repeat_repeat.return.concatenate_out_axis_axi": {"doc_hash": "a3f80e3a43e12a5df28cee0122038f2bedb01eb9f505a4de10b3c07db74ddeda"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_tile_tile.return.empty_shape_shape_out_dt": {"doc_hash": "1cba7ecaa196c602993fae546e1764a12057516228862414925a1bc5e3167879"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_expand_pad_value_expand_pad_value.return.pad_value": {"doc_hash": "44bb4ed537076573a268d1c8bf850ccde603fcc5b01cf10a5ba5a41696d0fd4e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_get_pad_shapes_chunks_get_pad_shapes_chunks.return.pad_shapes_pad_chunks": {"doc_hash": "688c2f17169cf1421eb12ca6a6ae92845e43da52e1e1d05e2f96753ea309c5e4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_linear_ramp_chunk_linear_ramp_chunk.return.result": {"doc_hash": "50dd995486fab008f44b356bcc6aa2ed3d3820d8d68403ed11c486c7d18477fb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_pad_edge_pad_edge.return.result": {"doc_hash": "22f7af7656eb92946ba899086ebca7a18d96796591740cf127ba50e6d8e2c193"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_pad_reuse_pad_reuse.return.result": {"doc_hash": "65aee02fd8fc1cd3cb44a71386947f373fe6f11861852045bd70690ff76197bc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_pad_stats_pad_stats.return.result": {"doc_hash": "9d0a2b55695c34ff20feb8b288076b6baffaffbacbac2c1114825719770a8f8c"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_wrapped_pad_func_pad_udf.return.result": {"doc_hash": "82d26e729e23eed211c63a008f3f22a1b89592d81ccd1c5256be63c80d4e44da"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_pad_": {"doc_hash": "601e2a269bfc02187e3a160ff4d6f5501ca0a8bf9282aebd4267285db86940c2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/einsumfuncs.py_np_chunk_einsum.return.chunk_reshape_chunk_shape": {"doc_hash": "8b6f29dea925648cd02a89db1846ea0b7f5d12adefbaca43f825696ca96cecd8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/einsumfuncs.py__This_function_duplicate_parse_einsum_input._Parse_ellipses": {"doc_hash": "b095f157a7a5b966b7d37116832675b7443f11018cc29997c230bc0c0bb06a67"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/einsumfuncs.py_parse_einsum_input.if_in_subscripts__parse_einsum_input.return._input_subscripts_output": {"doc_hash": "42068a064a2dac2460eb50d5adf2f0b4ed792cbe5f2ea88a2c40dcb362ed9edb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/einsumfuncs.py_einsum_": {"doc_hash": "9a42d324c84a2dcf0c6b40fd24b4e6775da81d4ff833584aeb2937391a4d20a7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py_inspect__hfft_out_chunks.return.chunks": {"doc_hash": "7a9edf24ea674bb76d8ac9d9c929136c97d7c3da089a036e9e76d5d31334fdc6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py__ihfft_out_chunks__out_chunk_fns._": {"doc_hash": "3cabb6a98edf8851f793a3ab292e0b49280562e8a3e8d53bea286ef5c225496a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py_fft_wrap_fft_wrap.try_.except_KeyError_.raise_ValueError_Given_u": {"doc_hash": "b684f1a42ce86baa06271855f67d876405948f0ecbed9adeaed46b0fc6bfd04e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py_fft_wrap.func_fft_wrap.func.return.a_map_blocks_fft_func_a": {"doc_hash": "b5058979c84ce548343c9cd322b80c7c334f3a78fcfc8bd4b95487aaeb91b94a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py_fft_wrap.if_kind_endswith_fft__fft_wrap.return.func": {"doc_hash": "ca32ac6d934671d9e06961ed565c0ab20534fe85c8051cb002fa4cf0bff7b472"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py_fft_rfftfreq.return.r": {"doc_hash": "05c5e884839e760cbfb490e2f19a5bb43b7ee63078298f840fbeefc8047c6fc3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py__fftshift_helper_": {"doc_hash": "7275be591d823941f88c8bea39390b12b92eb61d8acd77dc6c5afb46b563df19"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_np__SIGNATURE._0_1_format__IN": {"doc_hash": "617a0b769e9b7d48c8d467e794673ec528cd8f0cf994e52f89c0fb545269ed38"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py__parse_gufunc_signature__parse_gufunc_signature.return.ins_outs": {"doc_hash": "b11fabc79fd245f9225ef9ff3225da7d52ee7a9f8ae5a43a32a92e0b0e25137d"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py__validate_normalize_axes__validate_normalize_axes._Assert_we_have_as_many_": {"doc_hash": "e959df10eb023926a92b75a8cf7a5abb2a8af76bda5e64b4cd695dc3ab60b3e8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py__validate_normalize_axes.for_idx_iax_icd_in_en__validate_normalize_axes.return.input_axes_output_axes": {"doc_hash": "261db20631488665c7fadd7e551c249d33912b503e2f473180af76e1be4a7b63"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_apply_gufunc_apply_gufunc._": {"doc_hash": "92eabd576c86363e1360c52491559dd287b052a8d2f871b0401781bc1693f6d1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_apply_gufunc.axes_apply_gufunc.max_loopdims.max_num_loopdims_if_num_": {"doc_hash": "7b1a9336f9a820ca6fd0db24b957e1537c8631a9e2f847cd989ea3024057918a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_apply_gufunc.core_input_shapes_apply_gufunc._Modifying_blockwise_": {"doc_hash": "58ff76e27a4d2ae53db41f10493299686c00ac59918b1cb1695279ab83170456"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_apply_gufunc.if_meta_is_not_None__apply_gufunc._Undo_from_above": {"doc_hash": "672f602e6869482bb95b4e4c8c675c00696f0d62bd254f3ceeb203ff1ff71144"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_gufunc_gufunc._": {"doc_hash": "6c45a80b043e86ddf3d793dc870c79523de7a7d0a63aef1249185a74ed87e74e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_gufunc.__init___gufunc.__call__.return.apply_gufunc_": {"doc_hash": "4b29bbd5fd1606196471f04f4270e3dc70ab06fac301526846502f3154a7ab27"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_as_gufunc_as_gufunc._": {"doc_hash": "98536c3b13d52fbb1f0758477a625782917bab25c5d6335d9daf8384d2d29914"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_as_gufunc._allowedkeys_": {"doc_hash": "489003ee768a30128386c13bdd4e2d28f19b3ad39af6b06fbcb115c045f19e9c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/image.py_from_glob_import_glob_": {"doc_hash": "d7e91752e274da8f4badce843f579d70c8714169326d4d3d7e5f2d3abc667bf9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_operator__nanmin.return.k_1_if_np_isnan_k_0_else": {"doc_hash": "e409a9d7c34ab8c361a92e95a83874099323b52dbd3c1e509794174526b90b06"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py__wrapped_qr__wrapped_qr.if_a_shape_0_0_.else_.return.np_linalg_qr_a_": {"doc_hash": "4659cfebba558a04a373e8cdbc14891d0191a677bd42950120dfab5107bd3199"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_tsqr_tsqr.layers.data___dask_graph___lay": {"doc_hash": "0f71b61dcf0c16c461769c1448d2e3af13e88cc582a2906f6883a19b80c974c5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_tsqr.dependencies_tsqr.can_distribute.chunks_well_defined_and_i": {"doc_hash": "db049b7353af8e34f1b44b36982cafa180c861f08d85a72d279f73bca0506579"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_tsqr.if_chunks_well_defined_an_tsqr.if_chunks_well_defined_an.dependencies_name_q_st3_": {"doc_hash": "46f27886d35129726db5a90abf05f5f6eb79fa5c43862c8afffe85ab756f3622"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_tsqr.if_chunks_well_defined_an.else__tsqr.if_chunks_well_defined_an.else_.dependencies_name_r_st2_": {"doc_hash": "a05bac3b17033ea9f4172d860c54976e21c129836026104a56bcd7bdca452cfa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_tsqr.if_not_compute_svd__tsqr.if_not_compute_svd_.else_.return.u_s_vh": {"doc_hash": "e06859b7c3af91cb855226fd7b6d604ed7a8fa4a831f577d28e0e15956097273"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_sfqr_sfqr.name_R_1.prefix_R_1_": {"doc_hash": "1bb319eb1a19287783eb276a2721124eda02f4559feded56e0b5bec493155047"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_sfqr.layers_name_Q_R1_nam_sfqr.return.Q_R": {"doc_hash": "788b23b9aec0cd083f945fe3e7bab72833be465645222dd8734e944be30cccbf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_compression_level_compression_level.return.min_max_min_subspace_size": {"doc_hash": "22e16e8e9973dfcb7e59b53fc539081bda9d3ad2f919aa49cf3ba07da6dfa5ea"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_compression_matrix_compression_matrix.return.q_T": {"doc_hash": "c68ae3ad91b63c36308d884b29ae2594e9f4b76d852f019b6a19acafa221bf12"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_svd_compressed_svd_compressed.return.u_s_v": {"doc_hash": "56f0921ce901eafec85afef4c7e8d6e240b625f305b837356ba1e9e03aaa5974"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_qr_qr.if_len_a_chunks_1_1_.else_.raise_NotImplementedError": {"doc_hash": "ca6d7768a8bde88e3a1d41e545a258ee7de224cc99dc820360c281938634b378"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_svd_svd._Single_chunk_case": {"doc_hash": "1d9cb1992f16e84a5d714e2b34903f2b59aa71a5ff489ca934e3128cf8e8fb8e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_svd.if_nb_0_nb_1_1___solve_triangular_lower.return.scipy_linalg_solve_triang": {"doc_hash": "0ca0ce5eafacdcf4aeacc080d4320e16d758614c675284d6f4da78c8aa417304"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_lu_lu.dsk._": {"doc_hash": "02e1103735ce6803e442ce88fb256bba0ab0e9a23d6e5203de93af1b471f40b8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_lu.for_i_in_range_min_vdim__lu.for_i_in_range_min_vdim_.for_k_in_range_i_1_vdi.dsk_name_lu_k_i_": {"doc_hash": "381d34c91b0a7c5344df478730c2a757590cefe907905b867086afcc9f256789"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_lu.None_4_lu.return.p_l_u": {"doc_hash": "e0667e5516ab6a537b4b1aacc123f966ee3e3180285a4710b4cc9f6628f95e3c"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_solve_triangular_solve_triangular.return.Array_graph_name_shape_": {"doc_hash": "d8d7d6f30b144122750c387e65a7a06e51a719d8a31d5e7feb3751f1a40a6896"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_solve_solve.return.solve_triangular_u_uy_": {"doc_hash": "fdf0c4c1eaa1a711b6462508f1b4892ba35fd5df7c901a067f5898e53af139cb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_inv__cholesky_lower.return.scipy_linalg_cholesky_a_": {"doc_hash": "2228d64cbe27f9f05c59692db042577c5f10fc5081d61c788fecd30dbdf2c6d0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_cholesky_cholesky.if_lower_.else_.return.u": {"doc_hash": "4af45195d7095afeb0100abedb50270932cc55376cf97c41b79ddffa7a945472"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py__cholesky__cholesky.return.lower_upper": {"doc_hash": "c5213564f9c8d065985823c4dc616ffade5737aeb8dc26653aa0b301623d526b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py__sort_decreasing_lstsq.return.x_residuals_rank_s": {"doc_hash": "74967c05f3363e9b9e44b2a21f488ec297565961ecfdb54a0d043e2e13aea237"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_norm_": {"doc_hash": "0b3ea2f48799a1dbe6ea799175dcf1e43b2dfddb8c9b82a86460734083ccd004"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py_from_functools_import_wra_normalize_masked_array.return._data_mask_fill_value_": {"doc_hash": "396a99a42782285a786f1c1908819de2c42355d6d02bbb79f13cb10152dd2593"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py__concatenate__concatenate.return.out": {"doc_hash": "cf91bf3fa61e25b5516855f50f0eccf708b9d93658f3e47c64e53b9f4a529616"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py__tensordot__tensordot.return.res_reshape_olda_oldb_": {"doc_hash": "7fdee7331d7e50822d7ca958366598cc6d9c0378ac64d2cd515a5e11612f0340"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py_filled_masked_outside.return.x_map_blocks_np_ma_masked": {"doc_hash": "4071914691df0012670bfa135a7c36219fe8129421f700cb78b608e603491057"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py_masked_where_masked_where.return.blockwise_": {"doc_hash": "78cf43f154acc5082dbe99f792dc6ac02fe57f8b9a6c4842f3011537e1dabf56"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py_masked_values__masked_array.return.np_ma_masked_array_data_": {"doc_hash": "555d6066093bfd811d80e0d5cb57e91893dc92723646cb0ef3ce2f817686338a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py_masked_array_masked_array.return.blockwise__masked_array_": {"doc_hash": "6eaa9e2b0385cfbdabe6eaaa9bd9d58087968d2f0b7668d1617f71b657a2fca9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py__set_fill_value_": {"doc_hash": "4c5bf244f24c924b42e4ebb4b0ef56381b3462ed439f91f2cc6146b4085167d9"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py_from_distutils_version_im_try_.except_TypeError_.ma_divide.np_ma_core__DomainedBinar": {"doc_hash": "334386f28124078632b04cdf0648551e05b49dfc204d3f61303e87a625bc597e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py_if_LooseVersion_np___vers_if_LooseVersion_np___vers.take_along_axis.return.arr__make_along_axis_idx_": {"doc_hash": "f6692d69b4c7b80817968c4044eb8923d288c9d59ef00c4579fc8f8720d5cdf3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py__make_sliced_dtype_np_ge_16__make_sliced_dtype_np_ge_16.return.np_dtype_new_": {"doc_hash": "c7a6b9f1497722f4a59911468d7efcdc58d74f129ec6f2e572b26c7301c2a4bc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py__make_sliced_dtype_np_lt_14_None_1.else_._make_sliced_dtype._make_sliced_dtype_np_lt_": {"doc_hash": "4646cb6e53b1a5b699f7b83c2e76706b897f3d7c39e11f07ad807997d4c1e5b6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py__Recurser__Recurser.map_reduce.return.f_x_kwargs_": {"doc_hash": "da9cdefc4c144d84e7c74173e430c81e3777b0113daef426b812bc492b2e8f1c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py__Recurser.walk_if__numpy_116_.else_._unravel_index_keyword._dims_": {"doc_hash": "b672887e258dfacd8f83addad7005afcf137a9db4082279d744ef7ba645b3b01"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py__Implementation_taken_di_moveaxis.return.result": {"doc_hash": "7349956e1f8e48ca973aa12b608250326d1a0057f7d40c8dc05166a927a09b23"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py__Implementation_adapted__": {"doc_hash": "0eceeb42979a77ce74e058c15dfd8fae71395c23d3109be2221a8b7e6e03990c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_from_itertools_import_zip_GETNOREMOVE._getter_getter_nofancy_": {"doc_hash": "19bf99f36b73aa02cfe9649a5901d8c1579b70b9351c3316ef6d6a7f5ddb9d4a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_hold_keys_hold_keys.return.hold_keys": {"doc_hash": "cfa2971852f7b932684b2f00f811a16f1669e7812d136ade3aba1d4ebc5a0ab0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_optimize_slices_optimize_slices.return.dsk": {"doc_hash": "99df1a6d6242c0eb66dc536907331d396221dacb6cba11e7f27f4dea09c0d643"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_normalize_slice_check_for_nonfusible_fancy_indexing.for_f_n_in_zip_longest_f.if_type_f_is_not_list_an.raise_NotImplementedError": {"doc_hash": "6650f3f814bd3e5702b028566bc574b21e556ea1ed194fb9a301f60354582d00"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_fuse_slice_fuse_slice._and_newaxes": {"doc_hash": "0ebe26b90504be091a55a5bc528455cbc55e8ce757f9071f8c602b309c33d4a3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_fuse_slice.None_8_": {"doc_hash": "4a7c8739a0263cfa7cfb8168ce6d2f18dec7f00c28d10bbe1e7c2d96f42fc7b5"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_expand_key_expand_key.return.result": {"doc_hash": "ec0270b05d7844a1405cffb6f9c5223e775270b77039b57569589bf3369b99f3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_overlap_internal_overlap_internal.return.Array_graph_name_chunks": {"doc_hash": "d0f223c9752b8252e54c8db0805c7394ef2a5bc7a13e9ac812c144648482cfaa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_trim_overlap_trim_internal.return.map_blocks_": {"doc_hash": "9f1b124d807b2692522eb26869384e43baba83f860057d8b31b3d9eef647fdd4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py__trim__trim.return.x_ind_": {"doc_hash": "5f67b416d011267cebbcb439ccc1a48c7a125e452702dba107e6d9a6dfb643a9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_periodic_periodic.return.concatenate_r_x_l_ax": {"doc_hash": "4c7894cdaf701b7de8f47f2ae92df4f43b1cdc274f9ba99b00d406ace22e35a5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_reflect_reflect.return.concatenate_l_x_r_ax": {"doc_hash": "eb59e04f9e63091d05b162216497bcf91340ca59958fb8a8835b852f8391a1e1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_nearest_nearest.return.concatenate_l_x_r_ax": {"doc_hash": "4c43ba64cf299316e3786d2414c9d092d03a55b3277fcd6abf5a3eae2f0fbef6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_constant__remove_overlap_boundaries.return.l_r": {"doc_hash": "73e4f3ed056cee1ccae5eba441fb9f0dac3b6aa7331934465316d88186404d2a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_boundaries_boundaries.return.x": {"doc_hash": "db105f851669b27191eeee60cdd9529b4f10210454fca872ba08f53d33a4083b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_overlap_overlap._Share_boundaries_betwe": {"doc_hash": "c9482a647f40e38e776d0e8e021e4d20e98ff57ec62280986098d505ebc35f50"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_overlap.depth2_overlap.return.x4": {"doc_hash": "8d539541463b236e09b726d16a692bb8ba59dc6f476c56befe4b2a95cb1eb53f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_map_overlap._Look_for_invocation_usi_map_overlap.if_trim_.else_.return.x": {"doc_hash": "205d45d92af7d0f32dac165a86c14423d54231d460150225b58126035528df6e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_coerce_depth_": {"doc_hash": "7dac5aeb74762d1c0ef4483398f0c9f2ee9b901c39050de716b783572f7d9fa1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/percentile.py_from_collections_abc_impo__percentiles_from_tdigest.return.np_array_t_quantile_qs_": {"doc_hash": "d97e9fdc96295b10654e130069130849aef1e863143152526ed145298a061b4c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/percentile.py_percentile_percentile._Allow_using_t_digest_if": {"doc_hash": "7b9e563f833c6c247c663f3edf76a4935adc1b4980a822c51430a56612c26b64"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/percentile.py_percentile.if__percentile.return.Array_graph_name2_chunk": {"doc_hash": "0c033dd771a7cf551073bdaa4231585391b830d50d0098a8056333064a5e06c4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/percentile.py_merge_percentiles_merge_percentiles.combined_vals_counts.merge_sorted_map_zip_va": {"doc_hash": "6e71192a560a81e1f02f20261d6859506ff4a62a2959be2327e8d6f22bb342a3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/percentile.py_merge_percentiles.combined_vals_combined_c_": {"doc_hash": "cbd39f1b6aff28531000f54da10f9c1107e1a207a9ac5ae823aaed7e0ef8c9ce"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_numbers_doc_wraps.return._": {"doc_hash": "804fe1e96a8e86416e5a007a5217e5df06df5daafe1962f317012c2842e6d311"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_RandomState_RandomState.seed.self__numpy_state_seed_se": {"doc_hash": "6c5f47ad34c9555420bbeaf517cd5b5cd4f788391ac5398b1a681fd98297ce62"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_RandomState._wrap_RandomState._wrap.vals._": {"doc_hash": "3c138485552c4a7efd65c350dc4f6f5866f2165bd6465f85b02639fc083efbe3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_RandomState._wrap.for_seed_size_slc_bloc_RandomState._wrap.return.Array_graph_name_chunks": {"doc_hash": "3704ffd9829084c267991501505b5b587c1daa5bcfece40551ca279d20fc4590"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_RandomState.beta_RandomState.with_ignoring_AttributeEr.choice.return.Array_graph_name_chunks": {"doc_hash": "0eb3dfd443d4bf148466aba95ea6793d88b04fed3f35fa079a3ed6d303e54ea5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_RandomState._derived_from_np_random_RandomState.multinomial.return.self__wrap_": {"doc_hash": "87b802fcd3b9e55eef1b0c76eb547d7e5910a306a85c0bb538986faa78bd7730"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_RandomState.negative_binomial_RandomState.rayleigh.return.self__wrap_rayleigh_sc": {"doc_hash": "d85e7488b5d89f107127e109a404a4888d39fda2df0e4f7a12fb65e95d6f0117"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_RandomState.standard_cauchy_RandomState.zipf.return.self__wrap_zipf_a_siz": {"doc_hash": "17d4546ee94d6315679079e4354e38dce5e66c8f1c0052ceea3c71c3d5ab85cb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py__choice_": {"doc_hash": "d8f5ad828565596aec9b513ad94dc11c6af7f3b7d16761bb62ab484549c2d39a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py___config": {"doc_hash": "8e8d679fc80f5029474e86eb1493ffeaa0c87cf9fc2e08622e4e1f2b338fd726"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_cumdims_label_cumdims_label.return._": {"doc_hash": "f889d58b1de136d1a222328934d259b62cf622d9749c8e9b6d16c16b8996a2fa"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__breakpoints__breakpoints.return.tuple_sorted_cumold_cum": {"doc_hash": "e6a9503d31d480dd619c069b5e65ac502192d9a5e3069c04694d4eb285c65ab5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__intersect_1d__intersect_1d.return.ret": {"doc_hash": "d19d3df97617f6af3ad23af0a6909353a0c67c8ce670d3c76d023bdbe1aa2ed2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__old_to_new__old_to_new.return.old_to_new": {"doc_hash": "d3fecc82fdd13090a3c361c2963c83ab417810615f32a7b95471c7205980cf60"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_intersect_chunks_intersect_chunks.return.cross": {"doc_hash": "825ef9e652323d0fe498d70de354808569953a9804f554dc69e7c17486aeadbf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_rechunk_rechunk.return.x": {"doc_hash": "c23b3d42ecb573bb1c201f6026270ef2c0f7d06b41fa56ff5848adfc153aa635"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__number_of_blocks_divide_to_width.return.tuple_chunks_": {"doc_hash": "178b18f76dfae2dbb770cf0ec23437484a3eb40f7611cbe909f20785d8561133"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_merge_to_number_merge_to_number.return.tuple_filter_None_chunks": {"doc_hash": "fc82712635db2a3998679336f7909c98e07ab00d6db39dcafe9bafd5432a8f76"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_find_merge_rechunk_find_merge_rechunk.return.tuple_chunks_memory_lim": {"doc_hash": "d23e3e8950d874ac648b213214cbaf5c7334eb548351eb9a30f560c45a3258eb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_find_split_rechunk_find_split_rechunk.return.tuple_chunks_": {"doc_hash": "eec737ad37d33000372fd1e9baa1a2b2b937ff8bbdfc4a607181f672cf6f1844"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_plan_rechunk_plan_rechunk.return.steps_new_chunks_": {"doc_hash": "7250fdc29fc483ff6988ab9cfb39731ee823b2e338442afec33fb0feb900f407"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__compute_rechunk__compute_rechunk.return.Array_graph_merge_name_": {"doc_hash": "a7703de2efd69af820afdd2d33f109a1cff3c1004bf383fd75d95494d104c1a1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__PrettyBlocks__PrettyBlocks.__repr__.__str__": {"doc_hash": "2f95da48686b7a63ccfb36fcf3de3d8ff282f64585f652c4a18a9601323a4443"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_format_blocks_format_blocks.return._PrettyBlocks_blocks_": {"doc_hash": "619c36af4b482a1cb62a26975b01972d2ac401bcfe8c047f663251b30a90b966"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_builtins_divide.return.f_a_b_dtype_dtype_": {"doc_hash": "e31103c485e22edc112e5f50d21189d9061285fd99424161d3e4333191e45687"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_reduction_reduction._General_version_of_red": {"doc_hash": 
"f8f69996dff788eb92f598eeec534823220980578f9e016c912d4b57baec598f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_reduction.if_axis_is_None__reduction.return.handle_out_out_result_": {"doc_hash": "383770ba1164e870f5a9175f483b2609d147784a2c0ee90c607e91ee14b6f3e1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__tree_reduce__tree_reduce.return.partial_reduce_": {"doc_hash": "eba553bd8594752ad5de4c1022b6b71710fc11e8d3857f2e911712d209557fe4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_partial_reduce_partial_reduce.if_np_isscalar_meta_.else_.return.Array_graph_name_out_ch": {"doc_hash": "5c5ab21e6ba735e87feadf7453435098bc715b7871a8b5a8336012b08bbe6736"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_sum_prod.return.reduction_": {"doc_hash": "c6b56ec8d61581dd41ac36f1b6c27092822114b0a009f3f49ed509f3e14950a6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_min_all.return.reduction_": {"doc_hash": "b5568de5b8af54179040b7cf9ac24a5918976772eacb1edbd28488a5739d142d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nansum_nansum.return.reduction_": {"doc_hash": "3f10e7bfc5c67b589aa854f94da031c25a6db535b193123ec087610c5957cc4f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_with_ignoring_AttributeEr_nanmax.return.reduction_": {"doc_hash": "bb2b66c79639f03d9ddbf062ae7fcc7a25865058d36256df62d95cb469acf5cb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_numel_numel.return.full_like_safe_x_prod_s": {"doc_hash": "ff2e93e85b172cc9fac2f48b48f6341cadec69bbe151237a57983760272dd6fb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nannumel_mean_chunk.return._n_n_total_total_": {"doc_hash": "42838f7198467df332e2756df44282e7c4489da978a80b40d02c707c69693230"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_mean_combine_mean_combine.return._n_n_total_total_": {"doc_hash": "86ca998c4b7973542ab2e419530837ca54e2d96f4030dde468554292d9f05ddc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_mean_agg_mean_agg.return.divide_total_n_dtype_dt": {"doc_hash": "2af5edf4ed30411482749a64a2c711fc07bbffca2bf52ad710b5ef660de99350"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_mean_mean.return.reduction_": {"doc_hash": "a5985e9bf1487420fff1bc4cc683218e0cab69a4286b1d9a3ba414df88efa0f6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nanmean_None_1.nanmean.derived_from_np_nanmean_": {"doc_hash": "9812886f61993083fb9351d9c66cece8bc59e30bbc502259655796b364db1663"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_moment_chunk_moment_chunk.return._total_total_n_n_": {"doc_hash": "841eae85021dafca7f4b1306c9063b5fa7f806367a2cb145b94298f4d26d84b3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__moment_helper__moment_helper.return.M": {"doc_hash": 
"e73a5d375cc60c93c9d7332d45ae1d16bc482dca3a06749f7d6b08788a656b92"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_moment_combine_moment_combine.return._total_total_n_n_": {"doc_hash": "c3fd6b182cf51e24db252dc6ba551f93840f24ef4db32f34301ad4d5caed50eb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_moment_agg_moment_agg.return.divide_M_denominator_dt": {"doc_hash": "e0f51c72bbf7ea335e40ec6eee2c87dc9b2480e03115d731583bb130f88e66b8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_moment_moment.return.reduction_": {"doc_hash": "6c87afc87c56b77fbdb7c16b94b517c5476ff9071f4b577893b4e19954b4f846"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_var_var.return.reduction_": {"doc_hash": "e6b7ae8504bdf9031679f1217eee9401ef4ec0ca4c66d42cbaa11c3cc9ed5948"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nanvar_nanvar.return.reduction_": {"doc_hash": "10c6122aab6f7378b30bdd0ed38717044d1d35eb972f29b14704c84bca6673e2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_None_2_safe_sqrt.return._sqrt_a_": {"doc_hash": "f969a24c9fa23a3864eaf87f20f30e2460e225a463d116ccf2a49fc9ebdf9ecf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_std_nanstd.return.result": {"doc_hash": "f55d80afe28b3ce506374139e97f9e318673914d9f1d89b60768278b767ed926"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_None_3__arg_combine.return.arg_vals": {"doc_hash": "534b12874344838451199c9ad043d3e92028a09b996f132f6f012ae4046139aa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_arg_chunk_arg_chunk.return.result": {"doc_hash": "bf55ab36a35cf6c4749375448a689d103bd1e6718e52840debf629429ecd8370"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_arg_combine_nanarg_agg.return.arg": {"doc_hash": "95f16941f7100178581b9dea78ff8cea70a165c657523dba9a2cf3d3b6b3083a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_arg_reduction_arg_reduction.return.handle_out_out_result_": {"doc_hash": "b747ebf91497607ba1acb4d4e714bf271c67cb88e5c637c818625dfa452a206c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_make_arg_reduction_make_arg_reduction.return.derived_from_np_wrapped_": {"doc_hash": "4921f7d9a8692d08e9700e22b013b7d026762f666a1370efc832a48fda45b0af"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__nanargmin_nanargmax.make_arg_reduction_chunk_": {"doc_hash": "3bef79fda0b9b8f0d69357946570a64ed6915b5095f0f385ce6642aea980c226"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_topk_topk.return.reduction_": {"doc_hash": "90e6028e4cc6d9c0385734376315ef7036b534be48fce086cc6415938200948e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_argtopk_argtopk.return.reduction_": {"doc_hash": "ede2858ef5a6ee92afe3a322a78956eb01697d4431367535d4c985a144740b20"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_trace_median.return.result": {"doc_hash": "e6c857e4fe88c5cebceb0f3880134cd75a55d0ac76abde87484c4880ddd48740"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nanmedian_": {"doc_hash": "79d70f70d9b7f9edb3e1b5c99b2319d11d856235342e381f79255f58846fc5b9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reshape.py_from_functools_import_red_reshape_rechunk.return.tuple_result_inchunks_t": {"doc_hash": "331fe60763867f24d755e1acaa5993ea0c032945f4f4c1080fb6479f6aeb133c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reshape.py_expand_tuple_expand_tuple.return.tuple_out_": {"doc_hash": "0db96f19e443afe6d780724bac7043c6277d872226defe9ea4a5666823c66500"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reshape.py_contract_tuple_contract_tuple.return.tuple_out_": {"doc_hash": "9f62f7737f5638b153f7c64ba7c902043e105bd688ea0612423ea6c07b946129"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reshape.py_reshape_": {"doc_hash": "32d07db40f28edbc67d9955b4fcd5e6adf6b78942266ddb6ae17cae606fdd1f8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_inspect_result_type.return.np_result_type_args_": {"doc_hash": "c57591f647ef6fe34b5b2ab178a817f536d8392ed2ebf56c32252b7eb4b1c404"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_atleast_3d_atleast_3d.if_len_new_arys_1_.else_.return.new_arys": {"doc_hash": "d8bfaa731ee75d83a55148d8427a4064a462b70c6e69152a97389939b4a9a9eb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_atleast_2d_dstack.return.concatenate_tup_axis_2_": {"doc_hash": "c462237612ceefa706dde456fe5b99670a1f436186ec965650345c3825a882cf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_swapaxes_transpose.return.blockwise_": {"doc_hash": "cc0a51b5c620219f3e136e4638683e1a6f9775eca26b6dbe88c0507b97ee68ae"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_flip_ALPHABET.alphabet_upper_": {"doc_hash": "8e247c2d2bee67bca12bc194c55a30e40f76e4e216e04e8ab7a22938a1578935"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__tensordot__tensordot.return.x": {"doc_hash": "3c6c616004fc4581db6dbccd2127dc8ee519b6b10805e3ec725465f833a1234a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_tensordot_vdot.return.dot_a_conj_ravel_b_r": {"doc_hash": "400b9ba89f0fcd3c6e38c716ee8cfb68b9cd41eb3ba0defd70d9e74d559da445"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_matmul_matmul.return.out": {"doc_hash": "371f35ee562b44642f9a033003d20c7cd19aff4c7933076a3b35c8a83aed1110"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_outer__inner_apply_along_axis.return.np_apply_along_axis_func1": {"doc_hash": "7e68f7251d68f46521d16cccc2eb152a0e532ea11df6aff0fc0ee765a1af101d"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_apply_along_axis_apply_along_axis.return.result": {"doc_hash": "f513296935e16e80a6bfdb56665fb7d0d4b7ffc4ea020aedfe65014bed31422a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_apply_over_axes_apply_over_axes.return.result": {"doc_hash": "aea405db3085e5797b29014d473eb7afb50ea810a45e798f9e40a75b52f7047c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_ptp_diff.return.r": {"doc_hash": "e8e01fdce31c51e182780484cddb411ea24a4ed1aff3ab5115066cb0a79510e7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_ediff1d__gradient_kernel.return.grad": {"doc_hash": "0d83f395b02b2b7dc28bbb7a3dbff0491b38c1e4a32d16a8ef847b7614394747"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_gradient_gradient.return.results": {"doc_hash": "ec73cd11c8e937d95d74d4c4cd4357dcd2cc4bee2853076c7a953bc671f0bef5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__bincount_sum_bincount.return.Array_graph_final_name_": {"doc_hash": "6fdd527ef6ec34ed6c0e73acae13ce59729b6de32a57bd81d447a718166c919b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_digitize__block_hist.return.np_histogram_x_bins_ran": {"doc_hash": "619325924270c823fe3cf27b7593612b909a4591862d69c343d792341585fbc0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_histogram_histogram._": {"doc_hash": "08af9c05ed9ec33011cf691d8744f16a1329f593ca9313da77bf014f1fdb601c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_histogram.if_isinstance_bins_Array_histogram._Map_the_histogram_to_al": {"doc_hash": "893f1a4d74f798c0800c8a297f2520e69a3aa40d114eb07badcaa1a7aecfeb9a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_histogram.if_weights_is_None__histogram.if_density_is_not_None_.else_.return.n_bins": {"doc_hash": "1d4742fe9740f6d42a610e5b742264bd787e4d0781e669cc984d0b00c7109fed"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_cov_cov.if_not_rowvar_.else_.return._dot_X_X_T_conj_fac": {"doc_hash": "de33c4713834c38f05bfb93b902f89c623d2f254a55ac43893fbaf8449cc3f5b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_corrcoef_iscomplexobj.return.issubclass_x_dtype_type_": {"doc_hash": "b583748ef2c83e3424ba5945c7455e8c25c71f166fc96e72348ddb4ca89074db"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__unique_internal__unique_internal.return.r": {"doc_hash": "9d756ba60b55fb34b209b86ec2bf510ffb69bb9c7a197fc521968064d9485da1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_unique_unique.return.result": {"doc_hash": "ad5259f6b22bfcd47e993a3eea5f76e6f9decae7974a5884c339fdc8875ac24b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__isin_kernel_isin.return.result": {"doc_hash": "568f31af0e36d5794686aceb9e8d65b266ba34154cc12bb4cd885dafd371fa53"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_roll_roll.return.result": {"doc_hash": "dfe19ad18fc6a8297e1370adfe575ff7887926e8db4b119f5ba1e5dbac407a32"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_shape_squeeze.return.a": {"doc_hash": "d35077b65e0a4b2f22f34f70c02ae9a2cf04a06dcc19efea54fa55c215fcf21e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_compress_compress.return.a": {"doc_hash": "567cd16e028859ee602099e7698ad0b5d98c32106caefcede9d6c3d66d5471d7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_extract__isnonzero_vec.np_vectorize__isnonzero_v": {"doc_hash": "bcec4cda5523975b5fc718513d8084415752653de9ff2c613e91e0999083bf5e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_isnonzero_isnonzero.try_.else_.return.a_astype_bool_": {"doc_hash": "4809b5b5cb7fcd7fa8cb7374f749f9841d5fe340eaa97b48404d0d8a4767e972"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_argwhere_where.if_np_isscalar_condition_.else_.return.elemwise_np_where_condit": {"doc_hash": "dc60cae82fa51b236505781333d6c8a1aebb18702cbe71abeca9b29ef2400570"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_count_nonzero__unravel_index_kernel.return.np_stack_np_unravel_index": {"doc_hash": "ab5a3dd7b6d79fcd5c4a991e04199f6eb978a390b71bba3f39654893b2235e71"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_unravel_index_piecewise.return.map_blocks_": {"doc_hash": "3879491e48294ad5665701762b8712ff2888b0dba8a196b95835cc57861bae05"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_aligned_coarsen_chunks_aligned_coarsen_chunks.return.tuple_newchunks_": {"doc_hash": "920d81e3c18f19c9a880e30013a7c634b1454069879b8eb24245e351ab8b9784"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_coarsen_coarsen.return.Array_graph_name_chunks": {"doc_hash": "de5035a2e3952dcfd272c054cc076bbb38968aad6deb7171b3815ea38ae17ca2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_split_at_breaks_split_at_breaks.return.split_array": {"doc_hash": "16fe33a7012f00edd2e100ec776d30c241923783bceb41c7c82c482f9f34a1e4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_insert_insert.return.concatenate_interleaved_": {"doc_hash": "3d331e06bec048400b6af505af4a05e743bbd3cd42408d3b8e8f9c935376d217"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__average_": {"doc_hash": "4fb732f241720fb650a5b208494c5d440873d18c767d4d8b3816c50b6c82ebf7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_from_itertools_import_pro__sanitize_index_element.if_isinstance_ind_Number.else_.raise_TypeError_Invalid_": {"doc_hash": "e29f62c3cb9d9d108fe90ae51e603b1083807ebe6bb2b98441907a028c3c2d5b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_sanitize_index_sanitize_index.if_index_array_dtype_b.else_.raise_TypeError_Invalid_": {"doc_hash": 
"ad104f8338fe7b686dc6f57f12ce31ee3c4ee0a05f121ffddf6b26b2d34bd028"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_array_slice_array.return.dsk_out_bd_out": {"doc_hash": "a3233b9f43970793586c80a2d46020be933dba7d17525093f74e5738c1306187"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_with_newaxes_slice_with_newaxes.if_where_none_.else_.return.dsk_blockdims2": {"doc_hash": "ec8c6db1facb05e1639677aeeab883d186d4fad5cff95fcd7f49dbe9059d2ef0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_wrap_lists_slice_wrap_lists.return.dsk3_blockdims2": {"doc_hash": "0895dd09e776595b18867d03b1391d99818c50790a2a5c3423451033d736ebf2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_slices_and_integers_slice_slices_and_integers.return.dsk_out_new_blockdims": {"doc_hash": "7999bbf9b7dd730d2ca199eb84730b8fe772aef2440fdac3630cb3089c61f020"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py__slice_1d__slice_1d._Returns_a_dict_of_blo": {"doc_hash": "7bbcd0fdb34830ae0d3c936259ca585c4a3c39953dbf00eb68dccf4e5bea31e4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py__slice_1d.chunk_boundaries__slice_1d.return.d": {"doc_hash": "95a181199ed9a7d2e06919b03f3979ec02f1176d0257c896caf9e75cfed42a92"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_partition_by_size_issorted.return.np_all_seq_1_seq_1_": {"doc_hash": "28e7ab4b793a44804a370057772226dcb85da72122695bf15b234ebab44115bd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slicing_plan_slicing_plan.return.out": {"doc_hash": "140f5fe6ab72b1eb790e1fe93bda10ea5bb9385b2b660eac9e2ef861e5de830c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_posify_index_posify_index.return.ind": {"doc_hash": "1a8ecc737310020569a457adbf089696953ec3167b5f2cd22391df10d0c44f2b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py__expander_expander.return._expander_tuple_where_": {"doc_hash": "0e79f9db3ef819894b1d515f616d822bbbdab4fa80ca7cdd09b697d208eb2035"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_new_blockdim_new_blockdim.return._int_math_ceil_1_0_slc": {"doc_hash": "23d9adaf194856e87bd2a16d30631747032092542ed63076a1a12b92dfe703c0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_replace_ellipsis_replace_ellipsis.return._": {"doc_hash": "a7d917d689d0d98174eb823505c95bd929104c34a6e20b7369d9bb3a6a80c01d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_normalize_slice_normalize_slice.return.idx": {"doc_hash": "9a0678db8b41bc54c0950da775ac4b69a28cc4f4d5111782b0ae35a43d41938f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_normalize_index_normalize_index.return.idx": {"doc_hash": "e2e189633f7c3015b5d59049e9ff7a7c78464a3121d9c237f5fccfe2bce3d782"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_check_index_check_index.if_np_isnan_dimension_.None_5.raise_IndexError_msg_i": {"doc_hash": "8e21cfa2cfe6bce776ffde020abb3dfa6830e77e2304a0294753b647d864d92e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_with_int_dask_array_slice_with_int_dask_array.return.x_tuple_out_index_": {"doc_hash": "6b59ac04ce82c08eadce8d5e63b9bdc0e58fdd519fd5ee07c408033fb62464b4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_with_int_dask_array_on_axis_slice_with_int_dask_array_on_axis.return.y": {"doc_hash": "62e1facab8cbe789d0a451d3a72e21139ddd6a2d6c252db2581ee27fbcd3b068"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_with_bool_dask_array_slice_with_bool_dask_array.return.out_tuple_out_index_": {"doc_hash": "d1d7db64c8cfd30be9b5f3618395a487308152229d853e6bcb9a2e87eb6a45ef"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_getitem_variadic_make_block_sorted_slices.return.index2_index3": {"doc_hash": "70591c77dc36e8c49cb829c55d917babf58765ef4c226c0d0583734e87204cf2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_shuffle_slice_shuffle_slice.with_warnings_catch_warni.return.x_index2_rechunk_chunks2": {"doc_hash": "929db5cfd80ddb6aa6a9b38c45d060851fc85a88ed4a11fd6285daf297fcd539"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py__HashIdWrapper__cumsum.if_initial_zero_.else_.return.tuple_accumulate_add_seq": {"doc_hash": "273260da3228d358d0277fa6e8df99c626f2790bd8e066fadfb534d6a736fb4d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_cached_cumsum_": {"doc_hash": "77ee85e6c4584fb7dbcde70ff7f743fb4e203110c9552e8b521163c7d9f328e8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_____all__._": {"doc_hash": "1367473a88fcde18fcff133f6d0c96cbc85440e7ffb709475d224126ce3f7244"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_None_21_ttest_ind.return.delayed_Ttest_indResult_": {"doc_hash": "51e1a377d6b905215ad8ab5d13aac975ac71a843235dc5081ceced3408d76da3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_ttest_1samp_ttest_1samp.return.delayed_Ttest_1sampResult": {"doc_hash": "d4e8361beef68569557b3aa1be59f80a9de9e4bcc0e122d51aa1a62de9d1c18e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_ttest_rel_chisquare.return.power_divergence_f_obs_f": {"doc_hash": "b77d275fa500050aa6cdb6684f323bffff180758cfc94caa0ba65cf2523ae195"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_power_divergence_power_divergence.return.delayed_Power_divergenceR": {"doc_hash": "3928d73e9b6377412e93a23d33ac71b303c5dc06b23685b18b604835d267a9c3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_skew_skew.return.vals": {"doc_hash": "8eb0859f8238b0fcbbe123384a0e2b45e7226d23dcbaa093018b77653927b0f6"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_skewtest_skewtest.return.delayed_SkewtestResult_n": {"doc_hash": "9c8f4a69c3562f111ae5280d0ed64830204e9f589cc8fb03797bdba90047a021"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_kurtosis_kurtosis.if_fisher_.else_._TODO_scalar_vals_va": {"doc_hash": "2689e80894c8805bffcf7b8b65ac15070be411c422c80e406781a4d2bf802e2e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_kurtosistest_kurtosistest.return.delayed_KurtosistestResul": {"doc_hash": "0b59eca99b261f2a6dc4ec2c68c1384ef33319a43b3489b3eeae835ae033ca91"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_normaltest_normaltest.return.delayed_NormaltestResult_": {"doc_hash": "af267ce6482e3919de99b4314f86e6202e0db8ec4f292cdcbf67c81df19d2d80"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_f_oneway_f_oneway.return.delayed_F_onewayResult_n": {"doc_hash": "df625c0515093a0476e747d82a1b7e0d9798d3a66b392bfe66c8b7fdf4607dfe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_moment__equal_var_ttest_denom.return.df_denom": {"doc_hash": "f6f0bdbb127c6fe177801091b6efc823a3dc679e0de9c42586a4771c79013fd6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py__unequal_var_ttest_denom__unequal_var_ttest_denom.return.df_denom": {"doc_hash": "96ac0555c2944ec8a4184c5b437190ad34cee52c2bd8165af1d472b014df0c60"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py__ttest_ind_from_stats__count.if_axis_is_None_.else_.return.x_shape_axis_": {"doc_hash": "fbcffe68fad2e72f090cdbf505c3a2109180958dea1eb3555a70628fad3344f3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py__sum_of_squares__sum_of_squares.return.da_sum_a_a_axis_": {"doc_hash": "c3897ac60c2619436f60a2545427f0c0c01b2a4b361d49c017290775fa390d6c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py__square_of_sums_": {"doc_hash": "969dd22840e2a94c8e305de702549d69687e669af6a110b8cab9623be9ded6e4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_math_text_style._font_size_1_0rem_font_": {"doc_hash": "568abfa65255b948ff400d51c1d3d08533b5ecd27d938e74a490df4a7a0ac453"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_svg_2d_svg_2d.return.header_n_join_lines_": {"doc_hash": "92751f7293674586a471f49e1c070413390466bba33acc966a4ce9ab53ded1d4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_svg_3d_svg_3d.return.header_n_join_xy_z": {"doc_hash": "16d6359666890362e69525b3c6e02c85c46abf8b24703440c440557996afbd6e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_svg_nd_svg_nd.return.header_n_n_join_out_": {"doc_hash": "dd5a4d00c7b6c834f800d2956bcd7e968f08cfa0f52a4322e4060b6a3f648c59"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_svg_lines_svg_lines.return.lines": {"doc_hash": "78fadd4c453ed18dc3366ca1ece3aef3f470aab71a3f7430071f9cf6ed31372c"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_svg_grid_svg_grid.return.h_lines_v_lines_rect_": {"doc_hash": "73db723f9082024545f451b8ef9ed0d23d2197b12be41b0a954e3d51c20c0bc8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_svg_1d_draw_sizes.return.tuple_size_r_for_r_in_r": {"doc_hash": "797dbc8db169120c11c2a992ba7ac8471b481076c9b7a15e98bfb3b8dc361eb9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_ratio_response_": {"doc_hash": "42310fc68f08be8b95e3387bc67318d65b1f80a4c22e9d06ebe93be6af59f566"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_copy_from_numpy_import_nancums": {"doc_hash": "9bb06d974370ae24bb3e9eff647ca0914b69eab8f24951bd2b7e21cea8d3b1ef"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_getem_test_getem.assert_getem_X_2_3_": {"doc_hash": "4eb8ee6bcc2493bdf5d71385d3d5493459fc2f40b061502b2e4e37c11416cf80"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_top_test_top.assert_top_identity_z_": {"doc_hash": "6c27fa0bc4fbb025ba9120e123615658d9fda81c799cc953c4f971b70f2e9723"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_top_with_kwargs_test_top_supports_broadcasting_rules.assert_top_": {"doc_hash": "25c7e5fd86dde49a643ee0cc9ea5570dffeb906ca4793ec1eeaf445d4277a710"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_top_literals_test_top_literals.assert_top_add_z_ij_": {"doc_hash": "4c49fae161451ffe90f0fbcbb5137f8968807e21c2d2c0c9f1c84fcbd6280074"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_blockwise_literals_test_blockwise_literals.assert_eq_z_x_": {"doc_hash": "68b24f134c9cb0924c05564412c1746c80cc048f53b6cc85b000fe6abb0e28e7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_blockwise_1_in_shape_I_test_blockwise_1_in_shape_I.da_blockwise_": {"doc_hash": "5535934c291d75dc1fd53e6e30983f6be81508a331ca7b7a7fb62505a6d4328e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_blockwise_1_in_shape_II_test_blockwise_1_in_shape_II.da_blockwise_": {"doc_hash": "ac30dbf12443d06d2ce6d2c00b908444511130cf2c39d67fe411cc3d1c8c0389"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_blockwise_1_in_shape_III_test_concatenate3_on_scalars.assert_eq_concatenate3_1": {"doc_hash": "e5d9c3594a253766930ccbcebf1362b2fd7db74abc315192c5c0fd6f7601cca0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_chunked_dot_product_test_chunked_dot_product.assert_eq_np_dot_x_o_c": {"doc_hash": "3bd4bea172bc822772e6bec7f77346b6364e719ee4c38e5b27ad19f4aec95583"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_chunked_transpose_plus_one_test_chunked_transpose_plus_one.assert_eq_concatenate3_ou": {"doc_hash": 
"7e116225ac26168be0f64841f2b4eb4a73797bae33a3abc64acd2b4f78b47a05"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_broadcast_dimensions_works_with_singleton_dimensions_test_broadcast_dimensions.assert_broadcast_dimensio": {"doc_hash": "33cc48d6c640a191c9b0e167587bcf0e733bbf4eef38bb61c9c634895e37f4bc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_Array_test_Array.with_pytest_raises_TypeEr.Array_dsk_name_chunks_": {"doc_hash": "a81544596fa96ae9d6914f7ca857b0cb9c1a7d0371ff4cda4fcd1093a0835806"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_uneven_chunks_test_numblocks_suppoorts_singleton_block_dims.assert_set_concat_a___das": {"doc_hash": "fc07a47cf6c8b8cc9f748a3ae29f04a04cf6eb254e0607e1acee37596a6ea621"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_keys_test_keys.assert_d___dask_keys___": {"doc_hash": "34cad0e9564d4e0e39cfb09961fc070db729ff2c1071841de445a645d8edcd5d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_Array_computation_test_Array_numpy_gufunc_call__array_ufunc__01.assert_eq_ny_vy_": {"doc_hash": "3a0b038343c42d2fccec46893eb826b7e839944993c164da2ef649ecff89031a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_Array_numpy_gufunc_call__array_ufunc__02_test_Array_numpy_gufunc_call__array_ufunc__02.assert_eq_nv_vv_": {"doc_hash": "867b11eb2d83c0993a7419396a7859159cd42ffb38e4f6e070de7ffe47f272fb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_stack_test_stack.assert_stack_a_b_c_a": {"doc_hash": "d688eb649cd27cf5b605e902451bf1cd22896db635bdec58cab14b4bc3c29ac3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_stack_zero_size_test_stack_rechunk.assert_eq_z_np_stack_x_": {"doc_hash": "e6d227571f136cecf1a6f8252ccd546cc1d78526d5fb091c0536b570bcb43ab9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_stack_unknown_chunksizes_test_stack_unknown_chunksizes.None_1": {"doc_hash": "7cafdeb38f022d3af361221c8544c8a0f9806901e2f1c73a107b81d4f93112d4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_concatenate_test_concatenate.None_1": {"doc_hash": "1898ae135ae2ea103ee7bb216ff922ccb2181ef2602643cd232e24680ea3f561"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_concatenate_types_test_concatenate_types.assert_x_dtype_dt_out": {"doc_hash": "fdfdae7ac8f135b4992fd9ef479ab2de5269569cd39a6aa326bac70e10b80a34"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_concatenate_unknown_axes_test_concatenate_unknown_axes.assert_eq_c_x_np_concate": {"doc_hash": "a9f8d430777ccfa04ebb6f8f1a1ac6a1a32d15ce2844c9b37a5f27c3fc9abf1e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_concatenate_rechunk_test_concatenate_rechunk.None_1": {"doc_hash": 
"a93175738a89d11a26e2b2c321c8e8429ccdfdeeab404ee78319485dbf12dab1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_concatenate_fixlen_strings_test_concatenate_zero_size.None_2": {"doc_hash": "b262f7d17f3ebd44bf07f579ae7dc15f1540b09771e61a4ffe19c0fa41b2f7f5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_block_simple_row_wise_test_block_simple_column_wise.assert_eq_expected_resul": {"doc_hash": "ce84eec631d8d5f37fb80e4106896de15d3f718c9ee4085bc0a463282d3927dd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_block_with_1d_arrays_row_wise_test_block_with_1d_arrays_row_wise.None_1": {"doc_hash": "9aa3ace6f643f5e0d8362989ca882b35cfcf3feb8f9d221eb2243a321a455e43"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_block_with_1d_arrays_multiple_rows_test_block_mixed_1d_and_2d.assert_eq_expected_resul": {"doc_hash": "b44b02f206390a00895de5058a7a7b95fda3094394e5a750c69c9c0547a51235"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_block_complicated_test_block_complicated.assert_eq_expected_resul": {"doc_hash": "72fc40fab684c3183ebe0e30ab311290d5e8a7a917e4ee39c22ace19cf5c06cf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_block_nested_test_block_nested.assert_eq_expected_resul": {"doc_hash": "425b273eb40b8bdbb089d3e762bbd0185fad80dc923e81141f749e6b6e7f57a3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_block_3d_test_block_3d.None_1": {"doc_hash": "ed198690448c60913024f931f92059d1836b625acdfab22acd18eed4af1916de"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_block_with_mismatched_shape_test_block_tuple.for_arrays_in_1_2_.e_match_r_tuple_": {"doc_hash": "79d42c66e8139a5fb9917f568656bb42b9a6645c1b276858ba8fcce61457097e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_broadcast_shapes_test_broadcast_shapes.None_2": {"doc_hash": "a28ca2388afaae56ce8f3fd56a65fb210c29e139b1cdb9e754030b5942f519a9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_elemwise_on_scalars_test_elemwise_on_scalars.assert_eq_x_sum_y_a": {"doc_hash": "adfdfa8ae3f5849db20e47f6c33644f9301eb7ea578908e9125a8243d6be2553"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_elemwise_with_ndarrays_test_elemwise_differently_chunked.assert_eq_b_a_x_y_": {"doc_hash": "ff1f8de07278b0451ff52188cd6b0ee7004e9dcc3e97881768fbd3ecbc81de49"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_elemwise_dtype_test_elemwise_dtype.for_x_in_values_.for_y_in_values_.assert_da_maximum_x_y_d": {"doc_hash": "68bbf335477098eb76b9d06670d2eeb43a062b7ff6845650879fb583393e491d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_operators_test_operators.assert_eq_a_x_": {"doc_hash": 
"f06389631295f0fe1a9c6a7ce17d2efba664ecd21d6c8073edafc11b60356519"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_operator_dtype_promotion_test_field_access.assert_same_keys_y_b_": {"doc_hash": "1ceb51eb8f145c63105654430c14cf3c846203cd6fe4c61427fb2703759c389f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_field_access_with_shape_test_field_access_with_shape.assert_eq_x_col1_col": {"doc_hash": "5fa8b9c63c7ade586cdc38ae684f05a2269b82dcf53ac5e7a17fc80640f953fd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_matmul_test_matmul.None_6": {"doc_hash": "79fcf8ec979ab71cec61dcfe68e10ba721ce246e1e0883dd711251a8a3cfdaf0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_matmul_array_ufunc_test_T.assert_eq_x_T_a_T_": {"doc_hash": "0a10e8e45b3512ecc797604e1c3c2bb622be47acd42c6aee7aa7cc8200c0e966"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_broadcast_to_test_broadcast_to.None_1": {"doc_hash": "59376d8a183574c490afd4a3a81e044b2534baced3666bf92600644fc323583d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_broadcast_to_array_test_broadcast_to_scalar.for_shape_in_tuple_0.assert_eq_a_d_": {"doc_hash": "72312879edf10eef373a4449e50d74e4d6bc9bef2f23baabb2ed6ad5bb904458"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_broadcast_to_chunks_test_broadcast_to_chunks.None_3.broadcast_to_a_5_2_6_": {"doc_hash": "cc6c6a0afd4a02806ff427becc781158592182886f264d27a313607b6a3b8a10"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_broadcast_arrays_test_broadcast_arrays_uneven_chunks.None_1": {"doc_hash": "9550c24b47a8c3d8aa8287b65191cc61cbf64ec17fbb67c48b3f2324f0c4dcd2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_broadcast_operator_test_broadcast_operator.assert_eq_w_d_w_": {"doc_hash": "366932026ef56ec1024074d438d5618d48de6a3fbdec92e3f46484906ea20e46"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_reshape_test_reshape.assert_eq_xr_ar_": {"doc_hash": "5e30910485ac725c126986327e6ead62e44c3ac3b2136e3eeabcd80d2a89c09d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_reshape_exceptions_test_reshape_fails_for_dask_only.for_original_shape_new_s.with_pytest_raises_ValueE.da_reshape_a_new_shape_": {"doc_hash": "e791e623064bfa497fb9bc1f866181ce664dd1078b61832c5de19ba1c3700202"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_reshape_unknown_dimensions_test_full.assert_eq_d_np_full_3_": {"doc_hash": "bc15a9a084b48cd0b9a148001cce37e3a121d77c3170474299eccc65339c0537"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_test_map_blocks.None_2": {"doc_hash": "f8b90a7959e1a4616fec0b3f2d1723cba4493e11fa769ad908db65ff1dcc78f4"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks2_test_map_blocks2.None_1": {"doc_hash": "31df0d34ae2a2be09cec913b0cefd15acc3e06df9dc0ff58cdb859258d4fafe0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_block_info_test_map_blocks_block_info.assert_eq_z_x_x_1_": {"doc_hash": "daf70acdc58cef9e7a033a4a81afc149fc822582a32bf0861f5fb57f276e5468"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_block_info_with_new_axis_test_map_blocks_block_info_with_new_axis.assert_eq_z_np_ones_4_": {"doc_hash": "ce6e9a9934f3f4cb81d85d3c8b2732767f8601ea1c37787d671d1019f3771e88"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_block_info_with_drop_axis_test_map_blocks_block_info_with_drop_axis.assert_eq_z_np_array_7_": {"doc_hash": "ce07a1379426de86df890bb0f2ae33b3329314f3ec836b24fe7c7b1b004e825f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_block_info_with_broadcast_test_map_blocks_block_info_with_broadcast.expected2._": {"doc_hash": "ae0f0679f207b9b7a882e5b12ba290f2c3b84b07f2e5d3896c89852c3c460200"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_block_info_with_broadcast.expected_test_map_blocks_block_info_with_broadcast.assert_eq_d_3_np_ones_": {"doc_hash": "3783c146147b1aac1bc671c7f20de3057f94afb0fa64808d163d64f036db4d4e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_with_constants_test_map_blocks_with_kwargs.assert_eq_result_np_arra": {"doc_hash": "4ee4835d0efbc9c13a921b700911b485aee40e623d93fafcf3ce65cf185fcb88"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_infer_chunks_broadcast_test_map_blocks_with_chunks.assert_eq_dz_np_ones_5_": {"doc_hash": "2add0acd83a37b348fec6044cc6225ab6a1e6e1e3152b35a4ab1d5001479e470"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_infer_newaxis_test_map_blocks_no_array_args.assert_eq_x_np_arange_8_": {"doc_hash": "79b1e4c9cc00ea162b8502aa7c78e5b4c32e0a0ef2b10be484b65c89c3f81691"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_optimize_blockwise_test_map_blocks_optimize_blockwise.assert_len_optimized_laye": {"doc_hash": "12b17d7041be74897cd9280d72076c444318ee7383092a374a5a335161f00a7d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_repr_test_dtype._no_shape": {"doc_hash": "fbe037a3c3860b57e464c66ac8e61ecb460df6d9836ca09d639c64b95183b10d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_blockdims_from_blockshape_test_blockdims_from_blockshape.None_2": {"doc_hash": "90d6fac0bcb12dc8a42d0e7d0fce3a3f47021487919239707d19c6ebc214b1c5"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_coerce_test_bool.with_pytest_raises_ValueE.bool_darr_darr_": {"doc_hash": "451c19cb249849a1c72309879fa238440d0c081191c3393b400a4354e00ea195"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_store_kwargs_test_store_kwargs.None_5": {"doc_hash": "6d84d627b88d292ae98e508b5e1088c33fb8783e07cb028a581d7dda8eddcffd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_store_delayed_target_test_store_delayed_target.for_st_compute_in_False_.None_7": {"doc_hash": "af1f2222523acb7dcf4c09994c77891b93fc1afd08a0a214e828e51ad4372db4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_store_test_store.None_2": {"doc_hash": "8b58d8625006f2f0aed72fc576517511a0dc708c29dfefd7cd28c55acc62e937"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_store_regions_test_store_regions._Multiple_regions_keep_": {"doc_hash": "f09870380f5cf717c9b8285d20912b17f74e0726ecd4a1805869426c1971291f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_store_regions.None_1_test_store_regions.None_1.assert_ar_2_all_": {"doc_hash": "749f9fc8d9f5593bb97db240bead472c873decbad139c4d237f592ca7426fba2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_store_compute_false_test_store_compute_false.None_9": {"doc_hash": "b7777e93deb1f5cc10e7eec3d285f144f0c9ba08c3bdfd1eedbcfd962bb95ba0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_store_nocompute_regions_CounterLock.release.return.self_lock_release_args_": {"doc_hash": "1b293e9a2a900c8dfae0abe515dadab445dc31be93c8c2430632c97b99d9ff8f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_store_locks_test_store_locks.for_c_in_False_True_.if_c_.else_.assert_lock_acquire_count": {"doc_hash": "673c2687ac643e78a274b0cbfcb56aeac07a578a93ef311829544e619f5fe3f5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_store_method_return_test_store_multiprocessing_lock.assert_st_is_None": {"doc_hash": "5cd87234b9f4668c0aec91cf2559cd3c7660a2d70e35a3fb99eb4f7c2e945e94"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_to_hdf5_test_to_hdf5.None_3.with_h5py_File_fn_mode_.assert_f_y_chunks_": {"doc_hash": "4919bb9844d8add0ed895468cca2e95a1045987ae04d854650acda46938e6b3b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_to_dask_dataframe_test_np_array_with_zero_dimensions.assert_eq_np_array_d_sum_": {"doc_hash": "1553d36ee72ffe89f4962ff1bf50dc09eceb589c8d70e41ec1d3f68b47657946"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_dtype_complex_test_dtype_complex.assert_eq_d_numbers_": {"doc_hash": "a50183a03392c1c53bb76397068d9ba508051bac4bcdbaec2c0543457af97ef8"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_astype_test_astype.assert_d_astype_f8_is_": {"doc_hash": "143aa74287191a55db29fcd540956f724c8427cfda638e25b7c6f1bf9f06bdcf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_arithmetic_test_arithmetic.assert_eq_da_log10_a_np": {"doc_hash": "592a6fd470e44c29d3790c8c89ced914c1ea3c5a95fb2f3b9420281099410bb3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_arithmetic.assert_eq_da_log1p_a_np_test_arithmetic.assert_eq_da_ceil_a_np_": {"doc_hash": "0360f401fcd4b8e0241fd7f239264848394bfd52a955c16d015d63442d884e8e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_arithmetic.assert_eq_da_trunc_a_2__test_arithmetic.assert_eq_da_around_a_1": {"doc_hash": "c08e7459ed029fe1cf2df3ff31f1ce8e456089e8dd4b15835763d4cdcbfc45e2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_elemwise_consistent_names_test_optimize.assert_all_key_in_result_": {"doc_hash": "e49f9a0baca82d8e5fe8c63efbcc4cb8231b09b3f72d3aa89f7270f941f64074"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_slicing_with_non_ndarrays_test_slicing_with_non_ndarrays.assert_eq_x_1_sum_": {"doc_hash": "b1d9e6459d95708accc4ba3dac9da56c9510bd382a583e5c1bb33e2456944146"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_getter_test_getter.assert_eq_getter_np_arang": {"doc_hash": "076c8cf14728f28dceb4b9e8d0e0aee48c66e4d6a9b24fbb12e5486408dd844c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_size_test_Array_normalizes_dtype.assert_isinstance_x_dtype": {"doc_hash": "e67beda188450a0dc30777eca1d22508885c61cd3ea46d24210e092a78b355c0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_from_array_with_lock_test_from_array_with_lock.assert_eq_e_f_x_x_": {"doc_hash": "3f731fa5e2793dba433d50016abb3625079a4b480c2d49d7af2b09781a667b38"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_MyArray_test_from_array_tasks_always_call_getter.assert_eq_x_dx_": {"doc_hash": "46edde344f5e43dcabae2076d4945ced74dcb5d4e42a1ecaad236548050097d6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_from_array_ndarray_onechunk_test_from_array_ndarray_getitem.assert_dx_dask_dx_name_": {"doc_hash": "00256910170d3f195227e24779cf786f0ec1b6442538ae3e073c54e43e0ab709"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_from_array_list_test_from_array_list.assert_dx_dask_dx_name_0": {"doc_hash": "1493ad3dc8a79fb60034a6d24b7ff6e0cbea07ed0db757eb4090f2b352f58d66"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_from_array_scalar_test_from_array_scalar.assert_isinstance_": {"doc_hash": "4f1dfbb8e502f4b74c2495cecc4965846e545169f0a99e643d18c8541ace3305"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_from_array_no_asarray_test_from_array_no_asarray.None_2": {"doc_hash": "fa88b0fd55f978a03ff3aac00108450c639f8ed5c6016fe3c157291a4832bd98"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_from_array_getitem_test_asarray.assert_eq_asarray_y_x_": {"doc_hash": "e3e940e17a0c43d8191d62deae5aa1d11cfa536c0af5785d5270f25a34e8c084"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_asarray_dask_dataframe_test_asarray_dask_dataframe.None_1": {"doc_hash": "702c0cde08e97ed1cf70df7748f22b5f0c703383220588dba8771c7528f74f21"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_asarray_h5py_test_asarray_h5py.with_tmpfile_hdf5_as_.with_h5py_File_fn_mode_.assert_not_any_isinstance": {"doc_hash": "8b8403024e8bf286dadf1c5535336b8bdc259b6c04ec7784004b5136119b1778"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_asarray_chunks_test_asanyarray.assert_da_asanyarray_dx_": {"doc_hash": "de19d9e4fb1c9afc3d53cf9db82d3726d22b26109442c32f11d21463d30b0452"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_asanyarray_dataframe_test_asanyarray_dataframe.None_1": {"doc_hash": "2c5cb1e57d77d6d6a009b82ff37b3c9966e37974b98ad923ba28267229809fc0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_asanyarray_datetime64_test_from_func.assert_same_keys_d_from_": {"doc_hash": "714f621048c69f9d23d74fdb0a02ef31bfbd2442c02820f3fed35b3d4d6dfbf5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_concatenate3_2_test_concatenate3_2.assert_eq_": {"doc_hash": "6ccef6720f67ab13b13ddd168a6431b25025b588876edec344ed43a96b2211a6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks3_test_from_array_with_missing_chunks.assert_d_chunks_da_fro": {"doc_hash": "f3577e8ba1afa43e01fdab535c8d7ef35ca7ae4c7f5c4a30e9be9563e93d9dff"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_normalize_chunks_test_normalize_chunks.None_1.normalize_chunks_5_": {"doc_hash": "55bc54426b1d00cdd9c6bdfdd759abe8ce215e144efe3559feb79792f149e917"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_align_chunks_to_previous_chunks_test_align_chunks_to_previous_chunks.None_5": {"doc_hash": "6d6caa5425cc99d3075a2b5bd98f82245c04fb90febd2135da994dcc6b0ec1e6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_raise_on_no_chunks_test_long_slice.assert_eq_d_8000_8200_x": {"doc_hash": "65ef49a093f3f02ef90e858d803c7ebdd572977923e3eb1a1be4d4f33d6bdad1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_h5py_newaxis_test_ellipsis_slicing.assert_eq_da_ones_4_chun": {"doc_hash": "f45f69028d83f2879024c848f11b2fc16d57365e426f0102f0f943b2bc56793d"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_point_slicing_test_point_slicing.assert_same_keys_result_": {"doc_hash": "0e537a8bb0842de638eec7f7fb0e009adfa32ad702344988a57826051a3ebd63"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_point_slicing_with_full_slice_test_point_slicing_with_full_slice.for_ind_in_inds_.assert_result_shape_0_": {"doc_hash": "961a4c425c612cef6dcc3cd121e8d7d6953a76058b42e440e614ef6d4ff12eb4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_slice_with_floats_test_slice_with_integer_types.None_3": {"doc_hash": "452fbdbf445878105d423d77ebd0622964275e88435e95fdaa4e5b00c9f8a41a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_index_with_integer_types_test_vindex_basic.assert_eq_result_x_2_": {"doc_hash": "d6d3a7dff9bba7d8417c180c3ad3503383fc87a7ea02824509cf3ee6d4996740"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_vindex_nd_test_vindex_nd.assert_eq_result_x_T_": {"doc_hash": "c62ccf2a1dd080a181de9853cc9779d2a6b81942168917ad5f5a986c06b0c9f1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_vindex_negative_test_vindex_errors.None_4": {"doc_hash": "cbf598d64ee96a10b41aa632b2bf6fe9c2605461a724f7bb3b4bf9e9d7867f79"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_vindex_merge_test_vindex_merge.assert_": {"doc_hash": "cdf37c2914800c6d27c5a0beede4c88891bd901bb1cc4629138091c339b3a759"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_vindex_identity_test_vindex_identity.None_5": {"doc_hash": "a077ac7d1e0e8d9293d352296b0b893f68ab059ef6d60cfdd6747303cb4815bf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_empty_array_test_memmap.with_tmpfile_npy_as_fn.with_tmpfile_npy_as_fn.try_.finally_.target__mmap_close_": {"doc_hash": "403413ef8c4231be3fe090c4f48185c553ee06c6f41b6bdc9421159094e885cf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_to_npy_stack_test_to_npy_stack.with_tmpdir_as_dirname_.assert_eq_d_e_": {"doc_hash": "d32dc2248ab6953e71bd7bb8a8b6b14529e84e13c375af1565b6df0f42a2f01b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_view_test_view.None_1.d_view_i4_order_asdf_": {"doc_hash": "cfb2488d7274b05a1e477cfcf6af2bcb206dac55f9f9170296788f241a6867fd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_view_fortran_test_h5py_tokenize.with_tmpfile_hdf5_as_f.with_tmpfile_hdf5_as_f.assert_tokenize_x1_to": {"doc_hash": "ecf4643c06c00279b5b1a97e9f8f4d30b3f48c008c22cf32d335d507584e69c0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_with_changed_dimension_test_map_blocks_with_changed_dimension.None_5": {"doc_hash": "f3ab7db40ada16aeb4ff63fb6f9c841327a3464898e4b6d1a30435de96138a48"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_with_changed_dimension_and_broadcast_chunks_test_map_blocks_with_changed_dimension_and_broadcast_chunks.assert_eq_result_expecte": {"doc_hash": "c1c2abb7ed0e3bb1302fd1412fac6c1d872668cbcc814318e3b22b8ef86cffdf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_broadcast_chunks_test_broadcast_chunks.None_1.broadcast_chunks_a_b_": {"doc_hash": "bcf7561e54a1a5e8075fca9c49e6764a1983ef19e8733841b8111082937d60be"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_chunks_error_test_dont_fuse_outputs.assert_eq_a_np_array_1_": {"doc_hash": "9655b911531cc034221a7c741f60bdb63328c60909f27e7c9c7b18168b8f4007"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_dont_dealias_outputs_test_dont_dealias_outputs.assert_eq_a_np_ones_4_": {"doc_hash": "bd8944e8273663dd283b217cca8bad793ce2eb96d8c7bc6afad18c4e3633578e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_timedelta_op_test_to_delayed.assert_a_compute_s": {"doc_hash": "27d8183d6e51efda856953bbd30192b8e3d3e9bb328cc6f9c1e5ab49c2654e89"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_to_delayed_optimize_graph_test_to_delayed_optimize_graph.assert_d_compute_d2": {"doc_hash": "e71259d450fcd69bd99c0fb4eb3a0a4bdb2182273f4eb515947aa6147bb474d1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_cumulative_test_cumulative.None_3.x_cumsum_axis_4_": {"doc_hash": "ed40c9816da2733eb309883fc112a4166c7c5052c8581d6fc80705808dc0b88c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_from_delayed_test_A_property.assert_x_A_is_x": {"doc_hash": "bfd76df0ef13182674719ce2518e3109b40c40fb4fecd0682d3c08d11ac43022"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_copy_mutate_test_copy_mutate.assert_memo_id_x_is_y2": {"doc_hash": "8dba4f582fed14cb30339417c75bfd3f89169aca77c75abe9a5f052c37559162"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_npartitions_test_from_array_raises_on_bad_chunks.None_1.da_from_array_x_chunks_": {"doc_hash": "7754142dd5e15de1f126817301120fc0c2dea876755e568875c1c84ad7de4f44"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_concatenate_axes_test_concatenate_axes.None_1._too_many_axes": {"doc_hash": "a88f203f2e629c9e676df39a94bfb91a5199f29c2049d015d2359032e80f96de"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_blockwise_concatenate_test_blockwise_concatenate.assert_eq_z_np_ones_10_": {"doc_hash": "728f62ef1f3bb31df72515d605892b2a7be932cc4871bc406927dfc2fd065c89"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_common_blockdim_test_common_blockdim.None_6": {"doc_hash": "c838984d6a9426a6ad31a7fab676729c09e03b9eebfd0b2eec70158d7a7090d6"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_uneven_chunks_that_fit_neatly_test_elemwise_uneven_chunks.assert_z_chunks_2_2": {"doc_hash": "d98f9d1b3c7c9560539f165b47e32a93eb5040daf058678dbc8ade8826800b7d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_uneven_chunks_blockwise_test_uneven_chunks_blockwise.assert_eq_z_x_compute_": {"doc_hash": "7945a96514fedf8e17d3cde32e586eb510a38a91498a8f20a77a7d45af0f8202"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_warn_bad_rechunking_test_map_blocks_delayed.assert_yy_key_in_zz_dask": {"doc_hash": "0426978cbdaee3b48adf2e5e975a93074097ee3fdbe39c6cfa93cb3c7c8f0ed2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_no_chunks_test_no_chunks.assert_eq_x_x_std_kee": {"doc_hash": "40f5ab49514b1900604dca9f4b7a002da577ad33fbd9051f0387ce1873e35bef"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_no_chunks_2d_test_no_chunks_2d.assert_eq_x_dot_x_T_1_": {"doc_hash": "ed8ca01ab07aabef5eb44365c103b96e8016cc8e15efd7922e7c3aa785601422"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_no_chunks_yes_chunks_test_no_chunks_yes_chunks.assert_x_dot_x_T_chunk": {"doc_hash": "f3ff9f9f66dba3e039a6208dccfec67b8feeee25ef6106660e2fd7d3ff3231ed"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_raise_informative_errors_no_chunks_test_raise_informative_errors_no_chunks.for_op_in_.if_chunk_not_in_str_e_v.op_": {"doc_hash": "75b198c542e4db64af3e0ab5d7878dce4e698d8b011eaa5af94be61c2a9ce59c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_no_chunks_slicing_2d_test_no_chunks_slicing_2d.for_op_in_lambda_x_4.with_pytest_raises_ValueE.op_": {"doc_hash": "4682ff178a047891a787bfad3edc66c43663e96ec806e7046f0ccbd54f5487f9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_index_array_with_array_1d_test_index_array_with_array_1d.with_pytest_raises_ValueE.dx_dy_5_": {"doc_hash": "195baecaac2d4fc550d1c9aa2ba54e7fb58710dfacb72e0d141549aafa365376"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_index_array_with_array_2d_test_index_array_with_array_2d.assert_len_record_2": {"doc_hash": "881cb5f6d8035a59f456f7a202144eabbfde559dee1752e715ca3719c4a6380b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_index_array_with_array_3d_2d_test_index_array_with_array_3d_2d.assert_eq_x_ind_dx_": {"doc_hash": "4434d42ac2c6903a770aed667ea6dd831df0b27101390d93507a8fe024cd326c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_setitem_1d_test_blockwise_zero_shape_new_axes.da_blockwise_": {"doc_hash": "6f84c5c17e2efa3e5c09ac0371b249904eab372edf45a494f91084cebd0e762e"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_broadcast_against_zero_shape_test_broadcast_against_zero_shape.None_5": {"doc_hash": "cc04392c8a9d6dddb8d5cdb16680debb77a37afba0143668786873c9c77eb74f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_from_array_name_test_from_array_name.assert_dx2_name_dx3_na": {"doc_hash": "0f7893ed1180fa611e65bbbd2c33bdfef4edf8bf6edb176b131ceccd62e94ea0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_concatenate_errs_test_concatenate_errs.None_1.da_concatenate_": {"doc_hash": "9970fcec6c0e338f6d360a0c46bec08d30107b17d6ca5df625b0e37b9fb874a2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_stack_errs_test_blockwise_with_numpy_arrays.assert_any_x_is_v_for_v_i": {"doc_hash": "c2780eaf8d8c508a4fc4f8a41a684a2b556092c90e1ad7e41fa58c272e2d23c8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_elemwise_with_lists_test_elemwise_with_lists.assert_eq_x3_d3_": {"doc_hash": "13c56071fa2e143b71550faf0111b91ba0f83e41cacb46ea549bfda5a40ac0e3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_constructor_plugin_test_constructor_plugin.assert_len_L_2": {"doc_hash": "098703174d1d1aa178439256ed1eb748ba762b58213eea0c1295c056adfcb049"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_no_warnings_on_metadata_test_meta.assert_a_nbytes_1000": {"doc_hash": "1fdf4573461c8a0e411e1bb473f79c18290b381139e7574ef7dbc4fd58611a11"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_normalize_chunks_auto_1d_test_normalize_chunks_auto_1d.assert_result_expecte": {"doc_hash": "35ce565cc7680b3b01873b596396af6e85c76edd2af764874ea4b1fa03ff1630"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_normalize_chunks_auto_2d_test_normalize_chunks_auto_2d.assert_result_expected": {"doc_hash": "757325413fa9718a21940e30ba27c1ca6aeaaa4804b8e2889b1bb844409048c7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_normalize_chunks_auto_3d_test_normalize_chunks_auto_3d.None_1": {"doc_hash": "16d9c8c738a46cb25f10e4d96859e403093ff322bd9c3c0383ed7e70ba3bb415"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_constructors_chunks_dict_test_zarr_return_stored.with_tmpdir_as_d_.assert_a2_chunks_a_chu": {"doc_hash": "6fbb7b9ffb288c078da08596f4d5cbdf3e4f0c2ce7e31c4977098a896d032a15"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_to_zarr_delayed_creates_no_metadata_test_zarr_pass_mapper.with_tmpdir_as_d_.assert_a2_chunks_a_chu": {"doc_hash": "1ad11b485c0d9dd870b29be8e46048ba77d9edaf6b0df7f698bccab9d3293111"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_zarr_group_test_zarr_group.with_tmpdir_as_d_.assert_a2_chunks_a_chu": {"doc_hash": 
"cfb024268e1bf380ee91c0388a56d699cc15d69fa7b31807dc897a5333211890"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_regular_chunks_test_zarr_nocompute.with_tmpdir_as_d_.assert_a2_chunks_a_chu": {"doc_hash": "f52a15be64f40a05f846628d313b2b94f36aa2300725defb9d04db1ae2968df8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_tiledb_roundtrip_test_tiledb_roundtrip.None_2.assert_a_chunks_tdb_ch": {"doc_hash": "7c27d951e29eab84c1d9ea4d785290e26873b4d08ea7ce7a8542034da00d485c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_tiledb_multiattr_test_tiledb_multiattr.with_tmpdir_as_uri_.assert_eq_np_mean_ar2_d": {"doc_hash": "e3912ac98bf6a7ef0bff4856ce639b9de3dbee09f22cce1ba454d937362d560a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_blocks_indexer_test_blocks_indexer.with_pytest_raises_IndexE.x_blocks_100_100_": {"doc_hash": "08c1b9833168c92f2214819678e46813212dd65d12ea7951184ec668624d7564"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_partitions_indexer_test_partitions_indexer.with_pytest_raises_IndexE.x_partitions_100_100_": {"doc_hash": "3004e3c878f859f051b6adfec96e39909ffba56ea2ced32ed67abd35299438f5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_dask_array_holds_scipy_sparse_containers_test_dask_array_holds_scipy_sparse_containers.assert_zz_xx_T_all_": {"doc_hash": "09fb68edba1a35fc1aa9c2fac352e081690242b9da2dd4cd092651451bfea726"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_scipy_sparse_concatenate_test_scipy_sparse_concatenate.assert_z_z_expected_": {"doc_hash": "9e2d91a2395819a84e6943c4215f9d0baf562e014b7537842104ea7f51e7ab75"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_3851_test_map_blocks_large_inputs_delayed.None_1": {"doc_hash": "0682c238b8146a046882c46691accc7a97766b5e0cba0268bc5a8e63db90d282"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_blockwise_large_inputs_delayed_test_blockwise_large_inputs_delayed.None_1": {"doc_hash": "93630030aa61bbb4cf7b58433ce217f3ef20fada0ad538506768859f4b7ebfdd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_slice_reversed_test_map_blocks_chunks.assert_eq_": {"doc_hash": "8d015b99e7f0bbd7e6da18897c6d1ab5fd1f07e6fcd1b4ba148154886580a463"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_nbytes_auto_test_nbytes_auto.None_3.normalize_chunks_10B_": {"doc_hash": "b4c6f83ff11842ae78265ad5970bc1c99d7ec6be527469072f43f85bd15d8390"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_auto_chunks_h5py_test_auto_chunks_h5py.with_tmpfile_hdf5_as_.None_1.with_dask_config_set_ar.assert_x_chunks_256_": {"doc_hash": "79ea819976daf427d51f47865292bd843273a7afa3b2f1aabdfd88b66a795e23"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_no_warnings_from_blockwise_test_no_warnings_from_blockwise.None_2": {"doc_hash": "c18f28305181f9de8d4b4141154a0467d75fe3bd0b2740d9b6a20e85906ee694"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_from_array_meta_test_compute_chunk_sizes.assert_isinstance_z_chunk": {"doc_hash": "9322a55d4eaf1d82c4e33dc7ce8a1bfce74ed18d4ff49702de368122d1953152"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_compute_chunk_sizes_2d_array_test_compute_chunk_sizes_2d_array.assert_Z_shape_4_4_": {"doc_hash": "0cdf1c3e15d323208f117e7cf9652841ad1c863ff68fcbdef101f86fd188fe64"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_compute_chunk_sizes_3d_array_test_compute_chunk_sizes_3d_array.assert_Z_chunks_4_4": {"doc_hash": "0fa8def731cf4982b96999fbfb37b2aec5e1440da384704418d6b02ff8ba80fc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py__known_": {"doc_hash": "75f36309f11d97af32d3df18de1de17937cbe96eb31d004e37963fa5f357f579"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_pytest_test_array_function_dask.assert_eq_res_y_res_x_": {"doc_hash": "994df497beb6230df086b58c11462e6894425aac36da1e4459a1977fb8f8fe75"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_array_function_fft_test_array_function_fft.assert_eq_res_y_res_x_": {"doc_hash": "988ce35cd4d8ea057e184a7eb7c2ddf5caf1a5d8891f1a020206e044f05d4b2a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_array_notimpl_function_dask_test_array_notimpl_function_dask.with_pytest_warns_.func_y_": {"doc_hash": "286d4963d725b550b97639ddd4a63d9431e5e9693c9b0472f4c4c00664f2c712"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_array_function_sparse_test_array_function_sparse.assert_eq_func_x_func_y": {"doc_hash": "bd0c5aec468ef9d47df9a6a1390d9face3487db579254f43d1d45910e7a25ec1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_array_function_sparse_tensordot_test_array_function_sparse_tensordot.assert_eq_": {"doc_hash": "84ebedcb8a0539c02337b039cba43a4853c7cbba7fca64744ee80e6ea8f07075"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_array_function_cupy_svd_test_array_function_cupy_svd.assert_eq_v_v_base_": {"doc_hash": "de3872a8fe12a61b66f132a69e7188acc2c66df2aa2df765d7f1445a8d505cf1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_unregistered_func_test_unregistered_func.assert_eq_xx_yy_check_m": {"doc_hash": "15a1a8121bd9ceb7b698d1c8b9ad3000afdeca830dd0cf8381675c28dcb50bfa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_non_existent_func_test_non_existent_func.if_IS_NEP18_ACTIVE_.else_.assert_list_np_sort_x_": {"doc_hash": 
"5956372846fc5c5aea89a4579aa3f71caa99f87a8205f007ca04f8a6c7f4e989"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_binary_function_type_precedence_": {"doc_hash": "fc775b7eaf624fe6796e94e78bc60b36a192af90f708792949b4151e22f5f2e5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_utils.py_np_test_meta_from_array.assert_meta_from_array_np": {"doc_hash": "3093c82c80e0b47074176445cc01ee568654755f17dde4d2f03653ff8cab8f17"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_collections_test_optimize_blockwise.assert_": {"doc_hash": "461592654c9abbbed19090f66638bdd1e14e58a2387619151ba13b46f50c9aba"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_diamond_fusion_test_blockwise_diamond_fusion.assert_": {"doc_hash": "a2bee1a2e97db51da9fdd5fac83c6f3e543d3e2ce8994b5a74d566427b1777af"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_non_blockwise_output_test_blockwise_non_blockwise_output.None_5": {"doc_hash": "e1197fa3d6cc643e099e32ca7584528139ed2f9784dba1564f54e73544735343"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_top_len_test_blockwise_names.assert_y_name_startswith_": {"doc_hash": "b32f588d717ed45a14b5a217056c940b4c6ddfcdc0fcd44fa98c0b19047c3bcd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_new_axes_test_blockwise_new_axes.assert_eq_y_np_ones_4_": {"doc_hash": "02017fddf6506c9791b999bac6e5bf96f97ba47ae969c71e210b6294215141f5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_new_axes_2_test_blockwise_stacked_new_axes.assert_eq_z_np_ones_5_": {"doc_hash": "e7f01aeb88158b134842f6106783619c6267e46d88961b334d3d06497b7d0cdc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_stacked_new_axes_front_test_blockwise_stacked_new_axes_front.assert_eq_w_np_ones_7_": {"doc_hash": "c7016de02aae1d0c3b6f17f3aca436572755fe459827a996a92782b385b9736d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_stacked_new_axes_same_dim_test_blockwise_stacked_new_axes_same_dim.assert_eq_c_np_ones_5_": {"doc_hash": "0eb88272f42da59269c45042d1cf9a24308515103a3ea51264760a119f800953"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_new_axes_chunked_test_blockwise_new_axes_chunked.assert_eq_y_np_array_0": {"doc_hash": "3af5c59603976c3ce4ab037c6eba5da975311c6d52792310052fdbb195a94ff4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_no_args_test_blockwise_kwargs.assert_eq_y_np_ones_5_": {"doc_hash": "b1f7790b8348d68363cd9cd0f3921bdf3a33243fbabd16fcecb8ea42b61196cb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_chunks_test_blockwise_chunks.None_3": {"doc_hash": "2568626fcd46846d7f2eab7252af935a2785280d8c9cb96d7358d1ae411f1f43"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_numpy_arg_test_blockwise_numpy_arg.with_warnings_catch_warni.assert_eq_x_np_arange_10": {"doc_hash": "ef7fa2b7955fc0d9180b27e39b87aa91c20911be9fa4508c0c8c6ff1f34198ad"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_bag_array_conversion_test_svd.assert_eq_z_z_": {"doc_hash": "d414324c840dfb371b40168444dab60427baf73239108de5b4a7872f048c630a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_args_delayed_test_args_delayed.None_1": {"doc_hash": "49426b737fc61084621bbebd8f87932b9559dde8298346ed09b4d44b56f52ddc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_namedtuple_test_namedtuple.assert_eq_A_B_": {"doc_hash": "5a4acc4217391827de46645450d8aa80a60b7ec0d4b516ea076ad8ee28da2900"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_validate_top_inputs_test_validate_top_inputs.assert_i_in_str_info_va": {"doc_hash": "db1bb33b856a544dc2966ffa248ba4847fb1c056a26a1efdc1dee4d862519ee2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_dont_merge_before_reductions_test_dont_merge_before_reductions.z_compute_": {"doc_hash": "45e07b09bed94317f8e05eb1563a9ddbd703d080de3557614d317c00b430d59d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_atop_legacy_": {"doc_hash": "26852160fe6f28e05db25fbd5353fc97eeac00fc0e5972fd4b3fb0d55725b988"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_chunk.py_pytest_test_keepdims_wrapper_no_axis.assert_rwf_276": {"doc_hash": "17e70556a822cd339d9b45e247aace1eba4c29352ac57be3889f403ad1ba0e1a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_chunk.py_test_keepdims_wrapper_one_axis_test_keepdims_wrapper_one_axis.assert_rwf_np_array_": {"doc_hash": "8a50e425262987521552746010093f47f77ff46391ad9a96417701fb4f3e4072"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_chunk.py_test_keepdims_wrapper_two_axes_test_keepdims_wrapper_two_axes.assert_rwf_np_array_": {"doc_hash": "98fa4391977191c2093130de14fbe586a666e9dc397834832693e19ca62088b8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_chunk.py_test_coarsen_": {"doc_hash": "3ced4d43618118daf5c9ae5f49f60ef1a460266bcddee6bb0db749ccdcdee721"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_pytest_test_arr_like.if_order_F_.else_.assert_not_np_isfortran_d": {"doc_hash": "f1b145e51757e22f822fc70f11c01e54b109619661c1f04c5f32b7fb2b1b6c40"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_arr_like_shape_test_arr_like_shape.if_empty_not_in_funcnam.assert_eq_np_r_da_r_": {"doc_hash": "82dda06ed481ffb008fbe89e01015413982088257c1f2b17d2c31772015dd602"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_linspace_test_linspace.None_4": {"doc_hash": 
"b2b3cfc2df29a4cfc99c5a874c09796a4405434246412b0d78add276ce33d5a6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_arange_test_arange.assert_da_arange_10_chun": {"doc_hash": "23405af146e4b6cfc67277b6eb23e3c6f367643f0a7e35ae5b0d0afd79d611f9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_arange_dtypes_test_arange_dtypes.assert_eq_a_np_a_da_": {"doc_hash": "afdecf7f8721dc4d25e071fb2d104fd4861bb2d649b3c1700f68ccbdf4b58e46"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_arange_cast_float_int_step_test_arange_cast_float_int_step.assert_eq_darr_nparr_": {"doc_hash": "55948725bfb8d101ea96f22736544ce4a079efd189053ed287cf40185676a1b8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_arange_float_step_test_arange_float_step.None_3": {"doc_hash": "86bb49850afa8b3e4020a7efab24529cafc858149648da1be61e6df935009a0b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_indices_wrong_chunks_test_indices_dimensions_chunks.with_dask_config_set_ar.assert_expected_actual": {"doc_hash": "d95a4265c303aab48021f3bce2e3b40a7bdcb738e98d91eba11e69c4f2d70533"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_empty_indicies_test_empty_indicies.None_3": {"doc_hash": "dd97c18e5375ab634c753694e3788d74309ea17a14ddc65b37bd4cbda1a9d4e0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_indicies_test_indicies.None_3": {"doc_hash": "5b656fe233100332c53f67fe3affe45bee0cbbc47f4e526b830630ce6a6650aa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_meshgrid_test_meshgrid.for_e_r_a_e_r_d_i_in_zi.if_sparse_.else_.assert_e_r_d_chunks_xi": {"doc_hash": "296e8dfa1097cc7a5ddf5700bbfa681185c427e5531847ea677da53cf347551d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_meshgrid_inputcoercion_test_meshgrid_inputcoercion.assert_eq_z_z_d_": {"doc_hash": "be2ccf119c275f2a4164abe57dad2e1f859fca9990829fbffad9610f6a8ffc6b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tril_triu_test_tril_triu.for_chk_in_5_4_.for_k_in_.None_1": {"doc_hash": "e34be54bc22201b57f338c54123f9affabc652043c42b952309cc64cf438f22c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tril_triu_errors_test_tril_triu_non_square_arrays.assert_eq_da_tril_dA_np": {"doc_hash": "5daaba40d4230a65ea84de449a0d900c139e1161c9677ba9cb6bd9bd0d3408a8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_eye_test_eye.with_dask_config_set_ar.assert_4_x_npartitions_": {"doc_hash": "822c0533fddd4edb04ee0ef0fd95bcb7b56a0db31887fcb1e8e5cdfb20bab935"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_diag_test_diag.assert_eq_da_diag_d_np_": {"doc_hash": "ae4989a4a37994f92f1c53cd5d3d83d8b811a3888ca34f36a8b8cf6837fcf9f1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_diagonal_test_diagonal.None_14": {"doc_hash": "9aa948fec917238fad67e969bffbe81d27f80061b909413a9722b0c397aab121"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_diagonal.None_15_test_diagonal.None_22": {"doc_hash": "55fbb135f085eaff37465c9be1ba525cf2e6c99e99744d1c3e34cf482f6564bb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_fromfunction_test_fromfunction.assert_same_keys_d_d2_": {"doc_hash": "4828de766802643eee6c33cbf993ab0ccb1979dd8cd89e11502d2c675128b6a6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_repeat_test_repeat.for_r_in_1_2_3_4_.assert_all_concat_d_repea": {"doc_hash": "6cc76abf371b7476047e0b0df3ab995a2b31115459e19621a919095754c42118"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tile_basic_test_tile_basic.assert_eq_np_tile_b_reps": {"doc_hash": "2c75fed05dcfbecd6534f2688557594bb4937f2a583c69b5d67d85b640a93650"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tile_chunks_test_tile_neg_reps.with_pytest_raises_ValueE.da_tile_d_reps_": {"doc_hash": "4032670416c797a5d781b1457eaaf82d892f257da3dc21da73a103fe94689b60"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tile_zero_reps_test_tile_zero_reps.assert_eq_np_tile_x_reps": {"doc_hash": "2c155efc2594f4bd1482082ba8d0724120fa34fdb042bb14eca1d3ba6a2fd816"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tile_empty_array_test_tile_empty_array.assert_eq_np_tile_x_reps": {"doc_hash": "b0bf2a0babbcdab08e10c3598afb56a523e1c8d5c78849ca45aad1e794ec11ee"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tile_np_kroncompare_examples_skip_stat_length.pytest_mark_xfail__numpy_": {"doc_hash": "f71a1f4e0e7c4209d0fec242464ae86a54cd531c04970d80fe6d91a3aeb229d6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_pad_0_width_test_pad_0_width.assert_eq_np_r_da_r_": {"doc_hash": "c5e4dd93670b4fcff37ec50dff115534120bcb7e7941d0016bc32c1b4ab1abca"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_pad_test_pad.if_mode_empty_.else_.assert_eq_np_r_da_r_": {"doc_hash": "fbd2c83b5077ba11e1fa8dd4d167165d7efe655113c0a352a0fb5230098e17fc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_pad_3d_data_test_pad_3d_data.assert_eq_np_r_da_r_": {"doc_hash": "2acbf899723dd42caaebcbf4fc9a936c5f31474be632b19811070b82ade0a276"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_pad_udf_": {"doc_hash": "330b1459d2fcd8826aa903681e7d6f41113dbc1b07e8cbd6b3142707443222cf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_basic_test_sizeof.assert_sizeof_c_c_nby": {"doc_hash": 
"360ce6c6773863aa159d00f64d4288a21782f38d167c21f07499a804c15592b1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_diag_test_diag.assert_eq_da_diag_dx_cu": {"doc_hash": "7cf2b2e94b63a0a6ef41e20afedf3b288a20ab09328f757027838a6a031a07a6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_diagonal_test_diagonal.None_11": {"doc_hash": "c2b5c048c3c5bcc932c604443242ac86ee988b29d65d96bcffd291d502780007"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_tril_triu_test_tril_triu.for_chk_in_5_4_.for_k_in_25_20_9_.assert_eq_da_tril_dA_k_": {"doc_hash": "3e0b26bcc59016c6c94313017c0a46335e2a2c7d53dbf1b1df717110a1f6c3c8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_tril_triu_non_square_arrays_test_tril_triu_non_square_arrays.assert_eq_da_tril_dA_np": {"doc_hash": "f1e56bfb1be298cf874477931d759420036a931710cef3f5495e35fa3a838e8c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_apply_gufunc_axis_test_apply_gufunc_axis.assert_eq_m_dm_": {"doc_hash": "889a444af252d6c10750bccacca16ef29510357a4e860fb317f486cf38ada91c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_overlap_internal_test_overlap_internal.assert_same_keys_da_overl": {"doc_hash": "015af3457951ebf4ee29e805d0c31cb8a1f818b88c8df635035c028642c063df"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_trim_internal_test_periodic.assert_eq_e_0_d_2_": {"doc_hash": "7067acd317996e505e4df44256548288efc7e44041bd9e0383568e6ebac7e64c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_reflect_test_reflect.None_1": {"doc_hash": "d5780653ebf168e3f33055894e871c414f12367307c8781becb0ac325963634c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_nearest_test_nearest.None_1": {"doc_hash": "60d558a93f5f2d22c6edbd7868ce9509d297db13a12c54dd9e8e1495161037bd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_constant_test_constant.assert_eq_e_1_np_on": {"doc_hash": "52d943a4a4563d182aba221df35d2d80afd0e0b26392c3fc60a54c1be648ad55"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_boundaries_test_boundaries.assert_eq_e_expected_": {"doc_hash": "31b67d078de71cc96d0a0de46745bfb84873748fdfd86ab089b1f8a0ccca7bb8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_random_all_test_random_all.rnd_test_rs_standard_t_2": {"doc_hash": "54545aec4ee0dd99ee1ef5bf43ec17da2770db388f3a18eb7270c118cfdf016f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_random_shapes_test_random_shapes.assert_x_shape_shape": {"doc_hash": "79b6297d5b8a1a70f7477dfda1bbe271c1d7e478f051a4a02d8fa73ae1e9e8f7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_tsqr_test_tsqr._full_matrix_returned": {"doc_hash": 
"ba3457e3e28046a2e6614100a696f67ae7357803d776c1c18ad884408a44fca3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_tsqr.if_error_type_is_None__test_tsqr.if_error_type_is_None_.else_.None_1.u_s_vh_da_linalg_tsqr": {"doc_hash": "df7669a567ea29df786f29b8d4e29d25ca702bfd1022c47a1b02b4dcc71b9683"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_tsqr_uncertain_test_tsqr_uncertain.data.da_from_array_mat_chunks": {"doc_hash": "6d04b323649a67de752fca65150dd32e0159c1d5e3312f6184b5a97e63ce8aa7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_tsqr_uncertain.if_vary_rows__test_tsqr_uncertain._full_matrix_returned": {"doc_hash": "7b9b198d8fe4bb48d28cf7b3bb3e305d014c613c6735cc8b8bdcae4f488bfad0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_tsqr_uncertain.if_error_type_is_None__test_tsqr_uncertain.if_error_type_is_None_.else_.None_1.u_s_vh_da_linalg_tsqr": {"doc_hash": "a7cf8792a72365d027ba8b8ae96e5df23f44534ea1d097bc308797362d39eb24"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_sfqr_test_sfqr.if_error_type_is_None_.else_.with_pytest_raises_error_.q_r_da_linalg_sfqr_dat": {"doc_hash": "de89ccb8cc6a51a0098d8a482facf3d6639e74ef23c0e7c401813dd514d0d42d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_sparse_hstack_vstack_csr_test_sparse_hstack_vstack_csr.assert_eq_x_y_todense_": {"doc_hash": "fe99d54b2e9539e1a3504dc05839a22d1faa0575eea8dee3ef713da4ece3a7f0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_cupy_sparse_concatenate_test_cupy_sparse_concatenate.assert_z_toarray_z_": {"doc_hash": "d3ae1096c148bf0920829a51bb05c784bff3e9cf79b377aa85ea739e3fd1210d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_bincount_": {"doc_hash": "1d794adc174c3ef124c3a2695e5e9107da0614e597f092c24019267eec21e1b7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_operator_dispatch_property.return.wrapped": {"doc_hash": "38afa067bc8d01cb3119b1c7490119a4764805d21178557a91883b8b0292b042"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_EncapsulateNDArray_EncapsulateNDArray.__array__.return.np_asarray_self_arr_arg": {"doc_hash": "9da32cc53c09562d5834621380ed7ac08f4990389d7fd13fc80035e58e357e3c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_EncapsulateNDArray.__array_function___EncapsulateNDArray.__setitem__.wrap___setitem___": {"doc_hash": "3f49a12391631e18bc4bf27ab23871022cfdfb4f678c6d5cb2c6537375aa1461"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_EncapsulateNDArray.__array_ufunc___da_register_chunk_type_En": {"doc_hash": "2199df318c2abf5ec4d47926d468fa6686ac178c668ba8da67664b79f012924b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_WrappedArray_WrappedArray.__setitem__.self_arr_key_value": {"doc_hash": 
"c071ac80e8702996b4cb000960977d2b8ec5713a1e766e70771200e07196c7bf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_test_binary_operation_type_precedence_test_binary_operation_type_precedence.assert_": {"doc_hash": "cc6a949980ae3bebd267d2019336f966c309b3b99abff91173a68e6e1739e67d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_test_is_valid_array_chunk_test_is_valid_array_chunk.assert_is_valid_array_chu": {"doc_hash": "1990620dd79800285239695c192e375d51a5ce27604f0c7d5658d5ab6b521fb9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_from_itertools_import_com_test_fft.assert_eq_da_fft_darr_n": {"doc_hash": "927bc63798c193504d0180c6eae6ac0db6611551dd0d9d30f3c13e80dbff88c8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_fft2n_shapes_test_fft2n_shapes.assert_eq_": {"doc_hash": "632f12d366ddc2d5a41f576f98646e88bafdd9624a849d67107b55099f026222"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_fft_n_kwarg_test_fft_n_kwarg.None_5": {"doc_hash": "1117f5baea2fac14e141885a1d0765ff6700549f23c9b9f454e7829316e8cd05"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_fft_consistent_names_test_wrap_bad_kind.with_pytest_raises_ValueE.fft_wrap_np_ones_": {"doc_hash": "c3f6e727cddaa01e2853d30978f216338f44a87f6a3d7e33907cc85af2f6743b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_nd_ffts_axes_test_nd_ffts_axes.for_num_axes_in_range_1_.for_axes_in_combinations_.if_len_set_axes_len_a.else_.assert_eq_r_er_": {"doc_hash": "f5bd41db059f7a5545bc576b1c70c677a1e9647196cdc784f9f4e30e2fbb7be7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_wrap_ffts_test_wrap_ffts.if_modname_scipy_fftp.else_.None_5": {"doc_hash": "762204454f6f2b89e6735a254897deeef5bceb7f90dc225af285d64acce32439"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_wrap_fftns_test_wrap_fftns.assert_eq_": {"doc_hash": "15da8a7d670123c1c4a287f245cc01a055bb6de147db206ea49fc887ced3c690"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_fftfreq_test_fftfreq.assert_eq_r1_r2_": {"doc_hash": "0101e8d82e97518ca26df3d4773a8fdb42253e71b291cb8dfac480f88d12c0e6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_rfftfreq_test_rfftfreq.assert_eq_r1_r2_": {"doc_hash": "94005c36f4ebad7528317e3b15069970d914a24311c3a4267351c73b37be61a1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_fftshift_test_fftshift.assert_eq_d_r_a_r_": {"doc_hash": "281386e558d82975081ed86791390423694f3eba21201dd4550cd4db7d9836d6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_fftshift_identity_": {"doc_hash": "e97bd10d31521f993760023fc0ecdd7242f475de66e0726259da3837b69369ab"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_pytest_test__parse_gufunc_signature.None_3._parse_gufunc_signature_": {"doc_hash": "d8582bd564455f1c72095e3bfb324fa3c77c92cac7ec778a4d27758f25f9d24a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axes_input_validation_01_test_apply_gufunc_axes_input_validation_01.None_2.apply_gufunc_foo_i_": {"doc_hash": "fbeda353c636daca6d437d432f8c4651aa6eacfbecc3b9a0f22b5b5e3d934f32"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test__validate_normalize_axes_01_test__validate_normalize_axes_01.assert_o_0_": {"doc_hash": "5ffcb1513a502d9d609af1a8781ffb7b2386afd7fc4884dd9d0c6ac64ed6bfaf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test__validate_normalize_axes_02_test__validate_normalize_axes_02.None_2._validate_normalize_axes_": {"doc_hash": "e6cde94ee6bb668f5338930e70a260885f56b55f8d1efa1ddcea6e426b519d23"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test__validate_normalize_axes_03_test__validate_normalize_axes_03.None_2._validate_normalize_axes_": {"doc_hash": "2fb04460ca164c2f0de8b8f705643b0833e000e928a7099ca7b0ea969ac06396"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_01_test_apply_gufunc_01.assert_std_compute_shap": {"doc_hash": "2600c72bec68c1dd91729297d43c7c1204033347f9e4e901bf7ce859a581ddef"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_01b_test_apply_gufunc_01b.assert_std_compute_shap": {"doc_hash": "35415ee65556e42787f37995d7c1773e29bffd8d3258742971d50fd035327b1f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_output_dtypes_string_test_apply_gufunc_output_dtypes_string.assert_mean_compute_sha": {"doc_hash": "091ea3d1a834cd1285e86ab1cb3fdd41646a34d837718bea4f5f505d0da95654"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_output_dtypes_string_many_outputs_test_apply_gufunc_output_dtypes_string_many_outputs.assert_std_compute_shap": {"doc_hash": "f36ed57ed6d5015b777d1133617738297f357f5a3a6193460430a6608a396228"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_pass_additional_kwargs_test_apply_gufunc_02.assert_c_compute_shape_": {"doc_hash": "ea873a5c3f46ce269ace5205538b58a48918c066d4eb7fc1404cfae2b51aa61a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_scalar_output_test_apply_gufunc_elemwise_01b.with_pytest_raises_ValueE.apply_gufunc_add_": {"doc_hash": "c6ad27f35d74c7ce962634f10b65382ec9aa83c60ef65f9056cc10dc5a8646d7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_elemwise_02_test_apply_gufunc_elemwise_02.assert_eq_z2_np_array_1": {"doc_hash": "bc1277464ca1c2f5eab165c38e1d0927efc7ddfae8a93e335c901cc9608d6ec3"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_gufunc_vector_output_test_apply_gufunc_two_scalar_output.assert_y_compute_2": {"doc_hash": "d8e497532feef657dfd76f514429300b382ee672831144d34832506a938428aa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_two_mixed_outputs_test_apply_gufunc_output_dtypes.assert_eq_y_dy_": {"doc_hash": "0cdedd70119571165ccb76e698e468f34f5d80ceca14822056fff0249fb31674"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_gufunc_two_inputs_test_gufunc_two_inputs.assert_eq_x_3_np_ones_": {"doc_hash": "4665e4ec2c67b16b80892aaf06db4a1dd359579f9c20d84917f4f755e2b339ae"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_gufunc_mixed_inputs_test_gufunc.assert_valy_shape_10_": {"doc_hash": "f8059e6a94e0d45eacfa6c1de614ed278b8d025523d6c97a6713c14bf46f09cf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_as_gufunc_test_apply_gufunc_broadcasting_loopdims.assert_z_compute_shape_": {"doc_hash": "791c28a47649fc7f8de34951f00be9c084b3e4d9a1d1a9ce9d4334ee8d53081d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_check_same_dimsizes_test_apply_gufunc_check_coredim_chunksize.assert_consists_of_multi": {"doc_hash": "cf65b1c5b7099841169827156281c95c6f454eab2b8f6d2b0f1ebdab9884768f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_check_inhomogeneous_chunksize_test_apply_gufunc_check_inhomogeneous_chunksize.assert_with_different_ch": {"doc_hash": "2f70d2267db4a76bb927bfd32aedc882bea9a97d01e2ea7de488d83b0be70d00"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_infer_dtype_test_apply_gufunc_infer_dtype.assert_eq_z1_dx_dy_": {"doc_hash": "6b90ebfdb1e24c98273a8a09b131bf385bf62e61891896d501c653eb1b19c2fe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axis_01_test_apply_gufunc_axis_02.assert_eq_m_dm_": {"doc_hash": "c9fa2b12a816bcd46ba395f8baed4d24c9a7ae9447161c2c3db2895f8d4c458c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axis_02b_test_apply_gufunc_axis_02b.assert_eq_m_dm_": {"doc_hash": "97910ed9f1ad2b2c8fa1e11012066544ad6e18e919ae53c169eb20443b7cc0df"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axis_03_test_apply_gufunc_axis_03.assert_eq_m_dm_": {"doc_hash": "f1337b281b074fc6552d841716892cdacb1b3d8f6641efdf4c02e85839ad473d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axis_keepdims_test_apply_gufunc_axis_keepdims.assert_eq_m_dm_": {"doc_hash": "8aa5433c365f831ebd5a2636fe0fc517db4f6c52ca5904fa8e98d3f8bfd46153"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axes_01_test_apply_gufunc_axes_01.assert_eq_m_dm_": {"doc_hash": 
"4fee468b197621a0c813b6e6284d57e6765be9c1fc2b901fb7c666d8f6f530e6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axes_02_test_apply_gufunc_axes_02.assert_eq_m_dm_": {"doc_hash": "bfec28111607f2a98c0e75aea6272f77f1be79686dd1e81144cb7962fd7594d3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axes_two_kept_coredims_test_apply_gufunc_axes_two_kept_coredims.assert_c_compute_shape_": {"doc_hash": "a0e9d6e34515be9dc0a37c198f585b8cb1f735b47c6a4c830e6bcd9c6406f95d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_via_numba_01_test_apply_gufunc_via_numba_01.assert_eq_x_y_": {"doc_hash": "c043dfe3c4a72c73af0bf402c0e282cf7e4fc95f5d8cff54454577d68c89ff40"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_via_numba_02_test_apply_gufunc_via_numba_02.assert_eq_x_y_": {"doc_hash": "af202d6d53fdfe7ba8a33b54d87b1d70fdc9851a65ef9d5a758412c6052db1f7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_preserve_meta_type_test_preserve_meta_type.assert_eq_mean_mean_": {"doc_hash": "b319a24bee3d3b6561a21f028b49528e8fd2b5e5a019f12873d820316ebc259f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_with_meta_": {"doc_hash": "dfc012d199fc8469103e57129c9a632990e7f48a6ad61f1af9669752ef16d6cc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_image.py_from_contextlib_import_co_random_images.with_tmpdir_as_dirname_.yield_os_path_join_dirnam": {"doc_hash": "8512f688e5db94b912756c45fbdc529714ac0e7e0ece65bca5f1a76a3309487a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_image.py_test_imread_test_imread.with_random_images_4_5_.assert_im_compute_dtype": {"doc_hash": "1de2325b19fb0d81ab2d3ead4f81afc557a6c7a7c4229c4eaf487bcd1d1b8ece"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_image.py_test_imread_with_custom_function_": {"doc_hash": "2599cd47d29cfaa9e97c754a44278a1e780d3fb3da9e4288f206fd3e7d29228e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_pytest_test_tsqr.if_error_type_is_None_.else_.None_1.u_s_vh_tsqr_data_com": {"doc_hash": "863ed557cb1ec54e775dea5a418927d2bcb6f487fc39c3437f5a6f9adb9be026"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_tsqr_uncertain_test_tsqr_uncertain.if_vary_rows_.m.mat_shape_0_": {"doc_hash": "9e977e5b4aeb57716923fe627fe008c5fd287fc79f10103000a9f397451e8f99"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_tsqr_uncertain.if_vary_cols__test_tsqr_uncertain._full_matrix_returned": {"doc_hash": "65a04bedb67fe6472daec8b94121b8e0aebdd852b263b81a5ba987ff7b53238b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_tsqr_uncertain.if_error_type_is_None__test_tsqr_uncertain.if_error_type_is_None_.else_.None_1.u_s_vh_tsqr_data_com": {"doc_hash": 
"56e27bcdda8341df9e0abc0bf3deef570f1c5962f4169f002027714197f8c11d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_tsqr_zero_height_chunks_test_tsqr_zero_height_chunks.None_13": {"doc_hash": "0ee246afab18e541055ac35cf8974d5212ac760fa11895d794f44f363eb535dd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_sfqr_test_sfqr.if_error_type_is_None_.else_.with_pytest_raises_error_.q_r_sfqr_data_": {"doc_hash": "20817ea12c96f43ff18fb94977e93a2a3257ceba16654a1a2e7f663829199475"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_qr_test_qr.if_error_type_is_None_.else_.with_pytest_raises_error_.q_r_qr_data_": {"doc_hash": "b6a81803841024b544a781ec1f9aed609b18e42e4379f41778bdf3f10c4b88ea"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_linalg_consistent_names_test_linalg_consistent_names.assert_same_keys_v1_v2_": {"doc_hash": "d46fd9f8dc94ae1e36c0cca700b144fc34d2bea627659343aeb0f257d7d7566e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_dask_svd_self_consistent_test_dask_svd_self_consistent.for_d_e_e_in_zip_d_u_d.assert_d_e_dtype_e_dty": {"doc_hash": "8aedec5a7933f1a11ac7218c675c3e504492fa30280fd2a949b1e350fb74cc84"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_compressed_test_svd_compressed._s_must_contain_the_sing": {"doc_hash": "67d98fe576bafa0235a0c6e5be5e2057e91d0383b549079fc1f89e392dc84ed5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_compressed_shapes_test_svd_compressed_shapes.assert_v_shape_r_n_": {"doc_hash": "02149eddd66a222d8db082563339de1cadc3416a051a106998c9124a04201db7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_compressed_compute__check_lu_result.assert_eq_u_da_triu_u_": {"doc_hash": "3478bc9cc7d8f9899ee59159f96c06c338bb5c6cd0d620dc6df88bb5a9f7a8b9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_lu_1_test_lu_1.for_A_chunk_in_zip_A3_._check_lu_result_dp_dl_": {"doc_hash": "a30312252a7af98748f02e752e943dbd7fd015e9362c67c5588ed88d57d113ec"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_lu_2_test_lu_3._check_lu_result_dp_dl_": {"doc_hash": "6b5788244d3725ad78202607ea9c7aae069126bb509eb3d644308268b61cfe03"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_lu_errors_test_lu_errors.None_2": {"doc_hash": "4d9218cc77a3dd7e77b76694311d4c377d1f9ed5e81dcd92aef5e633196de3fd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_solve_triangular_vector_test_solve_triangular_vector.assert_eq_dAl_dot_res_b": {"doc_hash": "8bd870cecfd823b1d57679e1a6cf7a7a66ef425d7931a7c45859633d932d27ce"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_solve_triangular_matrix_test_solve_triangular_matrix.assert_eq_dAl_dot_res_b": {"doc_hash": 
"daf957161ba4d6f624483d7825f265e31f40a04f9b2bf4e18d4cfaeeffcafcfc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_solve_triangular_matrix2_test_solve_triangular_matrix2.assert_eq_dAl_dot_res_b": {"doc_hash": "f31e6d66533a4f6ceff1629d021f0b1120203bf47d1a0ca89b8bbba2a9382d35"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_solve_triangular_errors_test_solve_triangular_errors.None_1": {"doc_hash": "6b1a630125be491cc9243808dd3e61a9b626d2985c07e414b328c32a63317f89"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_solve_test_solve.None_6": {"doc_hash": "2969c1c5632150f0dedfe3dc953c98e99145e33e664f5f75699b77d1ab4b24ce"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_inv__get_symmat.return.lA_dot_lA_T_": {"doc_hash": "5dc075a1feee1df456f31475092ccc2d25639f37d656984d1b999c7369e6688d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_solve_sym_pos_test_solve_sym_pos.None_6": {"doc_hash": "535caab04d7f269295f0483df0e60743b34ee7af18802f3737f4244e1a56da6f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_cholesky_test_cholesky.assert_eq_": {"doc_hash": "2ac2795d2bf6d851a1023bf80bf64b4ad42350aa8a1b194ca9a88d4db9f6f7e7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_no_chunks_svd_test_no_chunks_svd.for_chunks_in_np_nan_.assert_eq_abs_u_abs_du_": {"doc_hash": "284e1c7fef49408375d830168938560b7f751ea739bfb233902f61235776ee91"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_supported_array_shapes_test_svd_supported_array_shapes.assert_eq_dv_nv_": {"doc_hash": "945c7744291fc1ed57815f3df54dfee5100d8c41e23283bcc37d33f41ee5e7a8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_incompatible_chunking_test_svd_incompatible_dimensions.with_pytest_raises_ValueE.da_linalg_svd_x_": {"doc_hash": "bad1218e900cf50e5f082dde6e7ad14851db59e81047c913d265238dfa97e7ea"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_norm_any_ndim_test_norm_any_ndim.assert_eq_a_r_d_r_": {"doc_hash": "2568a1efd10cfa111bdfa34a60622cf2cc1c3e5da7fc9aa0435b930f64ca85f6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_norm_any_slice_test_norm_any_slice.for_firstaxis_in_range_le.for_secondaxis_in_range_l.assert_eq_a_r_d_r_": {"doc_hash": "06fad678b9fbf9ea46cb414f0f9f102f8625b065569b29b824dfa090c8bc3f95"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_norm_1dim_test_norm_1dim.assert_eq_a_r_d_r_": {"doc_hash": "74544d4bc5b429c35973ddeec64500eb4fbda8e13fb4bea1e42c1caee3501626"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_norm_2dim_test_norm_2dim.assert_eq_a_r_d_r_": {"doc_hash": "d3d57d0c333e31c410f4313c2a9d526b6f7e02ca1712fae1aa73e431e9bb3f9a"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_norm_implemented_errors_": {"doc_hash": "f857fffa6c1789929c7b8ca5bef1ea3eae088fb273f46755e2ade295bf1eecea"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linearoperator.py_pytest_": {"doc_hash": "6b7106b310e1cd9144312edd753e91afbd2bc28d5c6f427d6993d1d3824a1c93"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_random_test_tokenize_masked_array.None_4": {"doc_hash": "01c2d253b473c28d6c346c63f8f63d75c650b84af9158a79d4714345974c6a6c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_from_array_masked_array_test_copy_deepcopy.assert_isinstance_y2_comp": {"doc_hash": "635258393bc186c5f22cb0791ec18ca23ec8758ad1234a2094704303f621894c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_functions_functions._": {"doc_hash": "0092b3f075163c87fb65d30f8beec8b0a7791c7f8630dbc852b8561f3217559b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_basic_test_basic.if_yy_shape_.assert_isinstance_zz_np_": {"doc_hash": "4cd9c9bc8072a0ba3dadb274698bcef8d64e3964c6bcaa100df3fe3044f1f0d3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_tensordot_test_tensordot.None_2": {"doc_hash": "873559e2f682789b392baad6418bf3bcc01a5c3a0ab7e4416a549cf6e6fdef27"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_mixed_concatenate_test_mixed_concatenate.assert_eq_dd_ss_check_m": {"doc_hash": "e45df437d3e81feadaca0841231fbc090edaff43a9d8812cb6b5ffd69924dbd5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_mixed_random_test_mixed_random.assert_eq_dd_ss_check_m": {"doc_hash": "42c4198d4abf8957e1527baa93b2989c0a1f0b64d503d08a2720aed4d7eb38b1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_mixed_output_type_test_mixed_output_type.assert_isinstance_zz_np_": {"doc_hash": "324064cb8967bce12aabb1ef19888dd7d633adfc49f281583909437f7a764c4b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_creation_functions_test_creation_functions.assert_eq_da_ma_fix_inval": {"doc_hash": "899b75a42ea21e8b945b160057aadaac59b901957f3bf007fa7c4c0b4f7076d4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_filled_assert_eq_ma.if_res_is_np_ma_masked_.else_.assert_eq_a_b_equal_nan": {"doc_hash": "f94702fdfef877dbb3519aeb6083f662a050fcc949cd2e927e9954c8e0efe35c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_reductions_test_reductions.None_6": {"doc_hash": "c2d168d12d21612bc44e91946239578d858812721de9e861e10b1a8f1e4fc350"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_reductions_allmasked_test_reductions_allmasked.assert_eq_ma_dfunc_dx_f": {"doc_hash": "7aa74c6e3663e26fdfb6d5910e2f46c4f0fa4ace2c9cb66d531ee3c363f4031b"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_arg_reductions_test_arg_reductions.assert_eq_ma_dfunc_dmx_2": {"doc_hash": "c7db251706be26e9869717ecdbde239c65100b8f16be9a317a4aea3405e4b9e1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_cumulative_test_cumulative.for_axis_in_0_1_2_.assert_eq_ma_dmx_cumprod_": {"doc_hash": "a798b75c0557404e4ce4942f6c40743e2dc59d1a6eb0870e05e694f0d2ca160e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_accessors_test_accessors.None_3": {"doc_hash": "7f0e34eab362cfea5c4d56b953708d12f2484b4dfe73f401193c244e53e143b1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_masked_array_test_masked_array.with_pytest_raises_np_ma_.da_ma_masked_array_dx_ma": {"doc_hash": "60641999cf87448bb0113bab0cb4a5c823c02df778db4925f8dc347dc95dd8cc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_set_fill_value_test_set_fill_value.with_pytest_raises_ValueE.da_ma_set_fill_value_dmx_": {"doc_hash": "378aa1646ab6817390b7ff4e897803ca745c28f45f5ea972dd0b86d207719343"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_average_weights_with_masked_array_": {"doc_hash": "229df04d64aac4198e1ff48398b6fee2ad00d730a9146502a800e8d435d9f9e2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_numpy_compat.py_pytest_test_slice_dtype.assert_result_expected": {"doc_hash": "b1bf5577a4eaec77aac008c132c0a356bf5776d5b9856ad43dbfc09220a4bf31"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_numpy_compat.py_test_min_max_round_funcs_": {"doc_hash": "7439a4f00fc089c9d33c9d8153fd75ed6fa556b7b62604a6442eb03077553c23"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_pytest_test_fuse_getitem.for_inp_expected_in_pair.assert_result_y_ex": {"doc_hash": "8d4f2ce63cd9929b3eb37b061d9cf58a456317a919b152cd7c162623ea4ede0f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_fuse_getitem_lock_test_fuse_getitem_lock.for_inp_expected_in_pair.assert_result_y_ex": {"doc_hash": "6c06fa2e39e710244ac85582d307ac1c50d7ae262b886dc35b688a703bec25fc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_optimize_with_getitem_fusion_test_optimize_with_getitem_fusion.assert_len_result_len_": {"doc_hash": "8ad1c49fde51b8beb84f53db922dcf8f8c7dce1904d826e647445071a479d843"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_optimize_slicing_test_optimize_slicing.None_1": {"doc_hash": "b61c1d631ee355ed4cce0fac0d17d31488d31a2e755b4bc8293b11e63be334a8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_fuse_slice_test_fuse_slice.None_1.fuse_slice_None_np_array": {"doc_hash": "64914ccc058e57a25dc5e1c310a455a9c514f55f009f3aa65b2ee1d171e1e7e4"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_fuse_slice_with_lists_test_fuse_slice_with_lists.None_6": {"doc_hash": "59ac7f7a5331a8ddee8ffa72ddabdef178d01008d44aacc2d868429c49cc2ae1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_nonfusible_fancy_indexing_test_nonfusible_fancy_indexing.for_a_b_in_cases_.with_pytest_raises_NotImp.fuse_slice_a_b_": {"doc_hash": "79d3e6a5ecfaacad1545e3e00a0da7dfaafb77cb3f93bf2cccf0e8044ac21e0b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_hard_fuse_slice_cases_test_dont_fuse_numpy_arrays.for_chunks_in_5_10_.assert_sum_isinstance_v_": {"doc_hash": "a60dbd7839aeb0d0c1c763c4335375f939d06c60f4c9e860f874e57ac71fcdee"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_minimize_data_transfer_test_minimize_data_transfer.for_dep_in_deps_.assert_dsk_dep_1_big": {"doc_hash": "16da15053ea426b1eff693d3f6199b283c94ae7b485eaf685411f76b4b73d0ed"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_fuse_slices_with_alias_test_fuse_slices_with_alias.assert_dsk2_fused_key_": {"doc_hash": "b87c1f5702d6cf1fe00a2e7c082cee6539b6b1e4464126ed21c6baf4508db8eb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_dont_fuse_fancy_indexing_in_getter_nofancy_test_dont_fuse_fancy_indexing_in_getter_nofancy.None_1": {"doc_hash": "3cf9971f95fb73ca6a221f5cfbfd0d1faccb90194ec6213a69f8605f91d88600"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_fuse_getter_with_asarray_test_fuse_getter_with_asarray.assert_eq_z_x_1_": {"doc_hash": "0b17308e743e932b547ade9ab37fa3ca524db62254c02bbffe3a59e77f3c926b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_remove_no_op_slices_if_get_is_not_getter_or_getter_nofancy_test_remove_no_op_slices_if_get_is_not_getter_or_getter_nofancy.for_orig_final_in_opts_.assert_optimize_slices_": {"doc_hash": "c1fdccbf49405dfeda38fb0f22bce4d008d55bf8a480672e76577d03f6695a0d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_turn_off_fusion_test_turn_off_fusion.assert_len_a_len_b_": {"doc_hash": "3219a92b4bcc946e363f8d712a7669b5f1a8d7876abda4517ea7a81acfb7d736"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_gh3937_test_gh3937.y_compute_": {"doc_hash": "29de2d34d19a8bc75e83f0d37aa989c63463e6d267ce3eb78b3acfca7a4def8f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_double_dependencies_": {"doc_hash": "28709ddb069720df992abb3c9f49764861da71f017d0632d691e48ecd02c9e4f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_pytest_test_fractional_slice.assert_isinstance_fs_1_1": {"doc_hash": "f597456a73f591e815fe99835dfa25951aae624eb7c671150c0adf067decded5"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_internal_test_overlap_internal.assert_same_keys_overlap_": {"doc_hash": "da65f34facc8130e9acefbb0c6a97ae42aeb9365e78f22957e15b25cae80eeb5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_internal_asymmetric_test_overlap_internal_asymmetric.assert_same_keys_overlap_": {"doc_hash": "4e9ab70969b790b6ec2c60d7f48e04a8c84a3ecafb44f0359a1def915e8e0452"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_internal_asymmetric_small_test_overlap_internal_asymmetric_small.assert_same_keys_overlap_": {"doc_hash": "ce5d051590780cb1fd8b300ad962d1ae56b8267967d5109d7b611cde52558a96"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_trim_internal_test_periodic.assert_eq_e_0_d_2_": {"doc_hash": "095cf645e32e66b4e407f7774d5a886a69887e81fda9e03af2cc6d759c5fead8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_reflect_test_reflect.None_1": {"doc_hash": "1deab0dcf8adc7ea964319bf210ec0cd68152eab82524ae8c5cf61cde9986bc4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_nearest_test_nearest.None_1": {"doc_hash": "5989f0b4b73d30b4ac1d05408ebbe3b18eca23684fd113109ace627ea8e037bc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_constant_test_constant.assert_eq_e_1_np_on": {"doc_hash": "d92839efee8e7bac502440e73b820e952ff668a570886f4b40ee5699d7c4f220"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_boundaries_test_boundaries.assert_eq_e_expected_": {"doc_hash": "15bb7bd9513cd01da3f71c4abfe5ab6256787fbc9e052ee97f3f5c172a04897c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_test_overlap.assert_same_keys_g_overl": {"doc_hash": "0a32f6223231644378f09cc5a975233f1a8b24d124d3d680d2b3721219381639"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap.g_4_test_overlap.None_2": {"doc_hash": "c1a8899f0c5e1543b5ce7e93c7a418895eb5393d3b90a92c2097c353ce5fb6fb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_asymmetric_overlap_boundary_exception_test_map_overlap.assert_eq_": {"doc_hash": "ba4a7393e55b57aef3db33e84b4f3505b39dc92900fd1db72e2671563fcf1176"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_no_depth_test_map_overlap_multiarray._are_not_somehow_shifted": {"doc_hash": "636d67c63057608f5a1a890c18208f0f06d96ebbd5b05e92893d76e8326c0cff"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_multiarray_defaults_test_map_overlap_multiarray_defaults.assert_eq_z_sum_20_": {"doc_hash": "4b79a578e69c20d853e6cf36292c9f349c8ed767c723c0931f36ab0bc08958bb"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_multiarray_different_depths_test_map_overlap_multiarray_uneven_numblocks_exception.with_pytest_raises_ValueE.da_map_overlap_lambda_x_": {"doc_hash": "9a3fa3ce57a35f735856f3e86ffc1e1daae73bfda8a8713c6eb637ba5c19ad42"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_multiarray_block_broadcast_test_map_overlap_multiarray_block_broadcast.assert_eq_z_sum_4_1": {"doc_hash": "7f8da61df26a9c5a08d4b2700f3dcfc77a358c8c77cc1dcbf859f52dd550aa39"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_multiarray_variadic_test_map_overlap_multiarray_variadic.assert_all_x_compute_": {"doc_hash": "692b1e039c32726cc23beb664886fd52e3fca2f896a8c9e5febaa5a4f5948277"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_nearest_overlap_test_nearest_overlap.assert_array_almost_equal": {"doc_hash": "9cde8fab4a776eeea57f7c452cb870a1da5e2cdd578a7bddfc1cc38eda017e8a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_0_depth_test_0_depth.None_3": {"doc_hash": "2fcc5bd640ecebdce180e1f43ad5dde186a5e21198f6c62f054dca1593c865ea"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_some_0_depth_test_some_0_depth.None_3": {"doc_hash": "3a27faa173c097939bbb31feb00c6496028b06ad55d3a7f007a3d06ba1f6cdb2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_one_chunk_along_axis_test_constant_boundaries.assert_b_chunks_darr_c": {"doc_hash": "759045858be2c64cbb65d8060f5cf8da0303bbcc57e06582ec485ad3e878adda"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_depth_equals_boundary_length_test_depth_equals_boundary_length.None_3": {"doc_hash": "3ff8209f0c59fc372104052e15ddf5eff159f5fd2cfd888e9595895f910f849c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_depth_greater_than_boundary_length_test_depth_greater_than_boundary_length.None_3": {"doc_hash": "fb6ca1a091fab27a93ed77b80bed5cab4a134ba4ca5495633f677990d5e43778"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_bad_depth_raises_test_none_boundaries.assert_eq_exp_res_": {"doc_hash": "a18755c5c472869980064a6bb17343a4e964da3388fdca886a4283d3c63c7370"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_small_test_no_shared_keys_with_different_depths.da_compute_r_scheduler_": {"doc_hash": "5e9e6cf30c6b16ce2f749130d36d29bcd17eb78d316f2e45ebecd3557b6865a6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_few_dimensions_small_test_overlap_few_dimensions_small.None_5": {"doc_hash": "6ff6b21940e991bfce337848dc90d96cead39d9c49c2f44cd68d2873fb93d4f1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_few_dimensions_test_overlap_few_dimensions.assert_len_c_dask_10_": {"doc_hash": 
"ed4828c3574536268604767cc5feaba57267055e498f4d6d0f2bedf19475d243"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_trim_boundry_": {"doc_hash": "8b280a6d7e5543ed6a0799432e25bb91c18019713175fcec6746a4e336849355"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_percentiles.py_pytest_test_percentile.if_method_tdigest_.assert_eq_": {"doc_hash": "b7ff74f651bb236fe1b37dcdb04f4caef64e2f5821f02c41d015e4095e58bb02"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_percentiles.py_test_percentile_with_categoricals_test_percentile_with_categoricals.assert_same_keys_da_perce": {"doc_hash": "eb2d7918f7601d4de1a34e0ade60ac1d3fe3e46d35e05c5a459bc9fe50b7a805"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_percentiles.py_test_percentiles_with_empty_arrays_test_percentiles_with_scaler_percentile.assert_eq_da_percentile_d": {"doc_hash": "3f0ab974a0fdb9c90d8533f6ca393cedc773bac1f7a6ebc027db503260d01dbc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_percentiles.py_test_unknown_chunk_sizes_": {"doc_hash": "5e6d390a02dd7cb81b57ba2e69a0cc2cf2c480ffe8d411ce6b26a4b178e572df"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_pytest_test_determinisim_through_dask_values.assert_eq_samples_1_samp": {"doc_hash": "aa40f18b7bf15e93422c84c036f03272d07a70747dda736126975a16b7d06e5c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_randomstate_consistent_names_test_randomstate_consistent_names.assert_sorted_": {"doc_hash": "eba20ed8c3814a4a49de6a3c0ddd80157fcb1993e908fa74cada5f5dcab32370"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_random_test_parametrized_random_function.assert_len_y_90": {"doc_hash": "629d14c379335ae79e3b1689484af1bded7dcf088546cff43d967c70b892bb5a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_kwargs_test_consistent_across_sizes.assert_eq_x1_x3_": {"doc_hash": "f7a42cd19efb71de758dcf83f2239121394acadd1dc6e4ee81714fc26b9b466b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_random_all_test_random_all.da_random_standard_t_2_s": {"doc_hash": "0fbb40073ad4864c514dc0f6f3b85814c57955ed8985a0218b90d6a0f0043d8b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_array_broadcasting_test_multinomial.for_size_chunks_in_5_.assert_x_shape_y_shape": {"doc_hash": "3fee01c1efd823ce5e4c62782c401c6c851f03ebb80ce13b67e05939ac9d703a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_choice_test_choice.assert_len_res_len_np": {"doc_hash": "e1c9324f5f23655640fc7e00f9060a60888e229804aaf9b36c72bfedb40a813e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_create_with_auto_dimensions_test_permutation.assert_x_shape_100_": {"doc_hash": "8c9d7d3285260b0880cf9eea901268c96fdd2807b1d04ca74a0df10533d9ce27"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_external_randomstate_class_test_external_randomstate_class.assert_eq_a_b_": {"doc_hash": "3cd6cb2613138028f21916f404a9a9d5e0210fcea4e6843a8834629d2a72e157"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_auto_chunks_": {"doc_hash": "6ac97d45458a7a46007793182f6e86511fb9528d4d1fa37ea4b670c74aa362da"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_from_itertools_import_pro_test_rechunk_internals_1.assert_i1d_1_answer4": {"doc_hash": "8085715d071fb6fea87dfa95fe24d3a30bc98c04b7a35a104d60b7f48075bc3f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_1_test_intersect_1.assert_answer_cross": {"doc_hash": "b74959dbe9760b2bd188df55a67834fac24019518df929cd32565f62c8e70894"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_2_test_intersect_2.assert_answer_cross": {"doc_hash": "7aea0facdfa4ff073c947269a2eab21818404767514a526a3699b27cc229c0de"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_1d_test_rechunk_2d.assert_np_all_x2_compute_": {"doc_hash": "d9f6dce63b1b531dea2d9d7f17d443c0c6e0483b68ba6c2ead5c31d3aee8039b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_4d_test_rechunk_expand.assert_np_all_y_compute_": {"doc_hash": "5ffcf9448d382f060fcd43087c0207281576dfa57e07ca303ed0f334d1ff55a6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_expand2_test_rechunk_expand2.for_off_off2_in_product_.if_a_off_off2_0_.assert_np_all_y_orig_": {"doc_hash": "9794cb4d37d79eed78c0575840f291fce233f74592497426d18e25effa88cc88"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_method_test_rechunk_method.assert_np_all_x2_compute_": {"doc_hash": "7073f2da1eb82f55adf6413ddb49f3eb354af2f4f53418967a31531de2497fae"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_blockshape_test_dtype.assert_x_rechunk_chunks_": {"doc_hash": "ec57f20a4398da9631c1504e4b1038838b9985490a71ab1c45ad006cffd754b1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_with_dict_test_rechunk_with_dict.assert_y_chunks_24_": {"doc_hash": "aa8f7d876f815fc5ebef489b8cc2fd70bf917a25f1fb26de0b1839b2f97f3b5f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_with_empty_input_test_rechunk_intermediates.assert_len_y_dask_30": {"doc_hash": "f8a5cd6e97f95f10e2d075caebefba8bef8864ff8beb3ca60cf1e5e38644cee7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_divide_to_width_test_divide_to_width.assert_chunks_4_4_2": {"doc_hash": "ca080b1e3e26a0ff34f7eafe8b2c03e83fa12ff2437cf471fb315c74c26a5b43"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_merge_to_number__assert_steps.assert_steps_expected": {"doc_hash": "af3f0d61c64499a953c462fe8937ae6fc47566581746a688a65b415bbfbba9a1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_plan_rechunk_test_plan_rechunk.for_i_in_range_len_steps_.assert_len_succ_1_le": {"doc_hash": "6f81e7032cb5875b68538e542c423d4e91eef818a133ba4178f2aabe1d0e89ed"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_plan_rechunk_5d_test_plan_rechunk_5d.None_2": {"doc_hash": "19a5b2acd6aef52246eb3bd0919cd71df75ac73abd1a2159308f026f66ceab1d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_plan_rechunk_asymmetric_test_rechunk_warning.assert_not_w": {"doc_hash": "5818cef12ea6c6e99ad2ced8baef2f25afa4ab2adb2e1f9e595c4b54ae793f88"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_dont_concatenate_single_chunks_test_dont_concatenate_single_chunks.assert_not_any_": {"doc_hash": "17ed6ad7c51ed4953bc04a61a364392b2066b64978cdb502d183de6cee88fc1a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_nan_test_intersect_nan.assert_result_expected": {"doc_hash": "bfdd901e97978864ede2e28092ba81455829776d3c593745a3c3ed45e2ddf8cb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_nan_single_test_intersect_nan_single.assert_result_expected": {"doc_hash": "fd62873d939574e887b7da8dbe39ea86fea22506a1519450a8809707e46c0a84"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_nan_long_test_intersect_nan_long.assert_result_expected": {"doc_hash": "e9336afdf0993b691bc0b153329f7bddb09c0561abfeefe04d61ac033198d817"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_unknown_from_pandas_test_rechunk_unknown_from_pandas.assert_eq_result_expecte": {"doc_hash": "b2e23227e462271e80f380d077a8bdc4119099115589a536070ceb4f8295fefe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_unknown_from_array_test_rechunk_unknown_from_array.assert_eq_x_result_": {"doc_hash": "b6164b93e8037eeba5a4b136c4c000b026b01bc40ba6b9bbdc3baf35d9c31f3a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_unknown_test_rechunk_unknown.assert_eq_result_expecte": {"doc_hash": "aac008192767784eb86b4bc0d271a2861779bd250819dd3b5d72052f719b5124"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_unknown_explicit_test_rechunk_unknown_raises.with_pytest_raises_ValueE.x_rechunk_None_5_5_5": {"doc_hash": "90f72328a3ed5b7bf44264dd3a85f17705b4541f6b17d5b92298ae5ffecec1f9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_old_to_new_single_test_old_to_new.assert_result_expected": {"doc_hash": "11b6dd1c80d345d1f268950077ae915419780f459b71c869be7207a80f34f212"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_old_to_new_large_test_old_to_new_large.assert_result_expected": {"doc_hash": "ea92b62374b6ee8011fc9148798024e246e28acb38cd43e83126a9c8a985621f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_changing_raises_test_old_to_new_known.assert_result_expected": {"doc_hash": "014e5c5b3b966fbad5bd418cf47df1eb4e60583802bf830f6ec540d0bf79c947"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_zero_dim_test_rechunk_avoid_needless_chunking.assert_len_dsk_8_2": {"doc_hash": "b25f43428233af22b397dfbc9ec93f94137ec52dc37f3fe99e16182956dc40e0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_auto_1d_test_rechunk_auto_1d.assert_y_chunks_expec": {"doc_hash": "9e97ad4d9ec821262f88f5af6287100c111ccbdded77ad24f6caa07f2dbd2e03"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_auto_2d_test_rechunk_auto_2d._limited_by_largest": {"doc_hash": "2d1617605f6476fc707773ec58afb91c69d1eac445cd86279e2144dcc22b8d1a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_auto_3d_test_rechunk_auto_3d._even_split": {"doc_hash": "137ae4e03a26298458dea33cd344269afe3ae6314cab631e853dd3bcee0c349c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_auto_image_stack_test_rechunk_auto_image_stack.None_2.assert_z_chunks_1_": {"doc_hash": "0fd0e4290affdd89150eb9c884f8ed5346c366686f7e0cd3303677a6989c4b74"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_down_test_rechunk_down.None_2.assert_z_chunks_10_": {"doc_hash": "6e15253e9b98e8b7540d17ea6675a640d9eddff7dd708de14073c4101e249e48"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_from_itertools_import_zip_test_numel.None_1.for_sub_in_itertools_comb.assert_eq_": {"doc_hash": "3d80c195db33d92c991b8f194edaa4ea4385572707d27ef08724cff91c233999"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_reduction_1d_test_reduction_1d_test.if_split_every_.assert_eq_": {"doc_hash": "6a42e8e597ed78f33ffc283ef7842861e0eace14187ccdfdc067b22951254a2e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_reductions_1D_test_reductions_1D.None_15": {"doc_hash": "48aea01596966fb4d3a58536599c07b74184e33921b731e6fbcec94a198a6f24"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_reduction_2d_test_reduction_2d_test.with_warnings_catch_warni.if_split_every_.None_8": {"doc_hash": "157da34345257a0c92c1093cd0df542cabc99655338ec464e4d1268edbd9a3e1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_reduction_errors_test_reductions_2D.None_15": {"doc_hash": "831836b73904f5e1a91772392559b4358ab298391a6de62bef43e953b6f957fd"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_arg_reductions_test_arg_reductions.assert_eq_dfunc_a2_0_sp": {"doc_hash": "10e47ce7447e2d8b1c535950e08249c97dbda9979404f99202876a79adee760c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_nanarg_reductions_test_nanarg_reductions.None_2.with_pytest_warns_None_.dfunc_a_compute_": {"doc_hash": "fd05faad1341a35d5c41ff4742d08e34d72bdbc3ea2f9ae758e2136456244e8a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_arg_reductions_unknown_chunksize_test_arg_reductions_unknown_single_chunksize.None_1": {"doc_hash": "8f2a166bb301ad0e0cbb84e3102545aa5b55f18974d3397eddbae6bc48d92a68"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_reductions_2D_nans_test_reductions_2D_nans.None_9.assert_eq_da_nanargmin_a_": {"doc_hash": "5b309a6fb45b5764fd1cb492acb7389d182190ae864addeebf4307501bb6084f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_reductions_2D_nans.None_10_test_reductions_2D_nans.None_12.assert_eq_da_nanargmin_a_": {"doc_hash": "835051b05c9ded4c872d82f262907b37c793a451b5c171173d777d680c16863d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_moment_test_moment.None_7": {"doc_hash": "8fe3d6e73906859b89d9580ab4eb29da978c6c271a7bc39047d8774199103b9b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_reductions_with_negative_axes_test_reductions_with_negative_axes.assert_eq_a_sum_axis_0_": {"doc_hash": "30ced6dfd7c65bc02a31e0439d65b7b6af0e57414f42419e21fac324f36a3d25"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_nan_test_nan.assert_eq_np_nanprod_x_": {"doc_hash": "1f3939d55824268ab985366c8e8cfd82429623a0532e2d933038c4e526e607ec"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_nan_object_test_nan_object.with_warnings_catch_warni.None_3": {"doc_hash": "07173c95d165aefbc385969ca308ea10d8f54ef34becf18f1524c80dae408d17"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_0d_array_test_reduction_on_scalar.assert_x_x_all_": {"doc_hash": "ff3b34887ec3aad2c1ad361a5de9ea9491c0bf4a0e9c6d536f9e87cbb1839f4d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_reductions_with_empty_array_assert_max_deps.if_eq_.else_.assert_max_map_len_depen": {"doc_hash": "99e19b1cc8645162481fe8f1205b6a2a48aafe01723f789f16c3f4c9342b8bca"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_tree_reduce_depth_test_tree_reduce_depth.None_26": {"doc_hash": "8ef0527b00790c4c58b8f99fd56619220f2774d11b695a7ef522782ef5390121"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_tree_reduce_set_options_test_array_reduction_out.assert_eq_x_func_np_ones": {"doc_hash": "68f44ef6e6f282e638f882a483b87a46029e63c67cf908af1d8cd393fa2bcd9a"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_array_cumreduction_axis_test_array_cumreduction_out.assert_eq_x_func_np_ones": {"doc_hash": "dc4c4abf32277fbc9be4baa6084e2b410f345e15e5512ac03fd022638ffc7ac5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_topk_argtopk1_test_topk_argtopk1.None_1.daskfunc_b_k_axis_3_s": {"doc_hash": "1fbf579c889ce86e0bc4e8a39967b00b770e744cc5f3a40d767d96b852b9c85b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_topk_argtopk2_test_topk_argtopk2.None_1": {"doc_hash": "9c87f5102d38d5aa13881ccb6b069ed7d14d5b8f896502e3aea59092948bce83"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_topk_argtopk3_test_topk_argtopk3.assert_eq_": {"doc_hash": "f2f88084139648e21b78a5c9592b1c7a7bc08ef757dab22ef6d9a9a3a291e1a6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_regres_3940_test_regres_3940.if_func_not_in_da_cumsum.None_1": {"doc_hash": "9057faaeebd4ed3c6208bd9a7db534d2a7006802e02e0a53affbff29fc414676"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_trace_test_trace.None_13": {"doc_hash": "99e2c39bc9c0dccb09e79af22390621fc071d8a3dd1e0d342c566af9e0aa0d7e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_pytest_test_reshape_rechunk.assert_np_prod_list_map_l": {"doc_hash": "0b61d6d8e5c5f7099079989f3025217a00a4b70d74f3575242d9a4540ae4ccee"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_test_expand_tuple_test_expand_tuple.assert_expand_tuple_7_4": {"doc_hash": "ba0c55faf016f38ba39d407876842a8e398d48581d1cdc6725b0fc9c323d483e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_test_contract_tuple_test_contract_tuple.None_3": {"doc_hash": "e8e6eec6fa53b866b400d0fc13951161da8076b90593e14f28af7d784537fd5c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_itertools_test_array.assert_isinstance_y_da_A": {"doc_hash": "616fb1cf9aed4ee4df493d745c2a0afc6841b5d3e7352e79eac4c70c93db33b9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_array_return_type_test_atleast_nd_no_args.assert_np_r_n_da_r_n": {"doc_hash": "f5302e12e56eac249da0b750a7ced31ee03361268135afc3c4adf081f70e418d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_atleast_nd_one_arg_test_atleast_nd_one_arg.assert_eq_np_r_da_r_": {"doc_hash": "f95a7b42534189df7313d292c303cacb8d6789a89f071090cc29225ede4d7d16"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_atleast_nd_two_args_test_atleast_nd_two_args.for_np_r_da_r_in_zip_np_.assert_eq_np_r_da_r_": {"doc_hash": "94b1b17e2d80d5880f7472ba65e443d77010163bbba680f1f091d1039313396f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_transpose_test_transpose.None_1.d_transpose_1_2_": 
{"doc_hash": "2cdec967647dd6c8df9d68c1502a0fbb04ae6f6f15a6426b0c570c9aa8be9dcb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_transpose_negative_axes_test_transpose_skip_when_possible.assert_x_transpose_3_": {"doc_hash": "b6489591e974a6d6626b7e312504161483c3db07ba491905601b12a0d70aae01"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_swapaxes_test_swapaxes.None_1": {"doc_hash": "4f4f900a3d54a7a59de1b4a408b7bc9c6456b294f8dfd5b59d4596eba53db7ec"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_moveaxis_rollaxis_test_moveaxis_rollaxis.for_axis1_in_range_x_ndi.for_axis2_in_range_x_ndi.assert_eq_np_func_x_axis": {"doc_hash": "14ec6df936e4e0b57c264c8f2d46fc131af111473d416441e84a2b9248da808e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_moveaxis_rollaxis_keyword_test_moveaxis_rollaxis_numpy_api.assert_eq_result_np_roll": {"doc_hash": "44586488d36359f7d7c071c198a49ed83a1947303555111608b6dd35efd35972"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_flip_test_flip.try_.else_.assert_eq_np_r_da_r_": {"doc_hash": "d89ad089505209d6487a5f44465e7bb347de8070f5733e0af2074a8fc5762a42"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_matmul_test_matmul.for_d1_d2_in_itertools_p.if_x_ndim_0_or_y_ndim_.else_.assert_eq_expected_da_ma": {"doc_hash": "a1d43a97bddcc5eb6c213555e460d4589a18336098ca44c1688a6e038db4bf04"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tensordot_test_tensordot.with_pytest_warns_da_Perf.assert_not_same_keys_da_t": {"doc_hash": "b84cc15f622d7b6e45b7d967f3fafb1c53466ff667e47c56ebef4dd020f0f33f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tensordot_2_test_tensordot_2.assert_eq_da_tensordot_y_": {"doc_hash": "6660beabe967e580a1e09452348173eee95ba389b3272b9ec20d2d45fac9c8f2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tensordot_double_contraction_neq2_test_tensordot_double_contraction_neq2.assert_eq_da_tensordot_y_": {"doc_hash": "0fa656ccef42f00e30e296a36c58fff56f0688bdcecd51d696e7deb236e03f17"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tensordot_double_contraction_ngt2_test_tensordot_double_contraction_ngt2.None_1": {"doc_hash": "21bddf8f96cc1feb7fe2496edbd22f27bfc4c92d17f490621b3206aae0775505"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tensordot_more_than_26_dims_test_dot_method.assert_eq_a_dot_b_x_dot": {"doc_hash": "bdeba4cf07a62a4c7fa52e9ce9ceced0653bcfad40e147485a73d9002a2d88c4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_vdot_test_vdot.assert_eq_da_vdot_a_b_": {"doc_hash": "fda3815d5e4300597812014708c76ee5499ae60d4201feaee3df3552c759f0c7"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_outer_test_outer.assert_eq_np_outer_y_x_": {"doc_hash": "c00a44bd5ec3adb61e3af521bed02c37dc4f4791e0f1b96420bf001ba35eb212"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_apply_along_axis_test_apply_along_axis.assert_eq_": {"doc_hash": "6e623970cf5af48b458379f2781464879af45a7c7875a1ab1f12631100f8d466"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_apply_over_axes_test_apply_over_axes.assert_eq_da_apply_over_a": {"doc_hash": "2c778889886b66f0bcbb8eb55c79ab05ec356c2cf93177808a5b71643876b5f0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_ptp_test_ptp.assert_eq_da_ptp_d_axis_": {"doc_hash": "338c8c04f811b4a8cff7a31eb2457f6093cf5da91be4bc21ee172228735aec14"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_diff_test_diff.assert_eq_da_diff_a_n_a": {"doc_hash": "cc63cb213b2e5192966e77c364db82265230830373d3cc28f195125d1fc24b5e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_ediff1d_test_ediff1d.assert_eq_da_ediff1d_a_t": {"doc_hash": "ac44ba9ba88e99460eb8dbc581dc6bbde2fff25a441f84dccae75f9e6b51bb80"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_gradient_test_gradient.if_isinstance_axis_Numbe.else_.assert_eq_": {"doc_hash": "8ad980f388adfa1a6c3fa1a955e8d96529b53998283e14dcf371d8c98af7ebda"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_bincount_test_bincount.None_2": {"doc_hash": "394cf217d5b7b9cbc635b51c5cce01467a61eda02bb1a05653692eeec744fcb8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_bincount_with_weights_test_bincount_unspecified_minlength._shape_is_nan_so_must": {"doc_hash": "706be33ff612774e9ed5e2ac62aa58f8a06a23ee67525a15c5e7376ed78b32e7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_digitize_test_digitize.for_chunks_in_10_10_.for_right_in_False_True.assert_eq_": {"doc_hash": "b5abaa280b7230d3c1291edeecaaa881237021e9e11f34e17133646914954a9e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram_test_histogram.assert_same_keys_da_histo": {"doc_hash": "5b0328ced166d9cba88ba95c8d7622c001f75fa76e1f880065064088f54e78ce"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram_alternative_bins_range_test_histogram_return_type.assert_eq_da_histogram_v_": {"doc_hash": "f5efa11d90de239f24b3eabe8a229626315765c9be908b606e6558ea7c7df851"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram_extra_args_and_shapes_test_histogram_extra_args_and_shapes.for_v_bins_w_in_data_.None_2": {"doc_hash": "e1832d0b0da34e81a9f2dfad780e2a6ed3357225f439c84323e9a88c34c00494"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram_normed_deprecation_test_histogram_bin_range_raises.assert_bins_in_err_msg_": {"doc_hash": "0810e66728741354b844f282d672123160b6f1b382693ee3986dbc48ea721110"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram_delayed_range_test_histogram_delayed_range.assert_eq_bins_d_bins_": {"doc_hash": "f92ca49ef74b800bc7640f73d58e87ffb45e3bf5cae6c76b8196b9d930642698"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram_delayed_bins_test_histogram_delayed_bins.assert_eq_bins_d2_bins_": {"doc_hash": "88c825da014b81040710509a864f2d3a50a99050146bc1cc7980fc12186a05b7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram_delayed_n_bins_raises_with_density_test_cov.with_pytest_raises_ValueE.da_cov_d_ddof_1_5_": {"doc_hash": "bba6e8c785c385cd78d02a3b239ea1b191ba94bd33d33dabc42174a050671f4a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_corrcoef_test_round.assert_eq_d_round_2_da_": {"doc_hash": "c245c313fcd1f6d0135c194cf3eb4d3e25741ae19642d6edc028c038638998c9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_unique_kwargs_test_unique_kwargs.for_e_r_a_e_r_d_in_zip_r.assert_eq_e_r_d_e_r_a_": {"doc_hash": "2e99360f19aa61e17638fdae111e799e2dd5fa4c589292551b8e3a3d7ac30715"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_unique_rand_test_unique_rand.for_e_r_a_e_r_d_in_zip_r.assert_eq_e_r_d_e_r_a_": {"doc_hash": "b8375a0258c7e5cc2f30d568b3bb7af060f33f38dea1ced6b063602b65331032"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_isin_rand_test_isin_rand.assert_eq_r_a_r_d_": {"doc_hash": "044a03e83b4774c4adcd4d86a15075d96116635b515fb5273e5941c01577454f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_isin_assume_unique__maybe_len.try_.except_TypeError_.return.0": {"doc_hash": "8d8180dea55f0b804536c55434e52c47ebf37e18f3543930684f5f4ff88ebc64"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_roll_test_shape.assert_np_shape_x_sha": {"doc_hash": "b153aa7e01fb2e770fea1bf75dacb27c24891057d35d530986f483deb822b784"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_union1d_test_union1d.assert_eq_result_expecte": {"doc_hash": "0d20b2e4de9e94385bb8876e3b6cf77c78c1107d69139d914718f880e1bdd308"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_ravel_test_ravel_1D_no_op.assert_eq_dx_dx_2_rave": {"doc_hash": "a961c75b470f7652f087142deba63fd5589af5200645342822215cd0097bebfc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_squeeze_test_squeeze.assert_d_s_chunks_exp_": {"doc_hash": "3e0643927c7ef95f915ce03bb3a7decc0b21863e4666ae58a9bdda8ea501acfb"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_vstack_test_hstack.assert_eq_np_hstack_x_y": {"doc_hash": "df334cad7986502494c75501f7247ef7677d8bc70e10a09113ef04b0ac8d939e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_dstack_test_dstack.assert_eq_np_dstack_x_y": {"doc_hash": "39d35a5d277efba219f80299a3e62c3fc195597ede553e5112d93ee205308534"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_stack_unknown_chunk_sizes_test_stack_unknown_chunk_sizes.assert_eq_np_stacked_dsk": {"doc_hash": "28be35f76de26c6ccae181fd62e44b4007a8511b6b69cb35c1636e98c61f93b3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_take_test_take.assert_same_keys_da_take_": {"doc_hash": "2da290f9036e73c20a023451be41d8a5fcd7908fe11dd0b7b54225b960c0b8a0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_take_dask_from_numpy_test_take_dask_from_numpy.assert_eq_z_np_array_2_": {"doc_hash": "89e522d72d4dc63d5f6641e803e46c8bb81ec2e18ea196212d946405cbe2bbcd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_compress_test_compress.None_2.da_compress_True_Fal": {"doc_hash": "99c2aa5fbe2c25e75c9a3ff87efca6a84aa5750e37f0578bd94fd1c7632a7115"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_extract_test_extract.for_c_dc_in_c1_c1_.if_isinstance_dc_da_Arra.assert_np_isnan_res_chunk": {"doc_hash": "4acaa4dacbff074ad0485879eff1b7b69cab4dfdfdc64c53ce801bd436913f8b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_isnull_test_isclose.assert_eq_da_isclose_a_b": {"doc_hash": "f01b63841214e8cd2d3f6359f46ee8188579931df57f5d96291809087f548092"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_allclose_test_allclose.assert_eq_np_array_n_r_": {"doc_hash": "78b94c0d16a58ba90dda863c4fbb1511a05b70175e5a33e2ced2c0f8078260a8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_choose_test_choose.None_3": {"doc_hash": "137f4f9aea513c0369c5702947b5636747464a071d1f5c65a344021869115d7d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_piecewise_test_piecewise.assert_eq_": {"doc_hash": "a528dd24f0281743d6178294708e100f05e4c39fbe61b720308fdd89436802f9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_piecewise_otherwise_test_piecewise_otherwise.assert_eq_": {"doc_hash": "9b4f78b81259d861bd4a8faa16b7a53db9102f2347a3ad37fccf3a0604a31275"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_argwhere_test_argwhere_str.assert_eq_d_nz_x_nz_": {"doc_hash": "b2d0ffd7040dd9047c73d3afd94a227d088117bbf42377ebaf2be470ff5b0c37"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_where_test_where.for_c1_c2_in_.for_b1_b2_in_0_0_.assert_eq_w1_w2_": {"doc_hash": 
"5a2303828733af5c3dde03344987a1af3b7971abbad2c5811bd8f702d8e1a420"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_where_scalar_dtype_test_where_scalar_dtype.assert_eq_w3_w4_": {"doc_hash": "6e0132e1b2e3d686dd207a4cb10a439c0e7210ae2941f97f8787b24dbeaf9b00"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_where_bool_optimization_test_where_bool_optimization.for_c_in_True_False_np.assert_w1_is_ex_w1": {"doc_hash": "0a356516c0047e9e322f4d4969e659bd013524c5e0868de92dd3747a1315fc64"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_where_nonzero_test_where_nonzero.for_shape_chunks_in_0_.for_i_in_range_len_x_w_.assert_eq_d_w_i_x_w_i_": {"doc_hash": "ea3083acc2ed229dd8d635c5f2d682bd62c2e9b39702e16086b639d3d2ccc882"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_where_incorrect_args_test_count_nonzero.for_shape_chunks_in_0_.if_d_c_shape_tuple_.else_.assert_eq_x_c_d_c_": {"doc_hash": "19331d3752b316f71d3e26f18167417b16f1e72eda15bb89e604418425042c6a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_count_nonzero_axis_test_count_nonzero_obj.if_d_c_shape_tuple_.else_.assert_eq_x_c_d_c_": {"doc_hash": "5d956f167ebf2335d01fe1ae718fcfb175c643bf7854a494ed3c35b9d1726ca1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_count_nonzero_obj_axis_test_count_nonzero_obj_axis.if_d_c_shape_tuple_.else_.assert_eq_x_c_astype_np_i": {"doc_hash": "98649f8c480c4f4fa585d1ad391a99c6cf17dadb18dc86cab795c7dd963cc283"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_count_nonzero_str_test_flatnonzero.for_shape_chunks_in_0_.assert_eq_d_fnz_x_fnz_": {"doc_hash": "9d25753dd800f8c85963f44a167ad88eed0f33bc99b963d3484fb524260d3f5f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_nonzero_test_nonzero.for_shape_chunks_in_0_.for_i_in_range_len_x_nz_.assert_eq_d_nz_i_x_nz_i": {"doc_hash": "e08068857f7bbb40aa7bc5343f6adf681155a61da5b62aeb4223d39043046fca"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_nonzero_method_test_nonzero_method.for_shape_chunks_in_0_.for_i_in_range_len_x_nz_.assert_eq_d_nz_i_x_nz_i": {"doc_hash": "bbf2954f2d20a0249e757ea43d56d512e0c5ef1a305e49ddbb2f5a18c3b3fe9e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_unravel_index_empty_test_unravel_index_empty.assert_len_d_indices_": {"doc_hash": "fb9a99bfe6a7759df28b478f9ddc92ce36f1c07074a160ca953e9215f1fe4081"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_unravel_index_test_unravel_index.for_nindices_shape_orde.assert_eq_darr_vindex_d_i": {"doc_hash": "8233fd5c265acbd762db5b13d5f3a6366f9cb52333502a14e2eb42969e0f62de"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_coarsen_test_coarsen.None_2": {"doc_hash": "d3ed8a61d8121c07616d9a001a6b76c6e11459fe4e7b53494abbd1aec17f5c8c"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_coarsen_with_excess_test_coarsen_bad_chunks.assert_eq_da_coarsen_np_s": {"doc_hash": "74d8e3fcdc445cec849efda42d8285409e64542489994df5aa16e811a7d6a4c2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_aligned_coarsen_chunks_test_aligned_coarsen_chunks.assert_acc_10_20_30_4": {"doc_hash": "cfb4de70f4418f955d018d9d12326c8cf83765f2f6a174cbbcc11785b25df8eb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_insert_test_insert.None_2.da_insert_a_3_1_axi": {"doc_hash": "1a698126794d2387af83f793e3a0a0ee145068dca039bcf24b5a4413fbadec4c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_multi_insert_test_result_type.None_10": {"doc_hash": "a211e59edc332a2df6d2c2d0e1bedb538af9a76a8bbe9113b1257cc9f262f579"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py__numpy_and_dask_inputs__numpy_and_dask_inputs.return.np_inputs_da_inputs": {"doc_hash": "9e33dc7ee545e061e0684c6c62759e6b460f975c34edc58b79b1071e8a85a2cc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_test_einsum.with_pytest_warns_None_.assert_eq_": {"doc_hash": "5719a6ac53536af95d4130a3457d66d01b1693a4f0e0bb1959a75e9549b49dfc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_optimize_test_einsum_optimize.None_1": {"doc_hash": "3a85d5a307f734e56c7a9b17252a19382ba5266fec79cf8287cc1524803145c4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_order_test_einsum_order.assert_eq_": {"doc_hash": "69444844d10697a61e69d73378169b9a79538a5885eb3257b5bdebd9266ead50"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_casting_test_einsum_casting.assert_eq_": {"doc_hash": "546464071d8fd1bcc99bde4af639f70c78d37018c5914854d876e5045e2dd1a0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_split_every_test_einsum_invalid_args.with_pytest_raises_TypeEr.da_einsum_a_da_inputs": {"doc_hash": "317e53317b1e0792b5df2338fe086f832f53549c25244ee0e3d9a663cfa1c0e4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_broadcasting_contraction_test_einsum_broadcasting_contraction.assert_eq_np_res_mul_res": {"doc_hash": "dd5064baa72948dd4d899873b330a8c7ba1ef65a1eaad53cd187e8209a7d8764"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_broadcasting_contraction2_test_einsum_broadcasting_contraction2.assert_eq_np_res_mul_res": {"doc_hash": "c73352a71c6a5be85b9a2c3386adc26c75f4a9c441a7bdfe41a094710cf50774"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_broadcasting_contraction3_test_einsum_broadcasting_contraction3.assert_eq_np_res_da_res_": {"doc_hash": "dc919cb7146c625adfe31a412293242949d28a184bcb2ad98416a4f02cb173ed"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_average_": {"doc_hash": "cb89393276db49888874921c3a5516ee3fce1d7d4342ff51fe4f0fee2f1c23aa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_itertools_from_dask_array_utils_imp": {"doc_hash": "cec2fbc283f6714299c0123020760b076d49cd4341f85aab6f1c705b26109384"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_1d_test_slice_1d._x_1_8_1_": {"doc_hash": "45a736b2c46ae225cf43794670689eeba83f0eac3685b0fff91bd20e16dba31c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_1d.expected_14_test_slice_1d.None_14": {"doc_hash": "67ed2e3a4defa877b60c73ba423b3d33a42fef8b2b737806dcfab57803cd2c9d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_singleton_value_on_boundary_test_slice_array_1d.None_7": {"doc_hash": "b36723ac891eba7cd18d1dae465d7b71a5c3db7f9c582cbaae3095b1a33fd2ce"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_array_2d_test_slice_array_2d.None_3": {"doc_hash": "1f42c329b15f4763f7d44756fd9949ae3c2b3190cd39876e68bf26b9498845c0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_optimizations_test_slicing_with_singleton_indices.assert_expected_result": {"doc_hash": "8497d4bda28609ba854263cdab1c558dfe27016904b9f93d7915139baf8fe3a9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_with_newaxis_test_slicing_with_newaxis.assert_chunks_3_": {"doc_hash": "87f02a3f365f68f5bfb98304a23a7403b6865b042ace131bb5c89661a3091f7c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_take_test_take.None_3": {"doc_hash": "92464a2a2d770dfe0fa1db1e7ba2ebe9a5660e8a60a7881e6e27734aeddf6c34"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_take_sorted_test_take_sorted.assert_chunks_20_20": {"doc_hash": "4e9b656cc2f0a5a541bf9e9dc4aba81fbeff236a20b27a4db8c5d61495e2e9cb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_chunks_test_slicing_chunks.None_5": {"doc_hash": "d0bb37589cb916c765fc5c0550149476c52e1a9ee772c98f874fd6e600300776"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_with_numpy_arrays_test_slicing_with_numpy_arrays.None_1": {"doc_hash": "7d2bd780b5a0918472a724bb92a954f995152cd3346d44e8a63af22038562edf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_and_chunks_test_slicing_identities.None_9": {"doc_hash": "b23f2b5926a33b154cac8789b23242099e2dcb1d92622482aa04f6a8f03f7ded"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_stop_0_ReturnItem.__getitem__.return.key": {"doc_hash": "df22706e09e578ecd7fdc0621cc214e65654e3748f6ac0ee2df8b35e43e10015"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_exhaustively_test_slicing_exhaustively.for_i_in_first_indexers_.for_j_in_second_indexers_.assert_eq_x_i_j_a_i_j": {"doc_hash": "f700ce9e07447c16ad6c8a3e228f18e6c346a932f785cb7bdfa483196596092d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_with_negative_step_flops_keys_test_slicing_with_negative_step_flops_keys.assert_y_dask_y_name_1_": {"doc_hash": "d08c1521e0696f82a477d8a50175a1d4d5f82f8c3244d1f4b9a532821a338ea3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_empty_slice_test_multiple_list_slicing.assert_eq_x_0_1_2_": {"doc_hash": "4c13dfb3be0d85a2c6a85d388bc0f4ca21158f1a3d23dea4c21c86fd0e521889"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_boolean_list_slicing_test_boolean_list_slicing.assert_eq_da_asarray_0_": {"doc_hash": "22ff62d496f627bf064192d671a5661be595dddbc96f11a3bf6b66e850ae51ef"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_boolean_numpy_array_slicing_test_boolean_numpy_array_slicing.assert_eq_da_asarray_0_": {"doc_hash": "5e9fbdc6e9d41a9b2e5a961c84bf12b2f5ecea53c905f27bf5275e1a0e501fbd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_empty_list_test_new_blockdim.assert_new_blockdim_20_": {"doc_hash": "c1b86da6d3464590a055fccb45f231a127503194572caaa0a1a7095a089e1f87"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_consistent_names_test_slicing_consistent_names.assert_same_keys_a_0_1_": {"doc_hash": "e2829a9fb32be077dff88bf5b94cc5bb6b57143327ae3adca5fa5ede5d933932"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_consistent_names_after_normalization_test_sanitize_index.None_1": {"doc_hash": "d0a3da9596337f496160a32bf5751584201cdf7d9769ac2f13c75737241fa4ef"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_uneven_blockdims_test_uneven_blockdims.None_5": {"doc_hash": "6924c971f3bf2cdfa5bbcaa50bb4f51417c804e437dc1b512f51051a3ca34411"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_oob_check_test_index_with_int_dask_array.assert_eq_x_T_idx_ex": {"doc_hash": "6d1dd22c78b51c5880aa0312561fd7912e60eae4dc7c37afe9b041966faead14"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_index_with_int_dask_array_0d_test_index_with_int_dask_array_0d.assert_eq_x_idx0_x_": {"doc_hash": "aef3d7ac7ebc06135b9e51f7f973e7284112cb512b775dc0c608d2c9257c3fe0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_index_with_int_dask_array_nanchunks_test_index_with_int_dask_array_nanchunks.None_1": {"doc_hash": "5ef2206c2cdb7e71a96276bedcbd5e43adcda127c4feb2cf1deb34c4e65c8941"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_index_with_int_dask_array_negindex_test_index_with_int_dask_array_indexerror.None_1.a_idx_compute_": {"doc_hash": "eda17cbdca5f0abb9d7d5b8e834265fe85d2acecdd907dece0a5bfb47c7e92b3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_index_with_int_dask_array_dtypes_test_index_with_int_dask_array_nocompute.with_pytest_raises_NotImp.result_compute_": {"doc_hash": "4ec82051f66a340d048935620178ba07fa56ba91a4433a672f57f01d5ec09dcf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_index_with_bool_dask_array_test_index_with_bool_dask_array.for_index_in_ind_slice.assert_eq_x_x_index_d_i": {"doc_hash": "1d07550f8f08710fa7eebc544831e178e713a74c7b78cdbe01c27d2cfa689ec3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_index_with_bool_dask_array_2_test_index_with_bool_dask_array_2.for_i_in_range_x_ndim_.assert_eq_x_tuple_index3_": {"doc_hash": "fb234bca913a67deebe84b10a15147312ac454d73c0cf8979c12038df2c2d7b6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_cull_test_negative_list_slicing.assert_eq_dx_4_1_x_": {"doc_hash": "ac1083a6624a4a083048483ac89117878c4a05709a9574e3a1644a6e0f5133a0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_permit_oob_slices_test_take_semi_sorted.assert_y_chunks_5_5": {"doc_hash": "a97f7963782db12ca2f458f287f56b07ba14c747e0abd8e6e2e2f5a8b79eec45"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_plan_test_slicing_plan.for_i_x_j_y_in_zip.assert_x_y_all_": {"doc_hash": "6a348f56062ccdfe6e14397cd23c1cd1a8e86c425328703ff3bad1857c657902"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_pathological_unsorted_slicing_test_pathological_unsorted_slicing.assert_out_of_order_in_": {"doc_hash": "a25d67987892dd4fd8ad863ba14ad92aee85e6250b5683167baab63e4fbacfbf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_cached_cumsum_test_cached_cumsum_non_tuple.None_2": {"doc_hash": "29bf8318bec9be22e53e428cc9f34a970e62a0c23dcff6a02a7907728be21182"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_setitem_with_different_chunks_preserves_shape_test_setitem_with_different_chunks_preserves_shape.assert_x_shape_result_": {"doc_hash": "4de263d043a46efc2cfba63a31a9909311a98741ece8a59a2e0d91e1a18a60d1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_gh3579_test_make_blockwise_sorted_slice.None_1": {"doc_hash": "6f0048cf89634cb33210a910663d735762b651496cc9cf3784bf2aa35da52fe6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_shuffle_slice_test_shuffle_slice.assert_eq_a_b_": {"doc_hash": "783e704f13d6f3a7f9c2a9dcf4fa1aecfcb073fe93035f9bf4df7f114bbd617d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_gh4043_": {"doc_hash": 
"8584c5b2b279eb8d8f9c437780681ef24fc2a4ba669304e504009e363137995f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_random_numpy_120_xfail.pytest_mark_xfail_": {"doc_hash": "262305eb8d4439fb9584757977a722d4b3309ec98d910395e620f1b646c642fa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_functions_functions._": {"doc_hash": "032ee98bf6b62285eb470188b41ce2b2ed1bd7eefb5e16427563961f9800e922"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_basic_test_basic.if_yy_shape_.if_not_isinstance_zz_spa._mostly_dense": {"doc_hash": "5a1de8edeee909930af8ac3909746b84b49f3ccfd7b8af527be28820f21525d3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_tensordot_test_tensordot.assert_eq_": {"doc_hash": "116a7f181abebf92b965fe38346cb97d7e8d86b3322bf927f9f9dacf1e1acc52"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_mixed_concatenate_test_mixed_concatenate.assert_eq_dd_ss_": {"doc_hash": "95a74bbda6223e3d0da2d7403a22dc12c576a796376b0b4cde9ca480e9c75330"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_mixed_random_test_mixed_random.assert_eq_dd_ss_": {"doc_hash": "609129fca2e07f0f70da65dec3b1224c232553e4d08b9097ac6213a3eff8e8bc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_mixed_output_type_test_mixed_output_type.assert_zz_nnz_y_comput": {"doc_hash": "8daffa956ea43fb8e11efcfdaa1c1deab53bc6af7e7d4336fff9816c9d3837b9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_metadata_test_metadata.if_IS_NEP18_ACTIVE_.if__numpy_117_.assert_isinstance_np_conc": {"doc_hash": "1d0179fb3fe285c273a4e5134bd37ab99130ef7470883f38516a044e0e060212"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_html_repr_": {"doc_hash": "6abd0248081c37b85688a947814dff28de9d8dc64b6311aa56a372b081c957aa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_stats.py_pytest_test_measures.assert_isinstance_result_": {"doc_hash": "32cc2582c7c22ffcbfc32c309a05ea09d40c86daac4baf6498d0073bf6a661ec"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_stats.py_test_bias_raises_test_one.assert_allclose_result_co": {"doc_hash": "44d23cf1e5937ae824ed95baf39179a4c85f3ac790088bd630f3b547f2a0c161"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_stats.py_test_two_test_two._assert_dask_compute_re": {"doc_hash": "6f1e4e6e3bb7c5dee41514f499c4c8fba52ceaa842bab6b0ef4ef69631353a11"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_stats.py_test_moments_test_anova.assert_allclose_result_co": {"doc_hash": "f8d134c7f99354c7824f6fb6cf11d1c20ee4f7d4e80051ff38a880c9cf3f897f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_stats.py_test_nan_raises_": {"doc_hash": "e6a245ef3b6c8674f7d8a11aaf64d05110881bb2034a6a51770aa1fc2a64a209"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_svg.py_da_test_basic.None_6": {"doc_hash": "7d5768328ce2af41309b103faf19574a47dd4e04d786176c2cc4e3a8c5a8e19f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_svg.py_test_repr_html_test_errors.assert_unknown_chunk_siz": {"doc_hash": "a695830fd68d8ae7a03a27bc436c1753987c71b66c1dcd8c46cca4fddd9a5b79"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_svg.py_test_repr_html_size_units_test_repr_html_size_units.parses_x__repr_html__": {"doc_hash": "10fe4c5b65f960c073a0cb0704d144d42e351979870aa40c2f1eaf2443d7abf6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_testing.py_sys_": {"doc_hash": "a416f860d0810cba30cb0c86f627fc38c169b5a07eda28f0f594e6f12a0107ab"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_pickle_unary_ufuncs._": {"doc_hash": "c50b79d6a3f03cb202970d6c717c9f73f79697b7f236458661a28627332b436d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_unary_ufunc_test_unary_ufunc.None_3.assert_eq_dafunc_arr_np": {"doc_hash": "b568dfb5dcc020bdb9cb43f941e21bd51e49d815df047890e41ed915328cbd21"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_binary_ufunc_test_binary_ufunc.None_1.assert_eq_dafunc_10_arr1": {"doc_hash": "855afdfe1c6d6a24d643172abeffb0e993a177c8e5326160fb83e9971a9d6743"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_ufunc_outer_test_ufunc_outer.None_2.da_sin_outer_darr1_darr2": {"doc_hash": "c22a1b226ecbdc561ea4d4965f5f286b13a65da0c3147ba9247f11e6ae9e1b0e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_complex_test_complex.for_darr_arr_in_dacomp.assert_eq_dafunc_arr_np": {"doc_hash": "1fe36a3324aad8d712d949516394eff07c19d02977e5d12249d81fd9221c4b49"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_ufunc_2results_test_ufunc_2results.None_5": {"doc_hash": "7c36488e259cea07d004831eef07db610f5364e4f1dd5fdd26989e2e13c324bf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_clip_test_clip.assert_eq_x_clip_min_1_m": {"doc_hash": "a0bb9c17818baffbcac50efad2e96cdb352bb7d351c8c1f5cf93c808b879268f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_angle_test_angle.assert_eq_da_angle_comp_": {"doc_hash": "f7c68ca77b74e31b6ef0ad433d78725ec0917b90f894c6d2916baa1bed8b2ff7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_issignedinf_test_non_ufunc_others.assert_eq_dafunc_darr_n": {"doc_hash": "ccd37c2060d40efb00baad0f5a38731a9a878c5c1866af3b1f7ee7a8307d40e9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_frompyfunc_test_frompyfunc.with_pytest_raises_NotImp.da_frompyfunc_lambda_x_y": {"doc_hash": "064ece928acf5c50fd25a5cf4e6ca7a9785cf1899b12c25a483b1a3143d63ae1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_frompyfunc_wrapper_test_frompyfunc_wrapper.assert_tokenize_da_frompy": {"doc_hash": "e6e7769a21c1543c51072a63eb80f8e618448689dcf1f44f60d490151f893ad6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_array_ufunc_test_out_shape_mismatch.with_pytest_raises_ValueE.assert_np_log_x_out_y_": {"doc_hash": "bfe146f8fc229e85fe339828ee3ddad4089311e584b4fe9376696312a5482c9a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_divmod_": {"doc_hash": "2f457847ab2f49ecfa28e537c2e41e1a38637796bf4efe8f73e4c394d43954f3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_wrap.py_pytest_test_can_make_really_big_array_of_ones.ones_shape_1000000_1000": {"doc_hash": "36fb0345b73abec493d5f70ab9a6ae18e4786cd7684bc22ca421eb372f913fec"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_wrap.py_test_wrap_consistent_names_": {"doc_hash": "df994849e8dcfbccf94fc5fa8c114e311f0e531973f4be04f32fa0235a6c8a7b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_xarray.py_pytest_": {"doc_hash": "9a08f8f36d4ed402bfc5c30d2ad79af836ecb26aaee3aaabd04a18ecabb39998"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tiledb_io.py_core_from_tiledb.return.core_from_array_tdb_chun": {"doc_hash": "0889f8cc3db4d8455178d9997b3063497dd93a0875bfaa75a1002dae1953d28d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tiledb_io.py_to_tiledb_": {"doc_hash": "c4d37af360240ecfb4506f203a001d68dc42262bafe81f9563bc4c4a772a3cef"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_from_operator_import_geti___array_wrap__.return.x___array_wrap___numpy_uf": {"doc_hash": "f1e4a9cd37367b2c20b14d4be29f0f5ef50b9ab2adeb8fe77f0796662eae9012"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_wrap_elemwise_wrap_elemwise.return.derived_from_source_wrap": {"doc_hash": "ed19864579ec0b8a9b67f569287131c8cbed810177c918bcdd5a6b3caa733510"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_da_frompyfunc_da_frompyfunc.__dir__.return.list_o_": {"doc_hash": "521c1fc271f1aae1bf2123ed64e2ce5718a93db744203e9e7b0f953dcde6209b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_frompyfunc_ufunc.__repr__.return.repr_self__ufunc_": {"doc_hash": "7e8dbe4e0911f084e2980bc0fd6bccb53306709808735105c4a22790822f6d39"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_ufunc.__call___ufunc.__call__.if_len_dsks_0_.else_.return.self__ufunc_args_kwar": {"doc_hash": "52ea435fc3d679f7e9519f9d1c2520da8fb916ea831fe0d62c038fab6eefdf89"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_ufunc.outer_ufunc.outer.return.blockwise_": {"doc_hash": "87d67491fc93a5456eadf7bc9cb3776b8d1519418e284366a2d837dbfda553c4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py__ufuncs_copied_from_thi_degrees.ufunc_np_degrees_": {"doc_hash": 
"1c1f97a566358b5dd319d1974838898f0f8f2c4e3274de4b212501da60f475c3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_radians_angle.return.np_angle_x_deg_deg_": {"doc_hash": "8eaa6df4a796b849c9dc929d67fe16cfbb6a8956195641e5ed3aba40fdc13ab2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_frexp_frexp.return.L_R": {"doc_hash": "7df10ea761e48e6138debd2e62c747d698bf363cae8bda1cd3409f1199f4e86e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_modf_": {"doc_hash": "0200922afb839283510484838f4b35a0a0c7c54fc51526c3ab8975c110075076"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_difflib_normalize_to_array.if_cupy_in_str_type_x_.else_.return.x": {"doc_hash": "514cc3019dead3ad2212b82b53e77d357464f1f564667cf5ef76c0fad963f167"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_meta_from_array_meta_from_array.return.meta": {"doc_hash": "b1b76fc3c9ec484c65a663e459f85370e450f3daa8ec29001fc95d590d6bc8a2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_compute_meta_compute_meta.with_np_errstate_all_ign.return.meta": {"doc_hash": "805219aa60342e51f3f59866d93b6719df924f87d4261ab412c3f20d3e0bb14b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_allclose_allclose.return._a_b_all_": {"doc_hash": "2c174f65913381302602963a617252a94a0b4a8c39f54e63eb0fc363908f8612"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_same_keys_assert_eq_shape.for_aa_bb_in_zip_a_b_.if_math_isnan_aa_or_math.else_.assert_aa_bb": {"doc_hash": "09651598c07b9ba2e748208e34e2a08010f997f9661971885b8e3615b04584c3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py__get_dt_meta_computed__get_dt_meta_computed.return.x_adt_x_meta_x_compute": {"doc_hash": "ab4e827e7350597b663250e96360948366652c8d6e1487b417a3e48fb9c0f890"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_assert_eq_assert_eq.return.True": {"doc_hash": "34de5bffab3780f516c72c8ac2cf69104cf7c5c37074a00d34ff4f259e3a48ce"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_safe_wraps_empty_like_safe.try_.except_TypeError_.return.np_empty_shape_kwargs_": {"doc_hash": "56d14afa7ce0ec99e8d116cc262d6d0b3000b87f260269904bf00bace2715714"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_full_like_safe_full_like_safe.try_.except_TypeError_.return.np_full_shape_fill_value": {"doc_hash": "524aa9ab0017ad7f306b6cfa9473c4aa4911fa8552076ff2b0cca484bded860e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_ones_like_safe_zeros_like_safe.try_.except_TypeError_.return.np_zeros_shape_kwargs_": {"doc_hash": "8d84c769e41aeee797706c253dba44e35eb32e921aadc3d115ac608f59c9841a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_validate_axis_validate_axis.return.axis": {"doc_hash": "69bcdc9bc7309839c9378633c49a23e3c4c16ff4c5d602c0593ff29cdf6e67b6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_svd_flip_": {"doc_hash": 
"7ad3f9374ee0b9dfacca74fe127e29cc6a5a774f23a3292d026ea19dd6b719fc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_from_functools_import_par__parse_wrap_args.return._": {"doc_hash": "ad9024d5c1ad83c11adf952c20290ed8eed0200084b2d0ecaa75e909e12504ae"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_wrap_func_shape_as_first_arg_wrap_func_shape_as_first_arg.return.Array_dsk_name_chunks_": {"doc_hash": "4036aa236e72c68686aa56371de159fa6a93d8b47f5ad26bb75ff7d5847441a1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_wrap_func_like_wrap_func_like.return.Array_dsk_name_chunks_": {"doc_hash": "faac998a73314fea65dbf98e63838cdf6ba33e2fd1b747ed148fe5ec3be18358"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_wrap_func_like_safe_wrap.return.f": {"doc_hash": "1c08c9e6955978d3e4741a07f5700b03de2f9642c792baa06a81d15931f18f21"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_w_broadcast_trick.return.inner": {"doc_hash": "68ea4278dc66cd6b5a22b3be7235ec95625aec977784f0609652bd9bf2dda750"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_ones_": {"doc_hash": "11c43ee794f5a76cafb893eacc9ec550cf0b6b4922839d2023f2ace30734a136"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/__init__.py_try__": {"doc_hash": "93d49d99cad4643022d718a7ea89a32b1c12ee4f8d1281a11617de25b93b07e2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py_io_read_bytes.return.fo_read_size_": {"doc_hash": "5952210077fafaeaf47892694839e29bef4c95268d03277da03c9e7044bb49e4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py_read_header_open_head.return.head_size": {"doc_hash": "8f2a6c631fa44c51fc1d0dd8bd2469c94d67fba4cda285837d01422527769ab5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py_read_avro_read_avro.if_blocksize_is_not_None_.else_.return.from_delayed_chunks_": {"doc_hash": "202972bdbebad7d6aa946e8283725d67977a328f848262dbd96fcf877c3a978b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py_read_chunk_read_file.with_fo_as_f_.return.list_fastavro_iter_avro_f": {"doc_hash": "989cfc734aabb90471da5a1535b9a9f02360c6b125643cf5bd5937ad46aded64"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py_to_avro_to_avro.storage_options.storage_options_or_": {"doc_hash": "f55d9f46dce978f6b9ae625be46c1277cf5c052d061a7c6d30c647f26ae5c7fa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py_to_avro.files_to_avro.if_compute_.else_.return.out_to_delayed_": {"doc_hash": "80a51d27216abfb89532dfa96cb53082a4c34fc0f73dfff81d82fc5730a557c4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py__verify_schema_": {"doc_hash": "d090e20a48979bc698f864f7efed6d50f78d43f7ea2da4f49194a45b58627826"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/chunk.py_barrier_": {"doc_hash": "77c4a57bd6b0d4a17c9b5712ba28335d19ce0eab6f4df8d804c227968b2633ee"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_io_no_result.type_": {"doc_hash": "ac3e96905e326c1d39d5e3804ebb6631a4114966a8c7fb627a10510799c60b72"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_lazify_task_lazify_task.if_not_start_and_head_in_.else_.return._head_tuple_lazify_t": {"doc_hash": "9f34d0598b6d7d3d7fb6d546ee8c435d4f16853a9df9f1b50ef7dd5f1deb1714"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_lazify_inline_singleton_lists.return.dsk": {"doc_hash": "73e9a4a09dd3a9b48b14032de8b872b2e52d4939ec9e03f1da9e44bcbcd98ca2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_optimize__to_textfiles_chunk.with_lazy_file_as_f_.if_last_endline_.f_write_endline_": {"doc_hash": "6a1bb18418d417ca7c565d2f616fb10c50555d92e936009c86c668930615f21b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_to_textfiles_to_textfiles.if_compute_.else_.return.out_to_delayed_": {"doc_hash": "dc2aa8d3c059abceb837d8a783cf81c473c6766cf9774e7d9152561b6987ceb8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_finalize_StringAccessor.__getattr__.try_.except_AttributeError_.if_key_in_dir_str_.else_.raise": {"doc_hash": "35adfca70fdc5650bcfaf1357478d7cc5d13d9d4a2efa5b9e46cdc3b240ae2bd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_StringAccessor.match_robust_wraps.return._": {"doc_hash": "9b47d761a61446dcc9377b3541cad219ef0661e3a944db9eb9bedcb53b2b8ce6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Item_Item.__int__.__float__.__complex__.__bool__.DaskMethodsMixin_compute": {"doc_hash": "a08a01a62e643241fd80624cd183ce8c6d840789cb3eda6194bcb88f90dee66d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Item.to_delayed_Item.to_delayed.return.Delayed_self_key_dsk_": {"doc_hash": "720572333bf338e181568e7d1473d116c1b8637a44999fdc15d8df7ebb0b5709"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag_Bag.str.property_fget_StringAcces": {"doc_hash": "dc2ae8358602c07d5ab0fa3f15b84279e7173724e474eaa34a75f3dca4be4ebb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.map_Bag.map.return.bag_map_func_self_args": {"doc_hash": "d351daa549a3c5eccb2508cce2b06e2c9b5cafd81d4713de7784c644efe24e39"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.starmap_Bag.starmap.return.type_self_graph_name_s": {"doc_hash": "143bc09a31284088be457d6e1ac0e1d2189c77bca37b5e64d8f5c13d602baafc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag._args_Bag.filter.return.type_self_graph_name_s": {"doc_hash": "37094a9d9c8fa180e3dcd8b6631f2154b3dc1457408480f96c12bc27c066b676"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.random_sample_Bag.random_sample.return.type_self_graph_name_s": {"doc_hash": "a0bbea49d8f8a5d3a42df3ea4ec0d679b4979428d13bb7f05cc0cc2b70d0c77e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.remove_Bag.remove.return.type_self_graph_name_s": {"doc_hash": 
"d2e00e54fe43315c070c633d08a541fa8ef810100bb2758fd03e90a6dfc22a43"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.map_partitions_Bag.map_partitions.return.map_partitions_func_self": {"doc_hash": "3942f90971a958f20680920559f5de642ad36eb0cbe05322bab4d591299a57c5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.pluck_Bag.pluck.return.type_self_graph_name_s": {"doc_hash": "0c701a2553668bf0332ec9f924a95f5428ecd99711104c56a1ea40fcd50b1ec2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.unzip_Bag.unzip.return.tuple_self_pluck_i_for_i": {"doc_hash": "e1e02f77c25b63835129f681f7f76078735689ed97952b899bee988a343bcade"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.to_textfiles_Bag.to_avro.return.to_avro_": {"doc_hash": "f8a57b06f364e79e0b2a2b0e5ff843a121bc1b400533cdb6550903cb309749f4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.fold_Bag.fold.if_initial_is_not_no_defa.else_.return.self_reduction_": {"doc_hash": "24f4c1d0d37119a67aa454ba34fcad13f64c85e752c620dfd2141f302d3c83f5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.frequencies_Bag.frequencies.return.result": {"doc_hash": "376ec959b5064a9296cf8920864ea0ff927e3aacdcde5c058173101ef33c94e3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.topk_Bag.topk.return.self_reduction_": {"doc_hash": "53d38a13a96d9c52e0f173d910e05f74567228372b635e39a2e18b948de3c4d1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.distinct_Bag.distinct.return.self_reduction_func_agg_": {"doc_hash": "8be5233f7212dbe28e592c98cd2ca96d000addb88c304138a5a77d24b16d3b63"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.reduction_Bag.reduction.if_out_type_is_Item_.else_.return.Bag_graph_fmt_1_": {"doc_hash": "f4fb5cd6b25f0816a797f1011ad60b4e0b7038088dc03b8011bbbc3b57b82344"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.sum_Bag.std.return.self_var_ddof_ddof_apply": {"doc_hash": "cee5593abc76ce9c8e429095fed26099f97b560edb0606075411a5c4faf3ca30"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.join_Bag.join.return.type_self_graph_name_s": {"doc_hash": "399abd6c453533836401905650ca22c7cf89a273a62bf683b5262c9db80c51de"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.product_Bag.product.return.type_self_graph_name_n": {"doc_hash": "c85a273c24cdc22002c7bb84b7d4514ba40ef96604e165311b2828d8a561cca7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.foldby_Bag.foldby._Combined_reduction_and": {"doc_hash": "dfc1a799a39c166837cbc1d960fd9c4933d6400615cfa6f8c567d7f88831e2dd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.foldby.if_split_every_is_None__Bag.foldby.return.type_self_graph_e_1_": {"doc_hash": "5c01fa68e1e35fec087c9ac84c09da32c1413b94bf1181f4b62aed969dab3c5a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.take_Bag.take.if_compute_.else_.return.b": 
{"doc_hash": "81f3d03c45f2dfe42d7668d7b11e3eeedcdd0659af7a4ff0a33802f51d6fbb94"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.flatten_Bag.__iter__.return.iter_self_compute_": {"doc_hash": "9e5f1aaefee4f3dd1b7d3001cedaf4ba01385437927ea72a083eba616becf40e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.groupby_Bag.groupby.if_shuffle_disk_.else_.raise_NotImplementedError": {"doc_hash": "25dd0a38a40bf4a9c68c4fc8b3de8ec92c764dec0491c69e79fd223ca9beec7e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.to_dataframe_Bag.to_dataframe.return.dd_DataFrame_dsk_name_m": {"doc_hash": "04fa903701c885c8545bae6161db6e18f5518d62a094244569fa41cc7726575b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.to_delayed_Bag.to_delayed.return._Delayed_k_dsk_for_k_in": {"doc_hash": "4290fdd0f957f1f414da41f8d9d1f31c79189aa3d0a92d3538b5f0ed662c62ca"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.repartition_Bag.repartition.if_npartitions_is_not_Non.elif_partition_size_is_no.return.repartition_size_self_pa": {"doc_hash": "af1a7f2fe11d87659416939ab21c01f1937a412fe1232ebb7567cb3da99463d4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.accumulate_Bag.accumulate.return.Bag_graph_b_self_nparti": {"doc_hash": "f94e17e45a9250fdb495c9fd5335bd60f2741f05070d7acf427550ef81d06444"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_accumulate_part_collect.return.list_d_items_": {"doc_hash": "58fe062d55f7cb7c145994a9ffdc75648ad38f04c3812c0da382bb00d5c2b6da"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_from_sequence_from_sequence.return.Bag_d_name_len_d_": {"doc_hash": "c415339f7ec81cfd2e5a613cbd803ca54a31bf5d262cddbcb110cfede50a9ea9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_from_url_from_url.return.Bag_dsk_name_len_urls_": {"doc_hash": "528e411215a5241d148b8c76646686b6219ff81c6088213ba354e638a73e525b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_dictitems_reify.return.seq": {"doc_hash": "b3996376dce02250e1c39c45f1dd22f34667f5dab289b19355278d5c571c23a8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_from_delayed_from_delayed.return.Bag_graph_name_len_valu": {"doc_hash": "fb6c3ab115471184528795bf62770ea6ec6351be9acc1fc64432ad3b265b2ad9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_chunk_distinct_merge_frequencies.return.out": {"doc_hash": "cdbb1050ad7c682f86d4af3e7807256ecafed2311ee5ec5d380f1aaed6023701"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_bag_range_bag_range.return.Bag_dsk_name_npartition": {"doc_hash": "86fc7db4534b54146edad9c77167a8e101b4bcab8d288477f05e7120f4cbf264"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_bag_zip_bag_zip.return.Bag_graph_name_npartiti": {"doc_hash": "366d1d793f9b7ce25613369a7f2a68021e6e1f433789cd8492c8eef8d6b91c67"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_map_chunk_map_chunk.return._MapChunk_f_iters_kwarg": {"doc_hash": "1e15d09daceb28e25cdd59b0dcf840cadd8966c13c6a94f952cbd01e39d13ea5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py__MapChunk__MapChunk.check_all_iterators_consumed.if_len_self_iters_1_.for_i_in_self_iters_.try_.else_.raise_ValueError_msg_": {"doc_hash": "a6194206d15d7afa522515384bb3e8214ca55b2609c844efbde64a5f1e616e63"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_starmap_chunk_unpack_scalar_dask_kwargs.return.kwargs2_dependencies": {"doc_hash": "569fdcfdea17cc23ab5e6c5119962009de352d129c1e6b14daef4a307cee3760"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_bag_map_bag_map.npartitions_8.npartitions_pop_": {"doc_hash": "0dc9cc67ccbaec7ecc0a61b947e4606fe79507823e51009420dfcb03230415a8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_bag_map.build_iters_bag_map.return.return_type_graph_name_": {"doc_hash": "41232288ae16e954cdeaec4f945f98789c5d1759d2f47291bb8c0211658a2761"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_map_partitions_map_partitions.return.return_type_graph_name_": {"doc_hash": "264c951e25f939039900b82404a71b2c93ab379ed8849a32e4847d133c4bd207"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py__reduce_groupby_tasks.return.type_b_graph_name_len_": {"doc_hash": "d2070097314de707430dcbea51107e535c10092d8f622a947383d70142147f25"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_groupby_disk_groupby_disk.return.type_b_graph_name_npar": {"doc_hash": "9bb1e5a5e2038c710d63e95c5d5af3faa9e46789891b558d35ca871cb964960c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_empty_safe_apply_safe_take.return.r": {"doc_hash": "4bae5b3b90357b55005866cbc2707879df17da5e631631834d497b70210fc554"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_random_sample_random_sample.for_i_in_x_.if_random_state_random_.yield_i": {"doc_hash": "f7e20d28b52949abca422ba4ebc4dc64646884b1d4d04d0e64e190b943fdea1d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_random_state_data_python_random_state_data_python.return._": {"doc_hash": "f5fe0b96908ee250266ae2f0e9ac949c02638a4e7556e85c9734bc6a52ae64d1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_split_to_dataframe.return.res_astype_dtypes_copy_F": {"doc_hash": "f0de6cc5a8338033478a4da9050b2f86ef28fdfb812ab107efcc4cd8dcd2499b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_repartition_npartitions_total_mem_usage.return.sizeof_partition_": {"doc_hash": "f7f96f9841c9e7beb2c20a217438db4981802fc76393246935b34c753d477df2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_repartition_size_repartition_size.return._repartition_from_boundar": {"doc_hash": "24eed188bb0ecc5573e6015249d88888d41bc5f326056cc728775ec17e1dd2bb"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py__split_partitions__split_partitions.return.Bag_graph_name_new_name_": {"doc_hash": "0e523a067903ff031fbcf0d8b3ce067f7a3f2cfd41d9b4bf95d496e62a9bceca"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py__repartition_from_boundaries_": {"doc_hash": "9648f945b92946e5a8d4e832dcd7cb6649fc802f0f461eb74c5eb37b230e4026"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/random.py_heapq_sample.return._sample_population_popula": {"doc_hash": "d3ffda1e463df14d7c5aba883d488528dd341ae3553662170c763589aca8e549"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/random.py_choices__sample.return.population_reduction_": {"doc_hash": "664565c1213f93b79b4ab2e22a0d3a22e9b5948f5c606a037d4af9b85da57ea3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/random.py__sample_map_partitions__sample_map_partitions.return.sampled_lx": {"doc_hash": "3ca9223b33f4041737cfd2b4920cd53f855dace9a0be2e852f25d01185bb2ad3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/random.py__sample_reduce_": {"doc_hash": "45f7cb4bc4b15313e2dd68158683ea670abf8821f9c5b62c4918191b06ac902b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_avro.py_os_test_onefile_oneblock.assert_b_compute_exp": {"doc_hash": "7edd41e9b8a207b0b23d5d5faf3e7b8cd93880f05fcab32e6ae781318dd456ce"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_avro.py_test_twofile_oneblock_test_twofile_oneblock.assert_b_compute_exp": {"doc_hash": "b6185b3811c6d650cec902cdf8c4f736253044b4e89b57b49c38b44b66be6a01"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_avro.py_test_twofile_multiblock_test_twofile_multiblock.None_3": {"doc_hash": "c3f54e959c2264b7ea675f7564137350f738bc2538728895aeddf234b6456c4e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_avro.py_test_roundtrip_simple_test_roundtrip_simple.assert_b_compute_b2_": {"doc_hash": "c11bdc5a3f657f02520ea86b7bba4220e580133865bd2a7c65f5fb9a682a6e59"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_avro.py_test_roundtrip_test_roundtrip.assert_b_compute_b2_": {"doc_hash": "f01dd0bff9f67e79c3b5c5986c2053b6942d71217c885baa344483598dafb11c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_avro.py_test_invalid_schema_": {"doc_hash": "22993c35870e4851a1977a78bce769886f1006fbe7fae2a922706216688bf318"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_bag_map_test_bag_map.None_3.db_map_myadd_b_b_unequa": {"doc_hash": "aa7f0efce4c604c060aef70d385a3686e5ab97bfdcea83790e4e890012c99a89"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_map_method_test_map_method.assert_b_map_myadd_b_sum": {"doc_hash": "760a546eed50a6bc57558a197178098ede44b92a3bb8d335eb4460d921a9c749"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_starmap_test_starmap.None_3": {"doc_hash": "66176e4187d0a899b67e42f452e5b4b1971daf32e9940cef64f66f8ec0c924a4"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_filter_test_repr.assert_from_sequence_in": {"doc_hash": "099f6bfc84ea03caeb51ae851cebfaedd22f8e90e3fef1a931d7c88aeaf474ac"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_pluck_test_pluck.assert_b_pluck_1_0_na": {"doc_hash": "8800635d01d75b02d89f0f9911cf57b66b2a485923e65d05becb6d7e21beac34"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_pluck_with_default_test_unzip.assert_one_name_two_na": {"doc_hash": "95f430def4f29c6c4675d296e5135c9399dc043a76dc0f3df4a78e1f29946ec1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_fold_test_fold.assert_set_e_fold_add_in": {"doc_hash": "3e1f41e0e5e8a2481030f04bf9c6f827c74182254c2745564c3039b71b328e7c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_fold_bag_test_distinct.assert_bag_filter_None_d": {"doc_hash": "79cac33404b0e2c11ea3511e5491e1ee7f9180bcfe6640f46b4da4803a1ac0ae"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_distinct_with_key_test_distinct_with_key.None_1": {"doc_hash": "a8a82100fce227a1042480f23fa58e247049f9d920894b41d5b8e69aca10403a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_frequencies_test_frequencies.assert_eq_bag2_": {"doc_hash": "922aeeca8d7e383ef57ff76f3778618b929865ef3d156138668ce22f900eda47"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_frequencies_sorted_test_topk.assert_b_topk_4_name_": {"doc_hash": "3cc7e44d37f213ef28dba3b0858d5353c4dff38d3b8cb9396fd247a02fcb356c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_topk_with_non_callable_key_test_topk_with_non_callable_key.assert_b_topk_2_key_1_n": {"doc_hash": "3bb0002646517311bea362da8f368bd3101d8ea735b82c5b974c41ad9741b22a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_topk_with_multiarg_lambda_test_reduction_names.None_3": {"doc_hash": "e62e2ce9f0a15c3dc728fa4c17f870fac057de875a666399dd988da21285911f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_tree_reductions_test_tree_reductions.assert_c_key_b_sum_k": {"doc_hash": "3e5dabcd765b0057812c758c81bff2b95d4145b622b94d9675226a81a42b0fb3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_aggregation_test_var.assert_float_b_var_": {"doc_hash": "2d80171c298ce3d3dbd6afe5afe74582948800d3a006bc7c2d642d58dac8c955"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_join_test_join.assert_c_name_b_join_o": {"doc_hash": "e4aed67a9cb69878dc71ab408d79c2de111d2a5b1aec5b27c67d69da29814784"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_foldby_test_foldby.None_3": {"doc_hash": "79cc570881a7c18c01fe4c7439afc14a212d77fdcd37cbf8aa48cad3a56cfb05"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_foldby_tree_reduction_test_map_partitions.None_2": {"doc_hash": "7e6defc5ac92f37e8ffcffabeb07d78a7a91e09a18d0ae36536159f46c23f33e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_map_partitions_args_kwargs_test_map_partitions_args_kwargs.None_9": {"doc_hash": "eaf9d58ced3176aea90c9b39c6e1c3cd3f1bd54553f471f84058765f1a17fc91"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_random_sample_size_test_random_sample_random_state.assert_list_b_list_c_": {"doc_hash": "6ad16f98f12d3e935deea1217eeb9e3e0ad8f80b71b8c52dd6ea792faec60673"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_lazify_task_test_lazify_task.assert_lazify_task_a_": {"doc_hash": "fe8f67007b2fcc8fc9fe6b11e946cce60c3ced538507d755a54defe37d6c4d2e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_f_test_lazify.assert_lazify_a_b": {"doc_hash": "09e62c895567a053bd229467d5f9f03292583269abb8e7412293466530dfb674"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_inline_singleton_lists_test_inline_singleton_lists.None_5": {"doc_hash": "3e06bb88fe519316fc933c506636d8d73a47de901209d40fe28a71495aec9707"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_rename_fused_keys_bag_test_rename_fused_keys_bag.assert_optimize_inp_c_": {"doc_hash": "1cd078f729830bd0aff519a1fc3525888375defca41260874a09dc6a00f546c8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_take_test_take_npartitions.with_pytest_raises_ValueE.b_take_1_npartitions_5_": {"doc_hash": "0a36b7156d0ba67c5d68eb4d02aba82ecb1c029e72baf9299aee47d8575a7c9a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_take_npartitions_warn_test_take_npartitions_warn.with_dask_config_set_sche.None_1": {"doc_hash": "167f72c335a76d54175114a1e2417229862428e9b29f89520b22d2a575c0723d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_map_is_lazy_test_read_text.pytest_raises_ValueError_": {"doc_hash": "0dc00d9f0143b1b5050be96cd763bcc342a742a69758706439fd3d281b272c16"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_read_text_large_test_read_text_large.with_tmpfile_as_fn_.assert_list_b_list_d_": {"doc_hash": "64a51748a951370ac14da86965d88003060e611c9a9d6aa596b918a868526f19"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_read_text_encoding_test_read_text_encoding.with_tmpfile_as_fn_.assert_list_b_list_d_": {"doc_hash": "dee38d89ba0e64664a4867582791d5790cb198bc287d139c1bb2f8358bfa36a1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_read_text_large_gzip_test_read_text_large_gzip.with_tmpfile_gz_as_fn_.assert_join_c_compute_": {"doc_hash": "68174ffd71eb2f2913e99ae98b1a664daa9959a52084b26e965603b44f35f3b9"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_from_s3_test_from_s3.assert_c_npartitions_3": {"doc_hash": "1a4d07de7570a32c4279dbe40f14f424a1bc74e8800570a709db5eb25613c5e3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_from_sequence_test_from_empty_sequence.assert_df_empty_DataFra": {"doc_hash": "e98bb3d75aa093c3d7dd9d3504d4da5a9694daecde30e8a2213ccc8a18eddc1b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_product_test_product.assert_z_name_x_produc": {"doc_hash": "7ab76d50dc91caac67c164b3b2193ae71debaaf8c309d26c39c2b18e94190411"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_partition_collect_test_groupby.None_3": {"doc_hash": "00ca06171feb31f448d32abc6fa5dc7612270a61f906068e3391070693e341eb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_groupby_with_indexer_test_groupby_with_npartitions_changed.assert_result_npartitions": {"doc_hash": "d75241db51b3cc42fa3264de888125ea82a15f3dc6dd6fa2ce6e2892696ed5b2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_concat_test_args.assert_c_npartitions_d": {"doc_hash": "eac083e380bb684029828c645f321b252f4615156fabf43f77f2f5956ef27eca"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_dataframe_test_to_dataframe.for_f_in_iter_tuple_.check_parts_df_sol_": {"doc_hash": "f8a858aae4e0bca27171e6a68de19148a274f349899fde49e7705d7ebfb7be65"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_ext_open_test_to_textfiles.with_tmpdir_as_dir_.f_close_": {"doc_hash": "0e007b00eebbedab9dd0d2daf0598873659212086d2089b2d64733fd057bde91"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_textfiles_name_function_preserves_order_test_to_textfiles_name_function_preserves_order.with_tmpdir_as_dn_.assert_seq_out": {"doc_hash": "16b71a58f3865123050036ed887af62f0686af1aec3d4693a6cb99c8291c2cb1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_textfiles_name_function_warn_test_to_textfiles_name_function_warn.with_tmpdir_as_dn_.with_pytest_warns_None_.a_to_textfiles_dn_name_f": {"doc_hash": "eb506107602f4c07597daf23762d7d8a024bb900f0af582dc21e372e55882b91"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_textfiles_encoding_test_to_textfiles_encoding.for_ext_myopen_in_ext_op.with_tmpdir_as_dir_.f_close_": {"doc_hash": "30d8e5acc9c59a9ee86bea04cb2adcd707fc28bba96ca52042f3c9df2364c2a1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_textfiles_inputs_test_to_textfiles_endlines.with_tmpfile_as_fn_.for_last_endline_in_False.assert_result_a_n_": {"doc_hash": "6b47638c047779a66830b54c8d714ea0d3bc49076577858488edddc10609ee3a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_string_namespace_test_string_namespace.None_6": {"doc_hash": "8facf602e61be44e0efbf5a61e6bace5fad3fca0f770850ee9aa317997663b78"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_string_namespace_with_unicode_BagOfDicts.set.return.self_map_setter_": {"doc_hash": "70cfc2bfeb58d72edb514b1b2a61bc1b3ce9483e8f4b570828796eb913bf1a60"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_bag_class_extend_test_bag_class_extend.assert_isinstance_dictbag": {"doc_hash": "851f81cb4bac536f58569251ed295a8fd4004cd1cef8cdb2f4ee0ee5f38877da"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_gh715_test_bag_compute_forward_kwargs.x_compute_bogus_keyword_1": {"doc_hash": "019a24cb279ce004bb9342df2a788c2c171c343e9d4c5efa48f1fc1a8b1f514b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_delayed_test_to_delayed.assert_t_compute_21": {"doc_hash": "f67d58abf59fbdaefc2c1d6726a224acd99684f563d1b001f17cf21f2b2b3402"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_delayed_optimize_graph_test_to_delayed_optimize_graph.None_9": {"doc_hash": "f60280fffd5db7c359c62e14c890372386ecb445843c65c2d7f6dc01c8048745"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_from_delayed_test_from_delayed.assert_asum_value_compute": {"doc_hash": "550f14dd8e14ec22cc01be245301c947fb65631ac810552b2e5fc8396a185dc2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_from_delayed_iterator_test_range.for_npartitions_in_1_7_.assert_list_b_list_ra": {"doc_hash": "5d50e7de2f9616372ea0ceb505083c59cb214dd5b2d41302972a40160e4b148b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_zip_test_zip.assert_list_pairs_lis": {"doc_hash": "782f31fc9b487063d4fe328c3612cdba4490fdb13658f46b2505e0b879589290"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_repartition_npartitions_test_repartition_npartitions.assert_all_results_": {"doc_hash": "f3195439dac9fe9ebc80f7ce2087a81325b0de4f15c0c7d9b42aae6f8edf1e60"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_repartition_partition_size_test_repartition_partition_size.assert_eq_b_c_": {"doc_hash": "420cedb9c100e4a172d6763a91e73beb3434558e8752debf97c52e8811e0757b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_multiple_repartition_partition_size_test_repartition_input_errors.with_pytest_raises_ValueE.bag_repartition_npartitio": {"doc_hash": "3bb673f45bf7635d2590cbc0eec63fa2af265f599b55ca47214c65586bdb5e6b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_accumulate_test_accumulate.None_5": {"doc_hash": "8d65b2bc4176301529803248ccdcefb128840719c9984922402ef0f0be6aab0c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_groupby_tasks_test_groupby_tasks.None_2.for_b_in_partitions_.if_a_is_not_b_.assert_not_set_pluck_0_a": {"doc_hash": "67a261846175607aa2474a0628a6e69a3ceecefdcae55b42b24cd41e801835d4"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_groupby_tasks_names_test_groupby_tasks_names.None_2": {"doc_hash": "a6d852a21ed896e8e6534553cdfd9da4f5e4dcb19ed1d65be6e2418756d6cee7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_groupby_tasks_2_test_groupby_tasks_2.assert_dict_result_gr": {"doc_hash": "7d1e285261e2ebf872ad67c10f9b5273b2ac6a895007635ecda1ba9b93c7aa76"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_groupby_tasks_3_test_reduction_empty.None_1": {"doc_hash": "3e6bb4d578d6cb9e3800e9decf665c90d721559121e008a32695bc81b19486f6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_reduction_empty_aggregate_test_reduction_empty_aggregate.with_pytest_raises_ValueE.b_filter_None_min_split_": {"doc_hash": "4b22aa80341cad3cb3b2e6774da9df632f620a9b57e9603b912cb27199d8fd2b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_StrictReal_test_bag_with_single_callable.assert_eq_b_f_": {"doc_hash": "56c85f284080151bdf82eca53e2aedf8f2a040ea86ea6c15a3498a0560226032"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_optimize_fuse_keys_test_optimize_fuse_keys.assert_all_k_in_dsk_for_k": {"doc_hash": "924eb58ae4ecef11f8951e67dbdf1255dcf6a6c1cf3455936c4b0bad23952f61"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_reductions_are_lazy_test_repeated_groupby.assert_valmap_len_dict_c": {"doc_hash": "7866f56f92fed79c717ff552b822f54ce51ae073608794977e8625de728db305"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_temporary_directory_test_temporary_directory.with_pool_.with_dask_config_set_temp.assert_any_fn_endswith_": {"doc_hash": "35777bbbc72ce40a5ee6ec7a419269fa06c32d85c949a1120a8f46d9dafd8466"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_empty_bag_test_map_keynames.assert_set_b_map_inc___d": {"doc_hash": "fdb2a3e3df65f1e473f0c24d406b470a023efd2c129fcb082d94fea4b522731c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_map_releases_element_references_as_soon_as_possible_": {"doc_hash": "7a567f37d62d284c7a97a27b84da2036b96eaa5233a92d7dad84d4a96e3b90d6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_random.py_pytest_test_sample_size_k_bigger_than_smallest_partition_size.assert_len_set_li_le": {"doc_hash": "33db67a3b38da1ad04fd187b6cbbdb7acd974cb27c2372214ee8c3b3d72424fc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_random.py_test_sample_k_equal_bag_size_with_unbalanced_partitions_": {"doc_hash": "a2d2d6f935905e9bdd8a42ec8624df80bcef0dbdcdb36309c1ab4e4e79bd7447"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_text.py_pytest_fmt_bs_enc_path._": {"doc_hash": "22d418ca6503773c2068e1fa3ef76b91b3d19bb02a1e14e3ede606a80b73c54d"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_text.py_test_read_text_test_read_text.with_filetexts_files2_mo.assert_join_line_for_b": {"doc_hash": "a97c8886560d1a68acf402dc4f8a912febfab2d53b5ad3c93cdad85dc48f65b3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_text.py_test_files_per_partition_": {"doc_hash": "1c3fec87e70a7e631a02eac88a72184c18a57bd181d3896db24b7d195dca2435"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/text.py_io_read_text.if_isinstance_blocksize_.blocksize.parse_bytes_blocksize_": {"doc_hash": "ace6a532a1da707ec7534e5e11c8165ea8234321c39741cac76c9e8b6bb8907d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/text.py_read_text.files_": {"doc_hash": "cc3e6393c3655a57140b54c548ef4b29177af7d872eac0b8f1a256ede3e0230b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/utils.py__": {"doc_hash": "930740fc7c6b0f995a0262dca4a34b61e680f6b43c13f5109ae3e852962ae71e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_from_collections_import_O_is_dask_collection.try_.except_AttributeError_T.return.False": {"doc_hash": "9c0be2122ea03d7ab77f0fb285fb63ce88cae2d63b926d955fc779d094470a26"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_DaskMethodsMixin_DaskMethodsMixin.visualize.return.visualize_": {"doc_hash": "e971b50f30fc3a92161b79a3bd9c4111124c5a5032789f163be4c119d3a33fc6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_DaskMethodsMixin.persist_DaskMethodsMixin.persist.return.result": {"doc_hash": "042dc788ab7ad0e3caa728fd4611acbe94f36d1e989ffca9bc6c41e7ba471d96"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_DaskMethodsMixin.compute_DaskMethodsMixin.__await__.return.f___await___": {"doc_hash": "4ec54d31a2c746655be2fe641e3876689b95bf2829eb1f0dd7e771d97d72a698"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_compute_as_if_collection_optimization_function.return.getattr_x___dask_optimi": {"doc_hash": "65d60c1789fa336fa0b7ca06305177caccb680c590e64bfaf1f19af9f5194bfa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_collections_to_dsk_collections_to_dsk.return.dsk": {"doc_hash": "b801a8adc556516b953f3bb2d4056aedb284929f18207513af33479b3ea2ea0d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py__extract_graph_and_keys__extract_graph_and_keys.return.graph_keys": {"doc_hash": "9a82860491682ba5dca0c587113de6a0dc73321f4c975cd43d6abb66390b6cd6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_unpack_collections_unpack_collections.collections_token.uuid_uuid4_hex": {"doc_hash": "72f5ce6819186cdeba0dbca676984ee3c0d7ff98af1760eb4d970fd06efde617"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_unpack_collections._unpack_unpack_collections.return.collections_repack": {"doc_hash": "29545c5030df477a1ed65f79d56e651de8960301425600d53cd2283d84d22968"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_optimize_optimize.return.repack_postpersists_": {"doc_hash": 
"0c7a422e92cfea7b0d1b54e92286436dd2296ae2932fd5672852334896f5c8ee"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_compute_compute.return.repack_f_r_a_for_r_": {"doc_hash": "33306585c3800f67be707e7b47b9a8529868b911db957b72f47828204cbf0a31"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_visualize_visualize.color.kwargs_get_color_": {"doc_hash": "99df00b3a8297b809a0080943afabb3bc5e0f15d3eb950fb80f3680e66f3f75c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_visualize.if_color_order__visualize.return.dot_graph_dsk_filename_f": {"doc_hash": "584b0a3147e1939bb75b6b5cac68dd5138868935ad66691b288d756d36872886"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_persist_persist.if_inspect_ismethod_sched.try_.else_.try_.else_.if_client_get_schedule.return.repack_results_": {"doc_hash": "806fafdfdb009141f827f773ad1e075cf36e8b05955a7d009c21582a0826b3cb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_persist.dsk_persist.return.repack_results2_": {"doc_hash": "d66b1209a6b5535fc60e57704c6af074cbd6edb3bc53db079d9136afed06a616"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py___normalize_function.try_.except_TypeError_not_.return._normalize_function_func_": {"doc_hash": "a8b18b88580437879e5ce6b01e2d9466700e6ddb7ba77ef19988a1c73bb8dd30"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py__normalize_function__normalize_function.if_isinstance_func_Compo.else_.None_1.except_Exception_.return.str_func_": {"doc_hash": "a8344cca74ea9aa7c5c312d832030f2d1c7b32e25d49cde9ddc4a0fae54e6a9f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_register_pandas_register_pandas.normalize_period_dtype.return.normalize_token_dtype_nam": {"doc_hash": "4cf4212b91c68e018e9111bed622bba7499cd7a7582b82c74ddc2bb4144acf40"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_register_numpy_register_numpy.normalize_ufunc.try_.except_AttributeError_.return.normalize_function_x_": {"doc_hash": "ad610fd504ec4f9bf7dc027ecb21fa4d5656b1696973ba4d8a776b8d65fecedb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_register_scipy_register_scipy.normalize_dok_matrix.return.type_x___name___normali": {"doc_hash": "52b08192f31836dde165fd98af5d4c70e14b1ea5372a728f18c4f2d680d368a5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py__colorize__colorize.return._h": {"doc_hash": "b06bc20dd6b213e2c8e49ceb441122120e54dbc4cca91b301287997e410d82f7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_named_schedulers_get_err_msg._": {"doc_hash": "27e4bafb0a578bf484162c0a419e478232aad71eb71bc05a585dd12511dcb54a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_get_scheduler_": {"doc_hash": "fd5ec4ff80ba37e1db5bcd5c32844e5fbdb6b84c6c86e82717fae5b2cfffdc0d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_blockwise_blockwise.return.subgraph": {"doc_hash": "3b81ba9044cb35fb28ced1d09531bb3feccd6d5d5559fda07df462649d6c81aa"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise.__getitem___make_blockwise_graph": {"doc_hash": "b86a287150f3cb8a9ea5b39165f0cc2457e5982b894d8f6359693bcdedd10f09"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_make_blockwise_graph._Tensor_operation_make_blockwise_graph._Tensor_operation": {"doc_hash": "1a01785f9c47df415e8184110716e7d01b0e097aa5ad95337406898cf430a9ee"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_make_blockwise_graph.numblocks_make_blockwise_graph._Create_argument_lists": {"doc_hash": "7a12db90505c68fcdc2ab29c5158a448b3e6157d5f65ddd0648c836923fcae7c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_make_blockwise_graph.for_out_coords_in_itertoo_make_blockwise_graph.return.dsk": {"doc_hash": "04ed463ce38548cfb2cc3b14b2f7481f8a6656afa6ee0e40815d9788c5c05270"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_lol_product_lol_product.if_not_values_.else_.return.lol_product_head_value": {"doc_hash": "c4b4a6c18ba16e099370e8999ecba925d18347aadb888ab11a83e013b511d0a4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_lol_tuples_lol_tuples.if_ind_0_not_in_dummies_.else_.return._": {"doc_hash": "0d4d8c59257339b315885a6c0e145b585373804d05f5d58aea933642ad461c55"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_optimize_blockwise_optimize_blockwise.return.out": {"doc_hash": "928fcf963fb1e414b02b32180914f0d2d77d718c6144439935710819b8ea3925"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py__optimize_blockwise__optimize_blockwise.return.HighLevelGraph_out_depen": {"doc_hash": "53139ea974471b8bcb26a21719672e25c586d79cd1ee5ca0f7083c27cb94276e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_rewrite_blockwise_rewrite_blockwise.changed.True": {"doc_hash": "deec0ae5ea077c4fde5712a277232eb3b26ba6ef9d68fcb110d1365eb1af2a62"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_rewrite_blockwise.while_changed__rewrite_blockwise.return.out": {"doc_hash": "1064d0288dac6e410608f49cbe0a1df92c81313257409336d4f62df3a5341e0f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_zero_broadcast_dimensions_zero_broadcast_dimensions.return.homogeneous_deepmap_f_lo": {"doc_hash": "d9fa4855fc586670b7bd6b6d4ff15231e8c191e19315b3c4c52d08469370d5fa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_broadcast_dimensions_broadcast_dimensions.return.toolz_valmap_toolz_first_": {"doc_hash": "609f4c2668123c0cb252783dfe20a45045ee9fd12bdc12b60ef98756903c17cc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_fuse_roots_": {"doc_hash": "07ccf83b42c2cddd9550d2de60c07acd06dcdfbe42ad7769a0a609a3ece73372"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/__init__.py_from_distutils_version_im_": {"doc_hash": "fda4a4846f28c94345582ab004b4c097c93b6943f2636b6183445ba79b338381"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/_compatibility.py__": {"doc_hash": 
"7565fd766589d9633f96ca9b3093bede4ad683f3c872c894b6033e37c601f366"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/core.py_os_is_integer": {"doc_hash": "91d18f6559605b755365cab79ee431280b52f159687147f07cc48af3d4ae23cd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/core.py_read_bytes_read_bytes.if_blocksize_is_not_None_.blocksize.int_blocksize_": {"doc_hash": "c6a77827e975438a976527d50812e88479c3ed55ff991a98c1fd90813aee1261"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/core.py_read_bytes.if_blocksize_is_None__": {"doc_hash": "955c7924242676f490e756df8e3bb427a8b24c5050f2fbd30602135d40b3fc3e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_bytes_utils.py_io_test_read_block.for_ols_in_0_3_3_.assert_b_join_filter_No": {"doc_hash": "6217888e97c8160e9be1fc7667d2b28f0af474b26f00bf8545dc5aaecca222c4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_bytes_utils.py_test_seek_delimiter_endline_test_seek_delimiter_endline.assert_f_tell_7": {"doc_hash": "1111e20246acb881045d62d1551a6c6e1d2f78a870f8d7f26e3a843917f38380"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_bytes_utils.py_test_infer_storage_options_test_infer_storage_options.None_2.infer_storage_options_hd": {"doc_hash": "38f6beea2666d33e75700e455cec00c953f9b89d014f76c9ab060a1c92dd88db"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_bytes_utils.py_test_infer_storage_options_c_test_infer_storage_options_c.assert_so_path_expe": {"doc_hash": "d7e45876f46fe81d11a2e1fbc896da4af7a97d0c1e7ec70cbd50bcd170e215f6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_bytes_utils.py_test_stringify_path_": {"doc_hash": "59915d54c3a6b46a109d903c6ac0f38f4a0feb3aa5bf9f57f6b13e7723ea49d0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_compression.py_from_io_import_BytesIO_": {"doc_hash": "f0055e8f0e2dcc4935c42b9325d5c6d6123cc6c9c3ba57efd125d49ed7a72597"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_os_require_pyarrow.pytest_mark_skipif_not_py": {"doc_hash": "3960023ba1f5e1955b49329cc6bc588bebda26fc1655bf7ed099df15379124f8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_read_bytes_test_read_bytes.assert_b_join_r_for_r": {"doc_hash": "17e9104a98916329a29e9911c181816795af594a8e21fbc0e7c9e8935861a9ff"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_read_bytes_URL_test_read_bytes_URL.assert_b_join_r_for_r": {"doc_hash": "6d290f359c2ce73e13d84c41f71d33f4aa505a5e296ce64df51cd612ff194884"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_read_bytes_big_file_test_read_bytes_big_file.for_r_in_results_.assert_set_r_decode_utf_": {"doc_hash": "e2aa272048157180819c62b75269064948e2c9cab78c8dfe41d921484e2ec7f6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_deterministic_key_names_test_deterministic_key_names.None_4": {"doc_hash": 
"c894a21720db49796c06a983ec53fea11ee11669ca20295a73dc66f1c72885a1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_open_files_write_test_open_files_write.assert_data_results": {"doc_hash": "d7703a848c3942b443cad6b1e28da8456073af09cfdccd449625531de91933a5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_read_csv_test_read_csv.assert_df_id_sum_comput": {"doc_hash": "252bbf49442f98304052d10aa56278b165d8e1d7f5de13c1a74d6a85c6b44ee5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_read_text_test_read_text.with_pool_.assert_result_a_b": {"doc_hash": "dbaa4ed9e6248e25cc032b003599a90954c96ed69c2549a771ce87c46c32a5e1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_read_text_unicode_test_read_text_unicode.assert_len_result_0_stri": {"doc_hash": "6be00033335cf817561472617f4cdf56dcc955f4a839a6c3595f25285b0930c3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_parquet_pyarrow_test_parquet_pyarrow._smoke_test_on_read": {"doc_hash": "04528981a2315c0c22ec4879fc9c83fbabadcce0e134b58bf511c5b49e5a63c2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_glob_test_glob.None_10": {"doc_hash": "a546086891053da7ab792a2f562d3783432026f87496972ba9d4bccdcd1c8bbe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_distributed_": {"doc_hash": "a06b4a0544b06d12fc9981ac8c9ce741e04a9f6871cf99bbdd729b23a6b3276d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_os_if_LooseVersion_fsspec___.errs.errs_aiohttp_client_ex": {"doc_hash": "af643b29e5c738b2e34212ff2f7625a26f8c05a4100be123df245303fe3c9670"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_dir_server_dir_server.with_tmpdir_as_d_.p_terminate_": {"doc_hash": "543868ad175113bd28ac22e0c51a595efd40858fc3d5fce3a5d4dfd3a4d53fcd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_simple_test_loc.with_f_as_f_.assert_f_loc_4": {"doc_hash": "b06471d6b35e78ced511b773aeb4f5f46d431ee32dc2ed0f729595d0262ecb1b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_fetch_range_with_headers_test_fetch_range_with_headers.assert_data_open_os_pa": {"doc_hash": "288072be6d336bf5508bf0921dc48252d9438ad0b625a8d389f768de33941466"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_ops_test_ops.with_f_as_f_.assert_f_read_data_": {"doc_hash": "285cbc1f2f212f06c606b1e71a4ac66d2af2e3d1b95bbaab55c38ce70add934f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_ops_blocksize_test_ops_blocksize.None_1.with_pytest_raises_ValueE.assert_f_read_10_data": {"doc_hash": "d719ed2b97fc5762437e421d505eac6b159d3f689aa8974b5bb8fa63786e3e6c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_errors_test_errors.with_f_as_f_.with_pytest_raises_ValueE.f_seek_1_": {"doc_hash": 
"bb3693934f8a4c8a12a4a6fd24f83701c75597fd6f41b8b2323234f576adc596"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_files_test_open_glob.assert_fs_1_path_htt": {"doc_hash": "9f61a5b6b74da2df91cf76b793bc13c8ece9a291fb19b348c280ec6ff18bc3b8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_parquet_test_parquet.assert_df_columns_tolist_": {"doc_hash": "60322eb456d80f9976158da6b01776d2942ce01e4866e119076a6bb17af146f0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_bag_test_bag.b_compute_": {"doc_hash": "b36d1753c0146b333e1f20bd7711bef7614d8694a5cdea8addef36c975e7d316"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_read_csv_": {"doc_hash": "48601d75535070ed8097f30899d0591c07734d7815c9962acf3e845413f5fae0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_gzip_test_unordered_urlpath_errors.with_pytest_raises_TypeEr.read_bytes_": {"doc_hash": "6d12655eb0a7453ce8202ef10fcb2f77f3e94e778c32bfeea0d5e022eb07364f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_read_bytes_test_read_bytes.with_filetexts_files_mod.assert_set_results_se": {"doc_hash": "14e9f27c323f969730088b35edcf975f9c88314e25c1651ced4336d1b3f4806d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_read_bytes_sample_delimiter_test_read_bytes_sample_delimiter.with_filetexts_files_mod.None_5": {"doc_hash": "2f1ef6a417886bb9e788047fd3d1e432b066071fee61487ec6d8774fcf61fbd8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_parse_sample_bytes_test_read_bytes_include_path.with_filetexts_files_mod.assert_os_path_split_pat": {"doc_hash": "f4ad7f14d6e26ceeeb3e713929b736047cf0e4dea04aa878e2a6b631cc5d7810"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_with_urls_test_with_urls.with_filetexts_files_mod.assert_sum_map_len_value": {"doc_hash": "378f8c73e8ad20a5b8d7e1abd384822cc1353a55a8ada6eab07d8de711415f5e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_with_paths_test_with_paths.with_pytest_raises_OSErro.read_bytes_url_blocksize": {"doc_hash": "dc67c81e3c35a81d571560d3e73152bad6898ec8a419b7a6f6c3ef245c6013e3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_read_bytes_block_test_read_bytes_block.with_filetexts_files_mod.for_bs_in_5_15_45_150.assert_set_ourlines_s": {"doc_hash": "268be65e857aefd26b81b0c0e7d2b503e9128ad3cfdb0ed3f2109f6e01424bd1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_read_bytes_delimited_test_read_bytes_delimited.with_filetexts_files_mod.for_bs_in_5_15_45_1_.assert_ours_test": {"doc_hash": "c00aa149c3d985184880516480e5485854e82877d60d19aecfebf3decb5323c1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_fmt_bs_test_compression.with_filetexts_files2_mo.assert_b_join_results_": {"doc_hash": 
"2efd6f79f9dfd10c3c61bc7d9e0cf1ac4a38ee70dba407486a11139f2396eb46"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_open_files_test_open_files_text_mode.with_filetexts_files_mod.assert_list_data_fil": {"doc_hash": "82d5ddfe4bb0ebdf839f914060559387dfdb86c7f5ba792ec2fb83e879255c17"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_open_files_compression_test_open_files_compression.with_filetexts_files2_mo.assert_list_data_sol": {"doc_hash": "12d7ec7511a915f63d817bb85e4cee78aa438899eec0c2cb5a2edff676bfaec4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_bad_compression_test_names.with_filetexts_files_mod.None_4": {"doc_hash": "490a649b957f7724c290edbd51ecdd961638e0429341d496eb5c86ec7de08139"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_open_files_write_test_open_files_write.assert_d_b_000_": {"doc_hash": "04f30aa83320efb1ffc97ad918333864745abafb3c6d1d2c363dd2994c49a42d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_pickability_of_lazy_files_test_py2_local_bytes.with_files_0_as_f_.assert_all_isinstance_lin": {"doc_hash": "e37b7e683c91fec8b488cb4ac3d0fc484b9603c2c6838d9fa9a9d38bf4eb3c59"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_abs_paths_": {"doc_hash": "55a27d4c579dffb8ca53c424899302165616a561836803dd08233efa91a0d359"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_io_endpoint_uri._http_127_0_0_1_5555_": {"doc_hash": "7577c5b9a4308ff25b4d8478530af716bb6eb8cc963f027fecdef1ec69ac2ccf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_s3_base_s3_base.with_ensure_safe_environm.try_.except_subprocess_Timeout.if_sys_platform_win32.subprocess_call_TASKKILL": {"doc_hash": "92688382e8539eca716dddf3e97c303e8524bef79ec08f5b36d421ccff8fc7cf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_s3_s3_context.try_.finally_.fs_rm_bucket_recursive_T": {"doc_hash": "0eda9d444523366de397597e4c3ea428a81c660ac5a36e2db7d1e5a96578c614"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_s3_with_yellow_tripdata_s3_with_yellow_tripdata.data._": {"doc_hash": "24827840c59b34d4891b700f730abf849af4c0706e35f405735911a01f25ac0c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_s3_with_yellow_tripdata.sample_s3_with_yellow_tripdata.yield": {"doc_hash": "3ca90b408789aac0bdab1317668187f4b1a59d433cc28b18cb6975a3d9db0daa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_get_s3_test_get_s3.None_1.DaskS3FileSystem_secret_": {"doc_hash": "935771fe071c23866b083a16cf4c735c9ed04fd75aebd2ce073794b79c2e3e57"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_open_files_write_test_open_files_write.assert_set_list_files_val": {"doc_hash": "bc103e079d1e56778fe14df0f0bb7dfe56f458b3e6a52a666ea9a2c577b35b68"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_read_bytes_test_read_bytes.assert_set_results_se": {"doc_hash": "b9be4e1b8d51bfa65b29cdf0875f4e5ddd946f226646d70ba3d5c9eae62cc37e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_read_bytes_sample_delimiter_test_read_bytes_sample_delimiter.None_5": {"doc_hash": "9c21a31fac64cb9e3768997d69cf7bf8a018e4592613e6b7c61a1057943cfec2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_read_bytes_non_existing_glob_test_read_bytes_blocksize_on_large_data.assert_len_L_12": {"doc_hash": "a2b8f6a874b433fb1eeb9b6a9011d3e18751a8f028b9b6625cd7f2833cfc780d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_read_bytes_block_test_read_bytes_block.assert_set_ourlines_s": {"doc_hash": "76abbdf08633d44e3cd8b60027675a61d00c86f00ea9ce2c4747a1e45d42effe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_read_bytes_delimited_test_read_bytes_delimited.assert_ours_test": {"doc_hash": "f83d106178a8e0511df9f4ebe838bca53576b37bd430b516edb349909195a53c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_compression_test_compression.with_s3_context_compress.assert_b_join_results_": {"doc_hash": "38d2de76df00cfdcee0c758dac139f83a2039688aca7169c7bdfc133c5276c5b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_open_files_double.lambda_x_x_2": {"doc_hash": "75da4b341127f02be9e66eb4e3d715aaf4ccc22a04accf79e1e63b7869ceb06a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_modification_time_read_bytes_test_modification_time_read_bytes.assert_aa__key_for_aa_in": {"doc_hash": "cca6a4b8fd46c46b0d1089e38202220a15f28bd8212fae078300c1a5f0c79618"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_parquet_test_parquet.tm_assert_frame_equal_dat": {"doc_hash": "4acbbfa6317539dea832eb5abcc3a65c5349cd4e647bd3ed657218e144ff6b32"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_parquet_wstoragepars_": {"doc_hash": "ad721d9bdd68215ba235de946861b5b011991acb4fd2b89706c3edcbe2e8cda3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/utils.py_io_": {"doc_hash": "0e8b3be2b21d7f619ca3b5844227d205f8bd5406b4fcec35c9e791b4a2674307"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/cache.py_Callback_Cache._pretask.self_starttimes_key_de": {"doc_hash": "ae8d0a66e0f02e6129e0bed76b50d21d42fc6060397d87fd5265bb017d033539"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/cache.py_Cache._posttask_": {"doc_hash": "72bad6928b0ce007ae8aef0d98ebb09b8e98684907c760163938532403125338"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/callbacks.py_from_contextlib_import_co_Callback.unregister.Callback_active_remove_se": {"doc_hash": "b50a9163e05881aab4137d8aa26f0e5e7673e9b258be7e751ca14a34277ac5a1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/callbacks.py_unpack_callbacks_normalize_callback.if_isinstance_cb_Callbac.else_.raise_TypeError_Callback": {"doc_hash": "f0f867f25560dfe15e74bd9cd365b546414b7e97dac7948e6647796c8f06b1b6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/callbacks.py_add_callbacks_": {"doc_hash": "2cb23fe5e8b952befc17cfbf62ff73948bb7219ef72e7bd5c658b569dbc1460b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/compatibility.py_sys_": {"doc_hash": "8b99453eef26e10d9600102cd3b233cc8d2414de014c9225133850f3c6fef666"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_ast_defaults._": {"doc_hash": "f9d8cc40d148ae499d75e621444b87dafd1fb682fdae8b2a87d0fb71ec45d80c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_canonical_name_canonical_name.return.k": {"doc_hash": "24b4eede0f411e9b88e48ce2800256b3def3b738acbd5facb059f63def9edccb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_update_update.return.old": {"doc_hash": "6bfdec22738e1cb99ba9e8ea1123590665b44915cd12470ff84da5d2cb0c0979"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_merge_merge.return.result": {"doc_hash": "4f023c9b50cf9f122f99233b73ba2b13d4cb0e227f111a5b17d3c9ebd507055d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_collect_yaml_collect_yaml.return.configs": {"doc_hash": "4c36eab724020a01665b14f5103ab39076b3d1c49cdb49e9c71db0d9dbe0b28f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_collect_env_collect_env.return.result": {"doc_hash": "bb6f532f74d5d0e871ebece2b0f2f7937d317714fa661a23e84276448735e637"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_ensure_file_ensure_file.try_.except_IOError_OSError_.pass": {"doc_hash": "c68a42cf4d9a2a4924dda93c6c7d36a5cd92a02a7c9bdc2da06979033509dad2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_set_set.__exit__.for_op_path_value_in_re.if_op_replace_.else_insert.for_key_in_path_1_.else_.d_pop_path_1_None_": {"doc_hash": "c3f0a85881f254547b3ecb5f475fe35f82412f0b4622e984495c3e8ef336b5ca"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_set._assign_set._assign.if_len_keys_1_.else_.self__assign_keys_1_va": {"doc_hash": "965e26cd5a7bf34f157119742b04ec598480008db9c9990aa30fe844f028de95"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_collect_collect.return.merge_configs_": {"doc_hash": "b07075bfa8db8a8f8d065179eedd013bba311f4b2e3a625e3074e84d5e14fe77"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_refresh_refresh.update_config_collect_": {"doc_hash": "7d6527189d04e6140fb5ed3a99ef10d82994a0abcd0564b9d588af6688df7c16"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_get_get.return.result": {"doc_hash": "26880a082b9a07922c05818f44cd92663e279f54e3e6c99edd8255e6e25e28d4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_rename_update_defaults.update_config_new_prior": {"doc_hash": 
"e98f4128efada59460cd1fed26e7013b4ad2ac42bba909201c989bf10e450c57"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_expand_environment_variables_deprecations._": {"doc_hash": "459b82751bce3c0cc0d12ed4cb8f00f798e7a31d4388682449f49fa2806c0850"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_check_deprecations_": {"doc_hash": "f4154cd89356c1bcdc306bad3fc103733893bf931b377b5574858a03928d631a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/context.py___": {"doc_hash": "475721192de14f078c7cccbe73f45249b3ad187618bf18df5bddc06973ebfafc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_from_collections_import_d_istask.return.type_x_is_tuple_and_x_an": {"doc_hash": "31a4ef00d2766bd28778e69d72b6a45aa58fa961f56c610dd9afcd071cfb855d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_has_tasks_lists_to_tuples.return.res": {"doc_hash": "71fc5cad846cfcee6e40c39aad569df3c988e01222cc75580c5d7892f3b5c5f5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py__execute_task__execute_task.if_isinstance_arg_list_.else_.return.arg": {"doc_hash": "6a15c3edf3515a7e0129829081f1f1a8fd758f503f9cef27de8f1de5275f331e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_get_get.return.result": {"doc_hash": "4bedb44b1d4dd98d92041367797addec36ccc412069899b02ca92093a5693afd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_keys_in_tasks_keys_in_tasks.return.ret_if_as_list_else_set_r": {"doc_hash": "5b9d5b6397ace8dee5f0a8b626c495381f1e76745a2d3335fddffdd867921532"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_get_dependencies_get_dependencies.return.keys_in_tasks_dsk_arg_": {"doc_hash": "e82606560c47aa77e9e170d6cace852f0fb977d1d4b1eedfbe4d56d0e69de537"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_get_deps_get_deps.return.dependencies_dependents": {"doc_hash": "5f40eb7e9d1d79c241cab8baeb72ca8111a242769952bfc7b5e0d96e9feca485"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_flatten_flatten.if_isinstance_seq_str_.else_.for_item_in_seq_.if_isinstance_item_conta.else_.yield_item": {"doc_hash": "246f78dbcbca5966916743d8064a689cea43a380410ac7cac2c9937de2358c6c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_reverse_dict_reverse_dict.return.result": {"doc_hash": "61f986782a27d6c5e414f730911857a427d8495e832d37f2e8d4c75fa83fd450"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_subs_subs.return.task_1_tuple_newargs_": {"doc_hash": "bc38bb6d15a6cb773f3145685866ee99074a64a64020939721feae67496ca44f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py__toposort__toposort.return.ordered": {"doc_hash": "8e24723829374d780bd3e3241dd14f88ff1bb792b95b1b09412da1f971395989"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_toposort_getcycle.return._toposort_d_keys_keys_r": {"doc_hash": "be82e487f0aff3c7fea0a460728bf14e83e073dd76ceefc1a4d5356cba1e0ab7"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_isdag_isdag.return.not_getcycle_d_keys_": {"doc_hash": "91fe022049e156aa453bb6be6028c4af997d7dee832020aaeb2918ca8516c691"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_literal_": {"doc_hash": "023cb3aaf8a9a03de489cfce9aa1a1dfd586c4653a28996b1f3d0ef151565801"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/__init__.py_try__": {"doc_hash": "521683552fdc020a63ee6fce96d6e5866223661ffa07497b2075276b9aeb6b1e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/_accessor.py_warnings_CachedAccessor.__init__.self._accessor.accessor": {"doc_hash": "765c87191d6c53c297521b76a49363407a7f3d78e2cfa1fa1a35b38059d30c15"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/_accessor.py_CachedAccessor.__get___CachedAccessor.__get__.return.accessor_obj": {"doc_hash": "b22c1c700f0fbdbfea12bfc1c112c361457c5bab0a7e41f826828f824e511543"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/_accessor.py__register_accessor__register_accessor.return.decorator": {"doc_hash": "c58c27689816dc68ab3412f073e24c91c9ba1a2ed864a286f05c58c1d8593321"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/_accessor.py_register_dataframe_accessor_": {"doc_hash": "3af87056283baa53a4880f4aff0e7a9fbaf40501123732a739a92294711539d3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/_compat.py_string_": {"doc_hash": "2624a18ee310681a9b42eacd076e50968c3fe1bbaa427b76276931fdde6b174c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/_dtypes.py_pd_": {"doc_hash": "93a2b592f7ada1fa18434dc9fd23a7bb4ac19dba7c87a49ecd70426f87123ab7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_np_Accessor._property_map.return.self__series_map_partitio": {"doc_hash": "ee118e4248a46d40dc7d6a677e19e06a2cc4cba699ae93ed4c7ea178feb8b428"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_Accessor._function_map_Accessor._function_map.return.self__series_map_partitio": {"doc_hash": "9e23b352026474fb8e053401faefd48a95047bf8b75903ac52ec5fc5234732e0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_Accessor._delegates_DatetimeAccessor._accessor_name._dt_": {"doc_hash": "ae90699491415b86db5bb5fec1acbdefecb3031f0f5b4e539461fc0db529ba69"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_StringAccessor_StringAccessor.split.return.self__function_map_split": {"doc_hash": "09fd9acc2fe72de9a595c9391ff4366c686683de5e9ecc38e1b5539b6721bcd5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_StringAccessor.cat_StringAccessor.cat.return.self__series_map_partitio": {"doc_hash": "f2ce97ec894ae71fb99c87c91e01fb6f484de6260c5818f14e0db39e5638d385"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_StringAccessor.extractall_": {"doc_hash": "5fb47a4bc9c2d58003713edb16b0795ebd7b9ae1309f5e04c4141d83fc5bdce7"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py_concat_dispatch_": {"doc_hash": "377267e8d1bb107de7e4a3bd4a4d9d3909c91fa4ffc76cd1b57e4539868183c0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py_from_collections_import_d__categorize_block.return.df": {"doc_hash": "53919e77e4260222c259dadb14cb191b1f1fb4c628242c2d1386f4a07be6e53f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py__get_categories__get_categories_agg.return.res_res_ind_0_append_re": {"doc_hash": "13b6dd5ca1507a61853bcc1d5f040d03195b3fda13da4600dbd8da31e35f1e6a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py_categorize_categorize.return.df_map_partitions__catego": {"doc_hash": "50a3cfa2c43e87965a0a096e48521a5220b959d15dc18b644c065e1ef3355627"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py_CategoricalAccessor_CategoricalAccessor.known.return.has_known_categories_self": {"doc_hash": "3fe5d4b5d07c19e830dc4de50278e7c496e59fe58593e189ebac366e2463bad7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py_CategoricalAccessor.as_known_CategoricalAccessor.as_known.return.self_set_categories_categ": {"doc_hash": "4429e7aa6782760148b07fb0ceae4f6328a430fdd814de528965c5470eb30a91"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py_CategoricalAccessor.as_unknown_CategoricalAccessor.codes.return.self__property_map_codes": {"doc_hash": "8661377c723524e9091d63c608ad7d644596bf781c37da4992f7ea7027edca11"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py_CategoricalAccessor.remove_unused_categories_": {"doc_hash": "5e1bbcccccb6cec1d5f448aa41c9bbf4b4c7296f6325aefd4a17bd3f11ee2656"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_operator_pd_set_option_compute_us": {"doc_hash": "f799b7a3010fa57c002a663578f7da130226fa817a2f7edf0113cbb4bf0d3eba"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__concat_finalize.return._concat_results_": {"doc_hash": "b84dcc351ff13456bfe236a0ea9fedd5aead305d56571756b4ccd4501b3ea9e4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Scalar_Scalar._get_unary_operator.return.f": {"doc_hash": "1b29d9fdcd3c6c48649cab318783ef4628a8c44769278786a31130ff2976c84a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Scalar._get_binary_operator_Scalar.to_delayed.return.Delayed_self_key_dsk_": {"doc_hash": "d9a0697c564d5fd4139bc9a2163437656b06807f3d6ee074ca341bc5d8072590"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__scalar_binary__scalar_binary.if_return_type_is_not_Sca.else_.return.Scalar_graph_name_meta_": {"doc_hash": "1b9df0138cf8f97caeffcee0ce14faf199cb2aecfc7b648f2100f93e0042d3e8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame__Frame.__array_wrap__.raise_NotImplementedError": {"doc_hash": "9d37ccba566ce35b7e16e4fd8e510452d2a201b5113c8a984c09c440800205f1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.__array_ufunc____Frame.__array_ufunc__.if_method___call___.else_.return.NotImplemented": {"doc_hash": "3bf199d1c4837a1759294d07bfbaf0829cca7f967aa73dcd5037adb39c5cbf15"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._elemwise__Frame.__repr__.return._str_fmt_format_": {"doc_hash": "fff89701229f9c59525d6d0e92905d758f760316b3549b76b3ffd15a95d1b3b0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.reset_index__Frame.reset_index.return.self_map_partitions_": {"doc_hash": "1c8f82b436848871be21dc1995a9005be898f5234f90910c29d4e339ceca1580"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.known_divisions__Frame.get_partition.if_0_n_self_npartiti.else_.raise_ValueError_msg_": {"doc_hash": "685c97b5d58309a3885638a2bca99bde942365feef7cfcf0e091ab7e6628f388"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.drop_duplicates__Frame.drop_duplicates.return.aca_": {"doc_hash": "7699ed750ce017b74c2dd525be1cf4f89bd3e02f425f0f8173ed996bf254970c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.__len____Frame.__complex__.return.self__scalarfunc_complex_": {"doc_hash": "39975b3dd1cbf8539912433f717dd9f3921a14c57180dd4668c4e0d192d0cf53"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.map_partitions__Frame.map_partitions.return.map_partitions_func_self": {"doc_hash": "3a0e7e55d49315d23c3eee4f19706a4433e3506a451cb63621b7ad969e70cdc7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.map_overlap__Frame.map_overlap.return.map_overlap_func_self_b": {"doc_hash": "9530658ccda9f39c0790b144660a4712a0f485750d23c01c3b4ddf7d0ede94da"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.memory_usage_per_partition__Frame.memory_usage_per_partition.return.self_map_partitions_": {"doc_hash": "566456afc062eee7cf0cc77adbcf44189dff337241c35c3fd878b47aa1b36484"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.reduction__Frame.reduction._Generic_row_wise_reduc": {"doc_hash": "ae5f9449a7dc0d4ddafba71dcc89d974112bd3f37688d214d19b8d2ef692de16"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.reduction.if_aggregate_is_None___Frame.reduction.return.aca_": {"doc_hash": "ff4fcec0d10b35a4d5f34fb6b965b9458aad25c7a2638e2bdcd9d082dfff844f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.pipe__Frame.pipe.if_isinstance_func_tuple.else_.return.func_self_args_kwarg": {"doc_hash": "9ed7dd07abe16eb032828dbe16e31ed334762fedefea4a1b8b5fc188a8bc414a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.random_split__Frame.random_split.return.out": {"doc_hash": "45644ce7bc4b37088abc2cf95e5c08f59079189b39b51b5262b652eba6708a2d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.head__Frame.head.return.self__head_n_n_npartitio": {"doc_hash": 
"8287d0bb05af37192897fa1fb90671123417dfe77a5e588de6f3cbe2f3953398"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._head__Frame._head.return.result": {"doc_hash": "bca8e1671df2f362650b0c61a7a4cd4cf991214d1fc4c7198f16ede965366af5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.tail__Frame.loc.return._LocIndexer_self_": {"doc_hash": "c2771e74c29a85be5caf1897fcf9bf2074847febcedd34c8e493a9e740dc0427"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._partitions__Frame._partitions.return.new_dd_object_graph_name": {"doc_hash": "064d36739508c8a7e51aad2b9f46e4d70ab1f257d62c342760313b6f2ee8a2f7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.partitions__Frame._Note_iloc_is_implement": {"doc_hash": "72ef4e3b8aaab183dd8b016670907d736b6f0858624794a9af9128bb3e04023e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.repartition__Frame.repartition.if_partition_size_is_not_.elif_freq_is_not_None_.return.repartition_freq_self_fr": {"doc_hash": "2ff5142f9557d077d6df713a8a3f001b002430495820c2c0c3d8bfffb7733c4f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.shuffle__Frame.shuffle.return.dd_shuffle_": {"doc_hash": "ef47e44ee1f06ef4d41778431def17c806e29ec7d633c916182ea8c1712ff288"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.fillna__Frame.fillna.return.parts_map_overlap_": {"doc_hash": "41acfc559a9c2c8a81e4a07b8ed048a3571b46a326600aa0e5a50f90697c391e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.ffill__Frame.sample.return.new_dd_object_graph_name": {"doc_hash": "48fb49bfca56caad92a15e1637c5ab43f643ce875d3133d9ae22cf92eebfcd56"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.replace__Frame.to_dask_array.return.arr": {"doc_hash": "44716012a1a8745f682a7b90874959cc34a18bbb24321528fe0bc7e8771d9838"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.to_hdf__Frame.to_sql.return.to_sql_": {"doc_hash": "1cb15928e1cdbf0a76376a891d9bfeabf3ea67d3cef125e70fa8bd4706b07d70"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.to_json__Frame._get_binary_operator.if_inv_.else_.return.lambda_self_other_elemw": {"doc_hash": "d1f0c5ae418bd027ab4ec59adeed8e4c528c4bd3d904d2b831afaf15a38fcd43"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.rolling__Frame.rolling.return.Rolling_": {"doc_hash": "2abe969aa449b8ad70737da837dda34760c4ebd3f0388ae67543114b4eda6639"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.diff__Frame.diff.return.self_map_overlap_M_diff_": {"doc_hash": "4fb6f2bed1fff80b61e6136ae04903e2a068c9042820c51ba2df8c6fec701d95"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.shift__Frame.shift.return.maybe_shift_divisions_out": {"doc_hash": "7ff6f98b12af221c1b7060759422f6cc5392826fe8a62eb94418fb700e73463c"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._reduction_agg__Frame._reduction_agg.if_axis_1_.else_.return.handle_out_out_result_": {"doc_hash": "93a24f926b775aa614315d65346b94cda7b2b360fc3494fbbcfd2fcc3f467a7c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.abs__Frame.any.return.self__reduction_agg_": {"doc_hash": "8b27d84e79032af37719d3a92d2ae62e5ab2c6fb685fac20add5836f4e930708"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.sum__Frame.sum.if_min_count_.else_.return.result": {"doc_hash": "691c1e57d369b4ebd5c68f5cfdd5b38ecfc82e1402cd735811cd9b00abc7b149"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.prod__Frame.prod.if_min_count_.else_.return.result": {"doc_hash": "af9a9edb100874c09f32d01e333743804c62f4b174ff668168bf312eea70c544"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.max__Frame.min.return.self__reduction_agg_": {"doc_hash": "5a41683c856e500fdf2ac99cca7d574083950999355fa6a191ea471713303d12"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.idxmax__Frame.idxmax.if_axis_1_.else_.return.result": {"doc_hash": "f7c27d7bf5a0c74cadf9b7ec6d79728efbf4ae170d46da473fc0ace730bd6957"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.idxmin__Frame.idxmin.if_axis_1_.else_.return.result": {"doc_hash": "6a61682e16b95f5d60869ac551c85bde75b5d118cfd54e7333eb2d3067d060e0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.count__Frame.mode.return.mode_series": {"doc_hash": "27e3ae66a6a79012081a8103d47c747d1ff1e6401cacd3434c7fe7321403b43b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.mean__Frame.mean.if_axis_1_.else_.return.handle_out_out_result_": {"doc_hash": "4df97813876e5885f1855a1eb32815c8cae2b5427042684bb2eebf15a8fd0f06"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.var__Frame.var.if_axis_1_.else_.return.handle_out_out_result_": {"doc_hash": "6a1243de6056594fdc3f4fc4a55866ee97586c92ed597df311b16d1a94183772"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._var_numeric__Frame._var_numeric.return.new_dd_object_": {"doc_hash": "b7fa964f6efbf1f94fb4a9a845c074ac792323355796adfe85000e4d3cd5099b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._var_timedeltas__Frame._var_timedeltas.return.new_dd_object_": {"doc_hash": "a7ec4dccf1f23b4e2d44db5957890a413f0d1d710fec54e395f8a7ef77befa2c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._var_mixed__Frame._var_mixed.return.new_dd_object_": {"doc_hash": "86cb136dfe73aa3bf5699137681cdf4d7c398e44ebeffb4178af96be340710e9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._var_1d__Frame._var_1d.return.new_dd_object_": {"doc_hash": "db060ec45f72cc56f63fd415cf0ffa4ea99df150eff7c3aa69a33f5ec04094db"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.std__Frame.std.if_axis_1_.else_.return.handle_out_out_result_": {"doc_hash": "774f268fa4c3dac8b2440ac07164746a4e86499610b97c62e8adc80af1f5e8c8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.sem__Frame.sem.if_axis_1_.else_.return.result": {"doc_hash": "f44805fad7134e8cf2945a7e407adb65297fb023070d19fd593fcc9dbebb269f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.quantile__Frame.quantile.if_axis_1_.else_.if_isinstance_quantiles_0.else_.return.DataFrame_graph_keyname_": {"doc_hash": "9161383470383603b42bb07ff6a397652ba6f0b748c5eaeb99ba31eba2ef2e5a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.describe__Frame.describe.return.new_dd_object_graph_name": {"doc_hash": "b63726bdb0887a4441b1aa4f656cd377b6d6418718422e6b8f20b21800fdb4ff"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._describe_1d__Frame._describe_1d.if_is_bool_dtype_data__me.else_.return.self__describe_nonnumeric": {"doc_hash": "e033b4d517ab30419065c2fde3f842703c53f0a9e32a198b4f9b6ff2183794e9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._describe_numeric__Frame._describe_numeric.return.new_dd_object_graph_name": {"doc_hash": "1fde032f5316859628829e82a48197721df09204e877023133638206ac94e813"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._describe_nonnumeric_1d__Frame._describe_nonnumeric_1d.return.new_dd_object_graph_name": {"doc_hash": "221070ab4890e7d7a6e4e49f68b8d8542a32a8b1daf9e170318c644353a16d85"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._cum_agg__Frame._cum_agg.if_axis_1_.else_.return.handle_out_out_result_": {"doc_hash": "4868ce49bb95d082298799bec018d14c1a1c131f8493fbb2d5f8de36feb6e08e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.cumsum__Frame.isna.if_hasattr_pd_isna_.else_.raise_NotImplementedError": {"doc_hash": "cfcbcbb9f894c5662cd32757a4f88da31e1bea3c40f2ffe18bd43add374b5f6b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.isin__Frame.isin.return.self_map_partitions_": {"doc_hash": "bb0eec5f1f2df6265e9c6fab6aed3627b7e8928255edabf886942d9920a1c12c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.astype__Frame.append.return.concat_": {"doc_hash": "48d1a993f62ed5921978f22aaab960af3636e572362bac50220391a8aa031655"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.align__Frame.align.return.result1_result2": {"doc_hash": "a2622bd6ef1ac9f414987e5db81dce7f98dd537d67d0f5154b754e033a964157"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.combine__Frame.resample.return.Resampler_self_rule_clo": {"doc_hash": "c7579590b597b5c063ec23f5fcb56e547a32411c4335b6eb4a37b0a75ea3df53"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.first__Frame.first.return.new_dd_object_graph_name": 
{"doc_hash": "8bf54a99f6adecf0a8666ad32f2715c2eb1c0623e12601b5f2d5efd08ede7597"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.last__Frame.last.return.new_dd_object_graph_name": {"doc_hash": "99e3624e8ed7e87fc603e7958157518dd0daaad3ca19e8ec56563044ba0601b4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.nunique_approx__Frame.nunique_approx.return.aca_": {"doc_hash": "b939ae26f9f169c7c12fecab2b86c985636022f598f54492ba0109cefbebf3c1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.values__Frame._validate_chunks.return.arr__chunks": {"doc_hash": "d10cc0225998f2da95c48d1863ec100dcd32c90bbc0a8b5122173ab94b18cc40"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._is_index_level_reference__raise_if_object_series.if_isinstance_x_Series_.raise_ValueError_s_no": {"doc_hash": "1b911491c258916531ab0de438b2ffea19fe6b9d48b8c31ecc854f9be93d14de"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series_Series._repr_data.return._repr_data_series_self__m": {"doc_hash": "4fb2a8a6828e41119c951efd5b050fb0c5fd6187a482265d52f1624621286f7e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.__repr___Series.__repr__.return._Dask_klass_Structure": {"doc_hash": "1fac02675e536538ce6e4f0a07bf9e59213a07a8aee309d57b173db9f1587901"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.rename_Series.rename.return.res": {"doc_hash": "701dd6921892f2fde30cdbd39ca11b789c17e54847e0d2c1f6e4cd9144963ee2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.round_Series.quantile.return.quantile_self_q_method_": {"doc_hash": "7524d3f4ef393c59ac190930d9f4d5731dc6709de67c75d7996610cd3d9f9f00"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series._repartition_quantiles_Series.__getitem__.raise_NotImplementedError": {"doc_hash": "0d17bde1367e3712f1947a155ae7882b147c56845aedbd80552b3654bdbb0878"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series._get_numeric_data_Series.nunique.return.self_drop_duplicates_spli": {"doc_hash": "2d414e3a62168fa2e79f3156ee1df9bfdab1af6c549889fd45e0721dbdcd811a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.value_counts_Series.value_counts.return.aca_": {"doc_hash": "e456b5e968594342a70929442836bd24bb8d64f2484c0ae53afca2b0a5938b58"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.map_Series.map.return.type_self_graph_name_m": {"doc_hash": "95eb24510c59fe36d778ff704b1b24e599bfc428676b7361a5778f889faefa00"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.dropna_Series.to_string.return.self__repr_data_to_stri": {"doc_hash": "7b896d7b0b815d1a474daf48501752803cda90cdcb05c4c41d32410ada69a172"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series._bind_operator_method_Series._bind_operator_method.setattr_cls_name_derive": {"doc_hash": 
"1a2a51eb5f2140dcd71ebdf494d369dc5233cbde098acf0d8de1b52ab004dabd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series._bind_comparison_method_Series._bind_comparison_method.setattr_cls_name_derive": {"doc_hash": "ea772cf4a3ab2db025a8ff8b9fc366d0e71cdf61fe245a828da3df43fb4852cb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.apply_Series.apply.return.map_partitions_": {"doc_hash": "53e50abcf4318436c215ba6fbcb522c62704a9ec4118e94e781c31fc9b7d94d6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.cov_Series.corr.return.cov_corr_": {"doc_hash": "5a2b8e8f66f40ec88b1938e3b596f6294ce91897619cc226a6d7f20c3f46d49e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.autocorr_Series.__rdivmod__.return.res1_res2": {"doc_hash": "a5a5954b4e50a2274a402faf4a0b349adc3a79cfc26e2ff6e8a468839fc71288"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index_Index.__array_wrap__.return.pd_Index_array_name_self": {"doc_hash": "e9fe5fcf63d3b426cf1d3a5c9648f7153c04d514155d6c483ec665429a067c9f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index.head_Index.head.return.result": {"doc_hash": "427c8a052dce818b496bea08f129808b44211dce6b1a8d934c9c7e342750fd07"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index.max_Index.count.return.self_reduction_": {"doc_hash": "c551c26b9057dd03164f0150e4a391558e505de1f0c86284476e58b53665cce7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index.shift_Index.shift.return.maybe_shift_divisions_out": {"doc_hash": "d26042185c3f9b8fd7a57f96a38a95895ac99e70e4bcc6206cee91a859881377"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index.to_series_Index.to_frame.if_PANDAS_VERSION_0_2.else_.if_name_is_not_None_.else_.return.self_map_partitions_M_to_": {"doc_hash": "ebf3357f81ffc90f959676cf91cd814900ccf3aaeb7a86ce8c16434f9fb5351d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index.map_Index.map.return.applied": {"doc_hash": "f13c349cc9b30d48e44345c3d28333d2e4e230364378d2570001f76700e50d19"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame_DataFrame.columns_2.self.dask.renamed_dask": {"doc_hash": "6ff72d291b50e40b45002d63581947e98bd28e6989a3ada3e212e52ced6b0e06"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.iloc_DataFrame.iloc.return._iLocIndexer_self_": {"doc_hash": "a341558da15c3e6ca6b652c585aa40ba27f55b87b81f863a4b01046dc7ccf019"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.__len___DataFrame.empty.raise_NotImplementedError": {"doc_hash": "4b84a20345f44c2f2662e1a944d183f0fe7a74c31e2b1807197e31f20a119d38"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.__getitem___DataFrame.__getitem__.raise_NotImplementedError": {"doc_hash": "63d971d810e05a99bada414c985536ca09853e422261837f7b1546c5764e2d2a"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.__setitem___DataFrame.__setitem__.self.divisions.df_divisions": {"doc_hash": "c66c10c2e2854ecdb1ec20fb5a2ee38ccc33e3492248cad644fc6883712b07be"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.__delitem___DataFrame.ndim.return.2": {"doc_hash": "955dca6611f9d8d6f14abc10078d73191d0535291e886dc70cf8b08afb6e0058"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.shape_DataFrame.shape.return._row_size_col_size_": {"doc_hash": "b2bbebfa2b85687629a7d3b84796ce974dd4da4205f96939f437413a3dfeadc1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.dtypes_DataFrame.select_dtypes.return.self_list_cs_": {"doc_hash": "c816ace090536629472c20ed51bf859ce012f24e764021d0ad023bf94f683bc9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.set_index_DataFrame.set_index._Set_the_DataFrame_inde": {"doc_hash": "61ad7d8dc0fac39988da152491abd23ad0a8d01cc45940b846d4a0f5ff2ff3b5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.set_index.if_inplace__DataFrame.set_index.if_pre_sorted_.else_.return.set_index_": {"doc_hash": "39f7486cf7b22ff837537f6794c543183d420d42c55737cbd4a8b9a71ef71503"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.pop_DataFrame.categorize.return.categorize_": {"doc_hash": "671fb28cae3f0fecb09a45b6ce53267a8733df53906c54dff4ac2eec4c26ae7c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.assign_DataFrame.assign.return.elemwise_methods_assign_": {"doc_hash": "78b392aee844b9f3577496032cc81edd8e63995a64015553107845eb55d82a4c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.rename_DataFrame.query.return.self_map_partitions_M_que": {"doc_hash": "16107b1828bf4e6f0f5146f319a0143ba2479b59fcfa268a147ae1c4b96e26c5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.eval_DataFrame.clip_upper.return.self_map_partitions_": {"doc_hash": "65ccd11e4851ef3849ac09cd937a5647ddce54ad24eaf43104baf17d155fd587"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.squeeze_DataFrame.squeeze.if_axis_in_None_1_.elif_axis_not_in_0_1_N.raise_ValueError_No_axis": {"doc_hash": "622cbb0e95ae1b90577a3331a39b6c9c2c2a1b45014dc04d4ae787f28c8d5178"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.to_timestamp_DataFrame.drop.raise_NotImplementedError": {"doc_hash": "2b3451ce7311e0522ff93f48a21e010bb3e7b12262c0a112c39a5b2903e4475c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.merge_DataFrame.merge.return.merge_": {"doc_hash": "9e4409becea884cc092ace9e73e42be99b5024286f513c710e63386ea15775c3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.join_DataFrame.join.return.merge_": {"doc_hash": "94dfd94988d49c99fd27e1feeaeb10b261b1c9ac2d5b4aca6cd4b078e3ba9d8d"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.append_DataFrame.items.for_col_idx_label_in_enu.yield_label_self_iloc_": {"doc_hash": "1b22b5a00ae26a2b032a0312ec7c713582567a08e20e3ba66ad7ad423ddedf29"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame._bind_operator_method_DataFrame._bind_comparison_method.setattr_cls_name_derive": {"doc_hash": "9a12bfc8f11f5dfd5c7f8084af8ea1b31b7a43fc9ce26cd2136b62c634860b5a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.apply_DataFrame.apply.return.map_partitions_M_apply_s": {"doc_hash": "fc444d041e4eb0fba8799a5f076467a9e5ea8eaa0b4f233c300715c3e88be19f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.applymap_DataFrame.corr.return.cov_corr_self_min_period": {"doc_hash": "213110f84e3eb52d37fe01022a38b3d15c39724dc13d5233998a1a455d05228a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.info_DataFrame.info.put_lines_buf_lines_": {"doc_hash": "ccdaae9e218f748cc7e1d8f6a36b89580bce1c2ef4e84533269d95db1a13fa88"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.memory_usage_DataFrame.pivot_table.return.pivot_table_": {"doc_hash": "98342371e3e99524c7a300b46b692b95b50b4570f8fb11980b3dd8d77e8b561a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.melt_DataFrame.melt.return.melt_": {"doc_hash": "b855cf83783db9df009269f6d437c7d9427f78b385ff3485ddac1680b8122766"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.to_records_DataFrame._repr_html_.return.self__HTML_FMT_format_": {"doc_hash": "1d2ec3cc8ff6bb125c41df9b6d50ca24e61e46626edafb3f5ee4cf560f8254da"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame._select_columns_or_index_DataFrame._is_column_label_reference.return._": {"doc_hash": "7b02ed53f82733ea07a03eaca07ad8e904a637801205167da72c6dbb131208db"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__bind_operators_is_broadcastable.return._": {"doc_hash": "45fc0d87506b8b013c86473a6500d6dabaff9a9ad0fe85579340305f8ba5b337"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_elemwise_elemwise.graph.HighLevelGraph_from_colle": {"doc_hash": "1ee7b4f931ebc0cf041d553dfe921531486e2194bcffcf9b4ede03b1875609b4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_elemwise.if_meta_is_no_default__elemwise.return.handle_out_out_result_": {"doc_hash": "3e686f5f42db7f2d03d02c866c81edfd71ef895ea6a440ba2dd95b270f616c5c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_handle_out_handle_out.if_isinstance_out_Serie.else_.return.result": {"doc_hash": "d0d63cded88364fd708b4fa5d084b5c967bbca93c2df1f104369ac366d5f1d12"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__maybe_from_pandas_split_out_on_cols.return.df_cols_": {"doc_hash": "7b603e8d7fe76fdae872b8d64015486a850d9a008224c498f01a2e93fd71a68f"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_apply_concat_apply_apply_concat_apply.npartitions_2.npartitions_pop_": {"doc_hash": "e2144bf657021c24c64cea3b3f823104793a5257769eac082a1f60daa6c04b7f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_apply_concat_apply.if_split_every_is_None__apply_concat_apply._Aggregate": {"doc_hash": "4a9334192f9ce251108e3044e5401f6fe0109f66a98447c79a6e371515cf75dc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_apply_concat_apply.for_j_in_range_split_out__apply_concat_apply.return.new_dd_object_graph_b_m": {"doc_hash": "09737d3d8c04c9134d70fb93b567b31314e011b09d30e748cbea7a9e3457b538"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_aca__emulate.with_raise_on_meta_error_.return.func__extract_meta_args_": {"doc_hash": "b5c64d9dd138b94728b42f54ccf8f724850492ef0bfbecd8e0bfca5d966e27d9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_map_partitions_map_partitions.divisions.dfs_0_divisions": {"doc_hash": "e0678a1378dc33a79a2be578981d03439f7b3df6d2d65a76983303f6803c94af"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_map_partitions.if_transform_divisions_an_map_partitions.return.new_dd_object_graph_name": {"doc_hash": "90fd162ad9f391b05a7655280a7cace759352a13bde5223043c09582c9578d96"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_apply_and_enforce_apply_and_enforce.return.df": {"doc_hash": "e606df89878704585505bd938ec7e5b86a4b8d62fecb7b1176162b0b6b6f4800"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__rename__rename.return.df": {"doc_hash": "13a8f72b99544060d30010ee8cb4303eb6bdf6b69b835d9d1603e7edf4500399"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__rename_dask__rename_dask.return.new_dd_object_graph_name": {"doc_hash": "df45bdd23834d838fa8156c4f37e502ec002aa7d6b21a1413ef3c9aeb1994d25"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_quantile_quantile.df.df_dropna_": {"doc_hash": "9943f3dc733e8803b3b08970f330b6893d65025d80fd37d5369f4ff232b77d97"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_quantile.if_internal_method_td_quantile.return.return_type_graph_name2_": {"doc_hash": "5d938e5daeaa84293ca7b88915fa391a5d55857d0062324d12d9a82d276eb5bf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_cov_corr_cov_corr.return.DataFrame_graph_name_me": {"doc_hash": "d34fd5813cbe1e20864722360ce2eb988704c699fcaf77362e1ff82401cac3b9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_cov_corr_chunk_cov_corr_chunk.return.out": {"doc_hash": "6a3e622bccf742dd2abb8b0d2b853e5aa6f8653e2677e607874b165fb48311d9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_cov_corr_combine_cov_corr_combine.return.out": {"doc_hash": "b7fea35d5c949f8cf370a4989d031c9782189bfc4cbdafe3701750d4c66ae895"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_cov_corr_agg_cov_corr_agg.return.pd_DataFrame_mat_columns": {"doc_hash": "588eccccb28d73ab249cb04496485bc00fae6581e02e1e67d1024159d90e3cef"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_pd_split_pd_split.return._df_iloc_index_i_for_": {"doc_hash": "239ba94d59c461347575acac40abe2852e1e6a5cd5457bc216bbd8f39d68c7df"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__take_last_check_divisions.if_len_divisions_1_.raise_ValueError_msg_": {"doc_hash": "581400b19a60394c9fef2a5f05b0e7831c1a684293cbf7282fb98c4307857cc8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_repartition_divisions_repartition_divisions._left_part_of_new_divisi": {"doc_hash": "6c7e85736692e95b7be6fb8f51305fb1944a001f126e620064c830560fa2d3f2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_repartition_divisions.while_i_len_a_and_j__repartition_divisions.return.d": {"doc_hash": "f083b87dfe44bd2a5bd7654949845d8b42b0108f4cb8343f5eb7b9263ac18d82"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_repartition_freq_repartition_freq.return.df_repartition_divisions_": {"doc_hash": "05eea15e4b2f5e87771d985418451f20ce262c2ae141c1cb92b04c79bdda864a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_repartition_size_total_mem_usage.return.mem_usage": {"doc_hash": "a5a7d4006910cc373bb6951723a26a34d090b2024b5bcc9d5f1f41fdc3d6beb2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_repartition_npartitions_repartition_npartitions.if_df_npartitions_npar.else_.if_df_known_divisions_and.else_.return._split_partitions_df_nsp": {"doc_hash": "2258d10ba60f8fc5bad0653fd52138fe04a7fc5458f0be268f01fb642781e3c5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__repartition_from_boundaries__repartition_from_boundaries.return.new_dd_object_graph_new_": {"doc_hash": "2e553d7af69e861b4d34868ab4241cc0e302e85d6388eca071cfff898821d03f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__split_partitions__split_partitions.return.new_dd_object_graph_new_": {"doc_hash": "22a30028ae05ea4b7ae0807b2530f485a7a02e0f153ae0bb4d76c5b889eb4572"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_repartition_repartition.raise_ValueError_Data_mu": {"doc_hash": "860f1142482c7b92bbb80e5ce0668f41054b4351f04c3339f45e26d53faa0269"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__reduction_chunk__reduction_aggregate.return.aca_aggregate_x_kwargs": {"doc_hash": "f30416425faba5d33e90d2710eb5ad8417348d089c6e522cd70fadcbc1b78786"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_idxmaxmin_chunk_idxmaxmin_chunk.return.pd_DataFrame_idx_idx": {"doc_hash": "d06e3b38461269b7176649a041f2b988da4e69eea5d565ee8e616bd2b36035ef"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_idxmaxmin_row_idxmaxmin_row.return.pd_DataFrame_idx_idx_": {"doc_hash": 
"97884027d4d7f08232744cce68ce1cb6d9d07c102f3a432c8e9088b3fa62d4f9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_idxmaxmin_combine_safe_head.return.r": {"doc_hash": "065f8bc05743b0fe64ad1b5ed90f35489d261dabaf466cf56ce79b15b4955265"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_maybe_shift_divisions_maybe_shift_divisions.return.df": {"doc_hash": "0a95863a8801bc1afdac2bdb2d31343f546a6bb05e6214aa19b728bf1b6ea61d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_to_datetime_has_parallel_type.return.isinstance_x_parallel_ty": {"doc_hash": "4b54d14145afe94d6da44136c7589974caa21cc8a55b064d5b5402163bc356ff"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_new_dd_object_new_dd_object.if_has_parallel_type_meta.else_.return.get_parallel_type_meta_d": {"doc_hash": "cb3f3f12c151120f6facec028d39b866b6f9ac236b7189407f82616cdd860e03"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_partitionwise_graph_partitionwise_graph.return.blockwise_": {"doc_hash": "0e084714c5862b9b457da9d543f78706d97e41e7aba75a5411e69724cf781486"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_meta_warning_meta_warning.return.msg": {"doc_hash": "c8981c3cfd6be84d9502d443e49823edd84a62db4f227f47fa3ef1e96007de39"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_prefix_reduction_prefix_reduction.return.new_dd_object_graph_name": {"doc_hash": "543a63a550ddc2c2fc9b4b60e50901f9f33c07900028af2dfb3e3ece705edee5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_suffix_reduction_suffix_reduction.return.new_dd_object_graph_name": {"doc_hash": "a6d50e7a76f40a9ac3519563e65d3f5fe3c61a0d0bdf5a0d041c26880a43f85b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_mapseries_": {"doc_hash": "6657b60e903c61c4b560c34d51238ae599647a4f2f81f885a6cb608fa947a412"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/extensions.py___": {"doc_hash": "9e24fbef30bf6797f341bfe58f74cc2d6d87f92ff66abfa6b4687331deea7418"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_collections__determine_levels.if_isinstance_index_tup.else_.return.0": {"doc_hash": "8df0984fb7ef69aaef201da0e47e8250e4c97cb830588aa15de80ae04d276e8c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__normalize_index__normalize_index.if_not_isinstance_df_Dat.else_.return.index": {"doc_hash": "857ea32c5f593980328fbc72740cabe8c538fa858a1d9627346c8cdcfb3d3ae1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__maybe_slice__is_aligned.if_is_series_like_by_or_.else_.return.True": {"doc_hash": "dd263fe3b986a09ea3f156eb9c7ec873e81e1a31ef2a978412b2b6604d659db8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_raise_unaligned__groupby_raise_unaligned.return.df_groupby_kwargs_": {"doc_hash": "3ffac40f1522ea470028db4bd53233fbb0a7bba05ef983ed28cb4528106264b5"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_slice_apply__groupby_slice_apply.return.g_apply_func_args_kw": {"doc_hash": "889c56cfc4b6574e0a3a33c58f1f48d073dcfa428bcdfa87b2b072ccc88478e6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_slice_transform__groupby_slice_transform.return.g_transform_func_args_": {"doc_hash": "2a07905ef922ebcf56a54588733507f68e763374dccb60d71f02297745474f79"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_get_group__groupby_get_group.if_get_key_in_grouped_gro.else_.return.df_iloc_0_0_": {"doc_hash": "0e21f5f0c941126b6b905d108a78fc2851f384b4e2205c30b5df59291cb6a0db"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_None_31_Aggregation.__init__.self.__name__.name": {"doc_hash": "ed92f88b6d4b7c617df048474ff598ee10577bd0955008557dd67fe230ad04b2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_aggregate__apply_chunk.if_is_series_like_df_or_.else_.return.func_g_columns_kwargs": {"doc_hash": "af106093cbbcf805773a81b817b3b7ee0e1ef75a103f145819dc9ad9d283226a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__var_chunk__var_combine.return.g_groupby_level_levels_s": {"doc_hash": "099e917bf709f8fb3c60692cee31f28f2e1ff34bdbca67f707be0d6e37b8e817"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__var_agg__cov_combine.return.g": {"doc_hash": "03f7957ce8358488feb7cf6cc6450d26fa4b8be64d97d4ade41013de17794f5d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__cov_finalizer__cov_finalizer.return.pd_Series_vals_index_ind": {"doc_hash": "4cb544824ba2e969e3ba2e97ed34661666728903b4e417faa2d68369d63e2eea"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__mul_cols__cov_chunk.return._x_mul_n_col_mapping_": {"doc_hash": "e375d3a6f241c1fb1b2e9d1e82dad003546e54b227c6335170cf883d710c18bd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__cov_agg__cov_agg.return.s_result": {"doc_hash": "39171ff7636c69e4535ad23bd95d2a8b0133b9f8db922416714836351873b4fa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_None_34__nunique_df_chunk.return.grouped": {"doc_hash": "7e8bd5600b29b12d971a26054a912fc635f5b862b0c7c555dea80bc302395000"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__drop_duplicates_rename__make_agg_id.return._s_s_format_fun": {"doc_hash": "1acc2ee09be71d88cc965410998039ae5415fed72fba7bd0d07e74963fb974d9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__normalize_spec__normalize_spec.return.res": {"doc_hash": "fabec08d5635c4f525b38843ebf997feb9353ae998e131312590b8a4002c777a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args__build_agg_args.return.chunks_aggs_finalizers": {"doc_hash": "b0bc0c291688de95a3e8a48fe1e12ac5a9ea98286e959bc04f4b2e91e59da406"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args_single__build_agg_args_single.if_func_in_simple_impl_ke.else_.raise_ValueError_unknown": {"doc_hash": "6d14dc1f036b2aabce2df1f08034ed94552a4d943ec6463cbba0d591b5eafec4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args_simple__build_agg_args_simple.return.dict_": {"doc_hash": "b6355c9cd1ed256be6e11db3c3d96c0154454e8b0164418bd86f13416aaac80a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args_var__build_agg_args_var.return.dict_": {"doc_hash": "9f8e8695ae0d09a48fd5b6a08d764aacbfac2d306b51a8798af73ed685bc926b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args_std__build_agg_args_mean.return.dict_": {"doc_hash": "7502a9e5211d2e3a9ea79236e700fffad2984d8e9a0ccf017a8520c144b966c6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args_custom__build_agg_args_custom.return.dict_": {"doc_hash": "635def7548548d02fe44efc4a579486cb0ca1006ad55cbb2718e2aa0c532f399"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_apply_funcs__groupby_apply_funcs.if_is_dataframe_like_df_.else_.return.type_df_head_0_to_frame_": {"doc_hash": "dfee4438d04716548a2f6127a89e3efd7a73232185b02156593c5ba0dda80304"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__compute_sum_of_squares__compute_sum_of_squares.return.df_groupby_keys_sum_": {"doc_hash": "12ebf6bde14d424b434453d449b63181b1bb762701e54784c41f44147867743e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__agg_finalize__cumcount_aggregate.return.a_add_b_fill_value_fill_": {"doc_hash": "6295c0a41bd498ada3f6dfefd966c1203177dbe1bfa208f2ea39356909e49174"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy__GroupBy.__init__.self._meta.self_obj__meta_groupby_": {"doc_hash": "d2b3aebd1b25d3bbcecccac75bbbf7fb8eb255d67bab69dd0f56293a83abb32e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy._meta_nonempty__GroupBy._meta_nonempty.return._maybe_slice_grouped_sel": {"doc_hash": "7f789cdca28556327b31064223fb37e3cbe371a31a32ae9180720bd9cb212aff"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy._aca_agg__GroupBy._aca_agg.return.aca_": {"doc_hash": "5c1434c5980b2482c7b8009918835320021c7ac176eb9fa984050f0408b369e2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy._cum_agg__GroupBy._cum_agg.return.new_dd_object_graph_name": {"doc_hash": "edb7efc1490424d657e36b50c7e353bf1e8ebb52bdb038f9d9ef899907d99bcd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy._shuffle__GroupBy._shuffle.return.df4_index2": {"doc_hash": "28333d57ae14e978644a1fb66f27c376462e83ba752d32f1450e1e5180debbb7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.cumsum__GroupBy.mean.return.s_c": {"doc_hash": 
"e8e1246379f8d8149c51155d549909016ebc888530b1a74e9f71ad9b6d9d6b4f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.size__GroupBy.var.return.result": {"doc_hash": "197d0368d5307224b39e7ea5d0d9f382e512f94f231e0ac964a8b212cb408033"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.std__GroupBy.corr.return.self_cov_split_every_spli": {"doc_hash": "3277df8781dc3c4c24be95f475b9bdb6bfc05a7c363d8ccf0c10d81a9b5f7d56"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.cov__GroupBy.cov.return.result": {"doc_hash": "f7ceedeb8dc76fc86eaddfddc8c72a0a21da31f460364a3e80769e40a214a8e8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.first__GroupBy.last.return.self__aca_agg_": {"doc_hash": "a531cb495339dec16cdd559a94b721ee627849d60f5cd7ebabd1fc5a45d83aaa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.get_group__GroupBy.get_group.return.map_partitions_": {"doc_hash": "c161fa8623bef922a1f1299b93397e38afff5f2e43ea77df3d5c4a1ffb18aee2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.aggregate__GroupBy.aggregate.return.aca_": {"doc_hash": "25b815c2ba7529a03f43778479b129428643bc631777fc4f25e6dbd474749db8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.apply__GroupBy.apply.return.df3": {"doc_hash": "0d136c7c6b42941ce06a56357038c433f45df1f232927c88dcdd5cb54c0e0dfd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.transform__GroupBy.transform.return.df3": {"doc_hash": "fc6ba8e847a03404c0d102e5ec816dbd8aa12319291bfe1dcc2c8047c2768fac"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_DataFrameGroupBy_DataFrameGroupBy.agg.return.self_aggregate_arg_split": {"doc_hash": "550e17cb8636f7d47179f4070046fbed381fc16497b27a4eeb64be48eeeccc1b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_SeriesGroupBy.nunique_SeriesGroupBy.nunique.return.aca_": {"doc_hash": "1fe934804f524f63e74e6a44677d193bbcef389be6d6478a4a0ac5d717c820c6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_SeriesGroupBy.aggregate_": {"doc_hash": "c69f7dd123190e68c010af066d437579f7f0495656d33f01bc71de242eda3567"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/hyperloglog.py_compute_hll_array_compute_hll_array.return.series_reindex_np_arange_": {"doc_hash": "b24d9752c56f9d25e92b1038d2f217c6b3129051a7bfcd9643448ad290930291"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/hyperloglog.py_reduce_state_": {"doc_hash": "5f5f486a4ef82275d1a63740d0780f72f0a98859e2ca14db9b3694c9a4c15446"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py_from_datetime_import_date__IndexerBase._make_meta.if_cindexer_is_None_.else_.return.self__meta_indexer_cin": {"doc_hash": "2853c31b88c860b013dff210f784cca9480c09e9af782b0595431d0c50f8885b"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__iLocIndexer__iLocIndexer._iloc.return.self_obj_map_partitions_m": {"doc_hash": "ec410441280d2886fb614112270ca7e8af4d8e854938240c42887c18dcf560e1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__LocIndexer__LocIndexer.__getitem__.return.self__loc_iindexer_cinde": {"doc_hash": "869a06955a8a061430919195737e4406fc8cdb5cfc45f3036b7ba83864d6c9e9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__LocIndexer._loc__LocIndexer._loc.if_self_obj_known_divisio.else_.return.self_obj_map_partitions_": {"doc_hash": "16480b05b91a1a982e7cc9c5dda28c53c95c46cf581c6f82a782a5e01d6792ca"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__LocIndexer._maybe_partial_time_string__LocIndexer._loc_array.return.self__loc_series_iindexer": {"doc_hash": "aed322dd1021704caf54cd5ce145c8154e94aa79e040d4d55582afdde01ec386"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__LocIndexer._loc_list__LocIndexer._loc_list.return.new_dd_object_graph_name": {"doc_hash": "20c435c01dfebaebd1a4d6ba9dd587de43fa937ac93f5f77ad4f5ef45e7e85b1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__LocIndexer._loc_element__LocIndexer._coerce_loc_index.return._coerce_loc_index_self_ob": {"doc_hash": "79e6ec9c246ed76321e4723528c0101cbca36d376fe49b0e2bd7c62454c0c8e1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__LocIndexer._loc_slice__LocIndexer._loc_slice.return.new_dd_object_graph_name": {"doc_hash": "80a17f587d58cc1aad50a054e7524db483a389648e58684a4d2c865d40ac4376"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__partition_of_index_value__partition_of_index_value.return.min_len_divisions_2_m": {"doc_hash": "cbe4fb55ad75eed4b2ff9a73f6a87334f680705641be0d5bce31c34ddd4d2161"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__partitions_of_index_values__partitions_of_index_values.return.results": {"doc_hash": "2cb63eeb6240844d327ac03dccee6ec110ebb55954e9b317d0f8756ada6da97c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__coerce_loc_index_": {"doc_hash": "a1aa2487c1723376ed8f2d426e5bd5680bc431e5400650ea7ebf0484d721ec04"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/__init__.py_from_array_": {"doc_hash": "b22320bc0539ec5108992a5aaf455719aef9e252e57d7ba21183f234f26ec868"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_from_os_path_import_basen_from_fsspec_compression_i": {"doc_hash": "c7319dc3a07edb35f1d07f421f37f2bb3a77f719088ee169b89b7f77c303b4a3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_CSVSubgraph_CSVSubgraph.__init__.self_colname_self_paths_": {"doc_hash": "d60bbd976441ec6dbfa0a24d6f709c7b70dc5c4f768afb961af4c71706ae8011"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_CSVSubgraph.__getitem___CSVSubgraph.__iter__.for_i_in_range_len_self_.yield_self_name_i_": 
{"doc_hash": "304cfe5f667551bbae09e0b0b70e7ccee2312c554c693d2fb26319a10b8ea5dc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_pandas_read_text_pandas_read_text.return.df": {"doc_hash": "3894530e8f4d6ab0c9b9f5c5994315b8e93553d02d2afb9042fc4d2d533ffe14"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_coerce_dtypes_coerce_dtypes.if_bad_dtypes_or_bad_date.raise_ValueError_msg_": {"doc_hash": "767dc67f9a00d258c8069a53520759f269ea39d43117d54ceeb38bbea635548a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_text_blocks_to_pandas_text_blocks_to_pandas.return.new_dd_object_subgraph_n": {"doc_hash": "68e7df2575ae3a4182ed3d3ce4c828f4ff963bb32a6363345f5b834415e76dab"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_block_mask_if_psutil_is_not_None_.else_.AUTO_BLOCKSIZE.2_25": {"doc_hash": "c10bdfb08aabb44a768dc7e13fcd31ff3a8065984ec494f49ca7727c8b3ae7dc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_read_pandas_read_pandas.b_lineterminator.lineterminator_encode_": {"doc_hash": "8eff340a872e44aed315ca636781bb4ce76bb697ead73f6f065d2d2ccb3c4d5e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_read_pandas.b_out_read_pandas.return.text_blocks_to_pandas_": {"doc_hash": "63d0c12da657d1e0f7d062049e9fd3bc792371e433b9f13b5488d005281b473a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_READ_DOC_TEMPLATE_READ_DOC_TEMPLATE._": {"doc_hash": "13b276b5edbb629be2f92d63cfa11a7ec45497d283e1796fce12735be082d909"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_make_reader__write_csv.return.None": {"doc_hash": "801e0573ddf7dbea4c59d87b136548f83a54a87a1eea2b121368c1a902ac78a3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_to_csv_to_csv._": {"doc_hash": "0bbc2a44e78e7ca3c1cdb469ec64724425ea3a84b4f0e5e51833d1471adf98f8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_to_csv.if_single_file_and_name_f_": {"doc_hash": "ecd97e9c2c1dad629852b9006b321742ce3ca7b34dbbf638fef9498985eb3c8d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/demo.py_pd_make._": {"doc_hash": "17cf13ae13a5d4ff4c8a00627e67d9e1b475620ed2bf06083e30c51cfd29fbc5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/demo.py_make_timeseries_part_make_timeseries_part.return.df": {"doc_hash": "fb0e4d46cbfcb8616da058cf898e36b2ee0f5b5d39663a56f057ccd2c9252fed"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/demo.py_make_timeseries_make_timeseries.return.DataFrame_dsk_name_head": {"doc_hash": "33d8ad7fb2f5962f3367afcb5d5ff147a47ad1909c970be4bb12688074f85285"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/demo.py_generate_day_generate_day.return.pd_DataFrame_": {"doc_hash": "ed185045169e29c72d429976f2bc2bf90635edce66e7b89a2a06854cb2003f26"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/demo.py_daily_stock_daily_stock.divisions._": {"doc_hash": "9524b61f82f7d8536ccfb8484ef0c2f2b2f384055432937e7d0b5048f239f3a2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/demo.py_daily_stock.for_i_seed_in_zip_range__": {"doc_hash": "e9a05b0e268ae1145ddcbd9998a1c6b4a5d9c02c24cd88d90cad8bf72410f0d9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_from_fnmatch_import_fnmat__pd_to_hdf.return.None": {"doc_hash": "bc8be7b9cf6864b1626a9f03dd71555c6b830940755cb53419838b4945bd52e6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_to_hdf_to_hdf._Store_Dask_Dataframe_t": {"doc_hash": "bc9f5d52162422af0aa0f85def60d9da00397901a780a9127b718a0ab82db298"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_to_hdf.name_to_hdf.for_i_in_range_0_df_npar.filenames_append_fmt_obj_": {"doc_hash": "0960a7c82d08e01729a683cc2dd3a0bb4e4f4eb3031360cc9fd2d0163e0b8f40"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_to_hdf.for_i_in_range_1_df_npar_to_hdf.if_compute_.else_.return.delayed_Delayed_k_dsk_": {"doc_hash": "2a2b34941005e919ca5dfd1368d78c8cb3c1f5d26d6dbea76e5c0c2b371e7720"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_dont_use_fixed_error_message_read_hdf_error_msg._": {"doc_hash": "ec55d5a64433581a1fb152c9b6c33ce837417eedcbc166d6b5c2467a7941888a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py__read_single_hdf__read_single_hdf.get_keys_stops_divisions.return.keys_stops_divisions": {"doc_hash": "c48a2406bbbbb56259665f37df18f7fbbd0c866329ff7e6f2c606d8c874d0b20"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py__read_single_hdf.one_path_one_key__read_single_hdf.one_path_one_key.return.new_dd_object_dsk_name_": {"doc_hash": "ae44bf29db82019177b9d7495fa11f5d78186e6b8b574f0e897257e84d622ae8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py__read_single_hdf.keys_stops_divisions___pd_read_hdf.return.result": {"doc_hash": "a213f118bba2bbe6c10e0cf6fb9a18f53958029c07f8f599947f8117eb3bb844"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_read_hdf_read_hdf.if_chunksize_0_.raise_ValueError_Chunksi": {"doc_hash": "aa80131775c19b6dda0a8f4a691f9bed8a1178b2bfa70908b3b3c27c1fec8d5d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_read_hdf.None_7_": {"doc_hash": "74469e0f5fd3a453ab9d8cadf55f24de697c10448c381aaad71251f9fa04664b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_array_from_array.return.new_dd_object_dsk_name_": {"doc_hash": "b9250930bf19fcc550dc1d533955c1e8384b5673360d15d8305b250257354fd6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_bcolz_from_bcolz.if_index_.else_.return.result": {"doc_hash": "c741e3e5b9a4ecb293d684103517fb607c08b6aeaddf7ed4008b8650175daf0c"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_dataframe_from_ctable_dataframe_from_ctable.return.result": {"doc_hash": "f565af2b4c9d771f5571ba1a26c78a9df39af61634496c09e0907305bde4d44b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_dask_array_from_dask_array.dsk._": {"doc_hash": "a4a048dc021be03b849eb8fa0ae9a2c43969b42d5cab8dcd3f691a08d3abb385"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_dask_array.for_i_chunk_ind_in_en__df_to_bag.if_isinstance_df_pd_Data.elif_isinstance_df_pd_Se.return.list_df_iteritems_if_i": {"doc_hash": "b05b65c11e6f603080e3050589f0c475ca760e2ffc928f7db29de1424dd31784"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_to_bag_to_bag.return.Bag_dsk_name_df_npartit": {"doc_hash": "bb6ebc4d11486c2246dc85579d9f940458ab539ea6a48610bcb878adab418ad0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_to_records_to_records.return.df_map_partitions_M_to_re": {"doc_hash": "653dc95a910129ebea985993ba9c60f1aada792d1abac71db60c66b0c5f766dd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_delayed_from_delayed.return.df": {"doc_hash": "fac053b20150acd8b42bc5f0c40f161b8968ad19f6c1d7ce980b3ed72101aa29"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_sorted_division_locations_": {"doc_hash": "d35fabe35f4455f9bae1510d22977c1d21b0afe50e5afb1d35483ef10acc5ba7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/json.py_io_write_json_partition.with_openfile_as_f_.df_to_json_f_kwargs_": {"doc_hash": "5a9bf76bb4d9b17564894cc66f80262b5827114981f68612a38103a8a4b957b6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/json.py_read_json_read_json.storage_options.storage_options_or_": {"doc_hash": "543027d75f551aacab39d1ff04ea1ab81006011e7adaef23ec36c9928ecd0085"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/json.py_read_json.if_blocksize__": {"doc_hash": "2f9c353e2185f4f0357016443db4c6d00d1bd51dbeb9889008b0e2603483b3a1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/orc.py_from_distutils_version_im__read_orc_stripe.if_pa___version___Loose.else_.return.table_to_pandas_date_as_o": {"doc_hash": "4f4faf9186954e6d2821be9da9c815329a1021a5e5a0beb755a2cc5a4830d12a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/orc.py_read_orc_": {"doc_hash": "072f05ee5cfd834473b97b4567f7772994f65e354752b847ca56ba45ffbc41ea"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/__init__.py__": {"doc_hash": "f0f2ec33b654aaa781cdd147f79f3b7231216be8bc5054bd9aa8f04b87a78192"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_from_functools_import_par__append_row_groups.try_.except_RuntimeError_as_er.if_requires_equal_schema.else_.raise_err": {"doc_hash": "8b8839422fbcf28bb94acfef8cf745ba1e454b974b753a6c68166e4d676e8120"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__write_partitioned__index_in_schema.if_index_and_schema_is_no.else_._No_index_to_check": {"doc_hash": "aa47f9833fec2496e64e56cc7e296689455ede25f33c0190849a815688731eac"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__get_dataset_object__get_dataset_object.return.dataset_base_fns": {"doc_hash": "4c110ffd1eca6e1bb59613425ca976a2e963d36a9a2f26a01362121c6d84cd8d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__gather_metadata__gather_metadata.if_dataset_metadata_.else_.return._": {"doc_hash": "1860c128ddb14841eeb007e2bc0cf0731aa865140b7c70d88eaee2de06076b07"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__generate_dd_meta__generate_dd_meta.return.meta_index_cols_categor": {"doc_hash": "844123b181e211e9bfc20c6e728525726f3bebb8c494c629baf07e6834a8dd77"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__aggregate_stats__aggregate_stats.if_len_file_row_group_sta.else_.return.s": {"doc_hash": "e66f27f183dfd18c551b6a3bc470df85b6856d355bf75bbc02c9ae16abb8c038"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__process_metadata__process_metadata.return._": {"doc_hash": "c8e8ae6a2c3b01b47a03978e68b4d7ab0204d92549d28fb6472d280733c74c10"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__construct_parts__construct_parts._": {"doc_hash": "de7ad8a8bfd555e5cb76f71113061ff109b23289a4db973836d8e22f2e589ca5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__construct_parts.if_split_row_groups___construct_parts.return.parts_stats": {"doc_hash": "aeb36646ed586d4d7a097fbced94a4124c3a5c77aa1dd3ddf3c1e7fcda13d3bf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowEngine_ArrowEngine.read_metadata.return._meta_stats_parts_inde": {"doc_hash": "b533c4da46e8e77e481415b1a05ae53de1d7bbb38cc1f61c92f642e5c3834c1f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowEngine.read_partition_ArrowEngine.read_partition.return.df": {"doc_hash": "51f8a38089aded115dfd0b03b6a1ded70f79a4363de792797a95322014f21cbe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowEngine._arrow_table_to_pandas_ArrowEngine._parquet_piece_as_arrow.return.arrow_table": {"doc_hash": "79a1b4d0586dac024c17a991a378f801c3693b70bd24cbc1cec4a5db5d7a182b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowEngine.initialize_write_ArrowEngine.initialize_write.if_append_.try_.except_IOError_ValueErr.append.False": {"doc_hash": "1f68cd48842e65b7f863e20b73866cd38a1640c6a87b6fdb6c6b4db5ed3dd3c0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowEngine.initialize_write.None_3_ArrowEngine.initialize_write.return.fmd_schema_i_offset": {"doc_hash": "a1a30d8f9095d7b883a5c9d6f04c6719619b17a920a69a6e6f44a745393d0563"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowEngine.write_metadata_": {"doc_hash": "81cbc35ffaf253e29d8252769f06c8272d273aabc39e972d1f1a37c722632473"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_from_distutils_version_im_NONE_LABEL.___null_dask_index___": {"doc_hash": "e4bd4ca490708109c2eb28dfa6f287110c06089da4019bd042182bab1479fd2b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py___ParquetSubgraph.__repr__.return._ParquetSubgraph_name_": {"doc_hash": "a892a8d3f69d1423660e00e7b487c89b01b3f40d31f0d7734661e858029cfc4d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_read_parquet_read_parquet._": {"doc_hash": "97e573b82910ae6067a623ecac451f85b730a8f99d89d22f8211aec0f4739bf4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_read_parquet.if_isinstance_columns_st_read_parquet.return.new_dd_object_subgraph_n": {"doc_hash": "46a61a1e65c8a9aff2496b27bdea28b8733bb6f1e2dcf7f029419537951718d6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_read_parquet_part_read_parquet_part.return.df": {"doc_hash": "9fa81a7b050ae9df58483234b51a3763ad4020dd3e85bd5d4c9bdae14278f5cb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_to_parquet_to_parquet._Store_Dask_dataframe_t": {"doc_hash": "c26df96194cc2318dc75e3ea04e84d2610e7d427c60fc56e1c3f4ba837442b95"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_to_parquet.from_dask_import_delayed_to_parquet.if_write_index_.else_.df.df_reset_index_drop_True_": {"doc_hash": "0c772441cd1a7a6f185e9dd7b8659d13dbce08db37640cae37609367ba2784a7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_to_parquet._to_parquet_kwargs_to_parquet.return.out": {"doc_hash": "fd7ec9468ddb3408e3fffdf71a5dbbf3fa91ff9d16949d59bc731865706fd60b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py__ENGINES_get_engine.if_engine_auto_.else_.raise_ValueError_": {"doc_hash": "96204661779238fc6b1a560f0b85deee361f798b89113a509cff876de79e1ab1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_None_3_sorted_columns.return.out": {"doc_hash": "75f95b297bdd748657024fb0e56c4247350c4d59f0cf56afe76dfa33197e42e6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_apply_filters_apply_filters._Apply_filters_onto_par": {"doc_hash": "41a56a983090698ac355e99551b3257b16058561212e045a38e6f219a0d846f7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_apply_filters.apply_conjunction_apply_filters.return.out_parts_out_statistics": {"doc_hash": "6490603ca9738aedf3674f4bfe68c33b596c00c2955789e493044e7b1c4c8408"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_process_statistics_process_statistics.return.parts_divisions_index_": {"doc_hash": 
"cf350e0176c7c61acf3ad05f20f9b0de11d504464b5f7277f7b479005eafc2c9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_set_index_columns_set_index_columns.return.meta_index_columns": {"doc_hash": "5246c4260d6ed4180b099194328eaa8c9ad6903c8fe66c4e8d0ecaa608a11fe4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_aggregate_row_groups_": {"doc_hash": "a0e96efbe764dfdbf90177d327291575b66f16345030a4782dc0c185c4669d9d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_from_distutils_version_im_Engine": {"doc_hash": "fe7af24243694ce2f32a23eccb5dff68eedeaacea612e4d195b511a968357387"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py__paths_to_cats__paths_to_cats.return.cats": {"doc_hash": "55183a04587d71dc0be2e089f07731601ec0c2c926962a9418fe8ab62c3a88cd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_paths_to_cats__determine_pf_parts.fast_metadata.True": {"doc_hash": "104d1107535e15863fb06565bf24b4bbab38e576459c05bd78046fd5b6f9af54"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py__determine_pf_parts.if_len_paths_1___determine_pf_parts.return.parts_pf_gather_statist": {"doc_hash": "f1c32ea372c2e5ab9e841ed7836ac639b4ee6cbaa90c45434b36e3e659003685"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine_FastParquetEngine.read_metadata.for_catcol_in_pf_cats_.if_catcol_in_meta_columns.elif_meta_index_name_c.meta.index.meta_index_set_categories": {"doc_hash": "968cfbe0b4a9c1647bcfbf323ac269a4f103979ddca8fc18927e1f70b502b5aa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.read_metadata.if_gather_statistics_and__FastParquetEngine.read_metadata._if_we_have_a_list_of_fi": {"doc_hash": "7ab570f370ff138a61811085b5d96c7a5895effe41b7177427d20a426990285c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.read_metadata.base_path_FastParquetEngine.read_metadata.return._meta_stats_parts_inde": {"doc_hash": "431fcff6080e227b9bdb0c9f67d6f6986b27605c449f3f589348948e61baf2e2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.read_partition_FastParquetEngine.read_partition.if_pf_is_None_.else_.return.pf_read_row_group_file_": {"doc_hash": "072cc9c3e58fbc5cf3b81812d31cec77fdfd9fbcfc5981a2726ad8ad1d4af52b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.initialize_write_FastParquetEngine.initialize_write.return._fmd_schema_i_offset_": {"doc_hash": "b4e130d7cf0f35ede96dd8084587986f052047039fa9addb7bb8bf8a5bbe71d9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.write_partition_FastParquetEngine.write_partition.if_return_metadata_.else_.return._": {"doc_hash": "68eaf7650e30acad3a185a4062789cd4a01c035036a1511eed6e5727f726e5e6"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.write_metadata_": {"doc_hash": "ad45be034fe3867af43d6d1eeb9e074fadd33f02d907e13fa0899676d178999b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py_re_Engine.read_metadata.raise_NotImplementedError": {"doc_hash": "4ec574cf0e536991ccb78a029ab9d3a6ffe0f4f7d3bde9fa56a0271dc11abebd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py_Engine.read_partition_Engine.read_partition.raise_NotImplementedError": {"doc_hash": "439bea3c68b26190dc93c755807ed7622d9313069cdb94642058d20d5915cf11"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py_Engine.initialize_write_Engine.initialize_write.raise_NotImplementedError": {"doc_hash": "973b98ebe6617015bc498c785f4e6a4c259f8582e224bbb22c68bcd58a8be670"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py_Engine.write_partition_Engine.write_partition.raise_NotImplementedError": {"doc_hash": "83019f2d0181559d2826064e0fbba31366a93df2f2f8527d4f383bd189be7782"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py_Engine.write_metadata_Engine.write_metadata.raise_NotImplementedError": {"doc_hash": "1b5602b8675a6d98e568a363b233d41c23147f28ca6ee86ff2b3d25f95470b63"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__parse_pandas_metadata__parse_pandas_metadata._0_8_0_allows_for_dupli": {"doc_hash": "47b6ef76ffbfeaf044b0f17c4609c6964366faa238c92cc85bf73040dfb3e83b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__parse_pandas_metadata.if_not_index_names___parse_pandas_metadata.return.index_names_column_names": {"doc_hash": "c464d4f516a1919e04f3e244e6c96c0932dc7f486db6980fbcfd2a40ba579978"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__normalize_index_columns__normalize_index_columns.return.column_names_index_names": {"doc_hash": "3364e81381f12531be3f08603ce946178842c3e8b535d70090c49a61153c6c7a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__analyze_paths__analyze_paths._join_path._scrub.return.p": {"doc_hash": "ce6aa273529b7667abd7834174ac7b4ad8667c212d9ead34903dd4db8c4186f4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__analyze_paths._join_path.abs_prefix__analyze_paths._join_path.return.joined": {"doc_hash": "7a5f295adc273257c79f4c0cc16f36aac927036d3cdc3745f665ab078c7e9504"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__analyze_paths.path_parts_list_": {"doc_hash": "af813bd367afe90a70535b5324d3dfd6475c974f88c7721b78a6491ffc71df7f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py_np_read_sql_table._": {"doc_hash": "6a3f8875478a44f8470827f7f93274ffc4bd4477fcf190b1bc4b67a1abf4b857"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py_read_sql_table.if_divisions_is_None___read_sql_chunk.if_df_empty_.else_.return.df_astype_meta_dtypes_to_": {"doc_hash": "80c66892121d68b0ca6e692e6e0d2dd6c75f6fd4be893d7ec2e8f7a8f5136f90"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py_to_sql_to_sql._Store_Dask_Dataframe_t": {"doc_hash": "f978c105632231e2a7d4a3b4974ac9604749033cb10544472af8c3689e3901bf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_from_io_import_BytesIO_timeseries._": {"doc_hash": "d64c2071d4da0d891a625f488892f68454912987a3ae72a660d35e1b4d60ad3b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_csv_files_test_pandas_read_text_with_header.assert_df_id_sum_1_": {"doc_hash": "4d063ba39cf7c5f433f859bd1c746665841266b1fc0962e4f4300b68fc61160e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_text_blocks_to_pandas_simple_test_text_blocks_to_pandas_simple.assert_eq_df_amount_sum_": {"doc_hash": "bf710217da4bc62c3c9bd357877d9523fe43617ec1a5093cb476dab3a04fb328"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_text_blocks_to_pandas_kwargs_test_text_blocks_to_pandas_kwargs.assert_result_columns_": {"doc_hash": "b6ff139a4c3bc5ad8445fffd6f07ece3c7db12d9d11d1b44308e125088913ddb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_text_blocks_to_pandas_blocked_test_text_blocks_to_pandas_blocked.None_1": {"doc_hash": "5e29465922d6488a9621c09c7c1033416a3141c1f449cdc93b9c84c8157953bd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_skiprows_test_skiprows.with_filetexts_files_mod.assert_eq_df_expected_df": {"doc_hash": "532dcd894b4e268c8f913ae2894b081f134715af7aafdd466e7b51325791dd1e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_skiprows_as_list_test_skiprows_as_list.with_filetexts_files_mod.assert_eq_df_expected_df": {"doc_hash": "887c683ee59e903f386e6fb647305ddbdca9a0976788febfe05813ffc124c794"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_csv_blocks_tsv_blocks._": {"doc_hash": "0099c80dc505bd657e1f01e7d28070d0bed901785da41c031c4d1668e5332dea"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_enforce_dtypes_test_enforce_dtypes.assert_all_df_dtypes_to_d": {"doc_hash": "4a2ab0caaf4e0cb36946f070d4e5edb27276e7c4c3848311363a5b3734eade5a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_enforce_columns_test_enforce_columns.with_pytest_raises_ValueE.dask_compute_dfs_schedu": {"doc_hash": "cfca617b822a26eb0dbb67fd3d97a1cf308427de5a961e92f5914205ab622b6e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py___test_read_csv.with_filetext_text_as_fn.assert_eq_result_pd_read": {"doc_hash": "df6cbda0e0564a471b360e2e4da7d1e24a7eecac40e969fa7a49b748ea89256c"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_large_skiprows_test_read_csv_large_skiprows.with_filetext_text_as_fn.assert_eq_actual_pd_read": {"doc_hash": "84697de2bd058f20af81d50abb4aa55f681d2b948e0097715e09c227fb0f69b3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_skiprows_only_in_first_partition_test_read_csv_skiprows_only_in_first_partition.with_filetext_text_as_fn.None_1.with_pytest_raises_ValueE.dd_read_fn_blocksize_30_": {"doc_hash": "3523f6858a2eefbf406129d449eda98afcd70bcdcff7f51d1bd5292a6fab5f03"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_files_test_read_csv_files.with_filetexts_files_mod.assert_eq_df_expected2_": {"doc_hash": "10e7e154631fd1d81c1319ca682a9a422ad34dbc1ee261183066f94ac36d2ff6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_files_list_test_read_csv_files_list.with_filetexts_files_mod.with_pytest_raises_ValueE.dd_read_": {"doc_hash": "8bc7644c0df0e029afb8962aa56cbafc25e0720d18ccdfacbb8165206162cdd3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_include_path_column_test_read_csv_include_path_column.with_filetexts_files_mod.assert_2014_01_03_csv_i": {"doc_hash": "96fddad54b8eb15cab1a19b9371b31ae8f59c4d362550f42b38388ce657af917"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_include_path_column_as_str_test_read_csv_include_path_column_as_str.with_filetexts_files_mod.assert_2014_01_03_csv_i": {"doc_hash": "ba6a44ef536fe1808b9505163f4148ac34a08f5ac6279e8e56771993bfcc4677"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_include_path_column_with_duplicate_name_test_read_csv_include_path_column_is_dtype_category.with_filetexts_files_mod.None_3": {"doc_hash": "d14c667add824aa384bf910e1c67098503c33f09a70a935c891005c98d191cb5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py__After_this_point_we_te_test_read_csv_index.with_filetext_csv_text_a.assert_eq_result_expecte": {"doc_hash": "2f254672417eedc89956ff54be7baaa08d9d113517a61549b06ced024d38c237"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_skiprows_range_test_consistent_dtypes.with_filetext_text_as_fn.assert_df_amount_compute_": {"doc_hash": "254c1f5fff81bf7c1394d1cbeeb5e0c578bf8b188ddcff6513e4bb928b794e75"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_consistent_dtypes_2_test_consistent_dtypes_2.with_filetexts_foo_1_cs.assert_df_name_compute_": {"doc_hash": "17bd19e311ae294e7d3627f196525c86c2cb3a5b01c9926266fd632343e81153"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_categorical_dtypes_test_categorical_dtypes.with_filetexts_foo_1_cs.assert_sorted_res_fruit_c": {"doc_hash": "859864e9c8128041505314ae4873b132be36217686007e272c84750d8bcb4eb0"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_categorical_known_test_categorical_known.with_filetexts_foo_1_cs.None_10": {"doc_hash": "1f5f3523daef1c3fee9974190e81187fcb5d1eccb7b773ec607087fc4486ef7d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_compression_multiple_files_test_compression_multiple_files.with_tmpdir_as_tdir_.assert_len_df_compute_": {"doc_hash": "54fb7cc1d40681153a16453843258389ad6e9d7ee8c33f0fe4569f607b168ff5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_empty_csv_file_test_read_csv_sensitive_to_enforce.with_filetexts_csv_files_.assert_a__name_b__name": {"doc_hash": "4f2f529a78c5b8eb66fdd5e9bd7e3c022954d85122ee2e56c9338cfeffc8823c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_compression_test_read_csv_compression.with_filetexts_files2_mo.assert_eq_": {"doc_hash": "c3feb7a1c6418dcf30ad8063554f06a37efa63cc9c49ae1eb769f9e37e1056d9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_warn_non_seekable_files_test_warn_non_seekable_files.with_filetexts_files2_mo.with_pytest_raises_NotImp.with_pytest_warns_UserWar.df.dd_read_csv_2014_01_cs": {"doc_hash": "09ca2cb0dc7193a3760fd9f2553d40528aafedd5ea807b56f9088577f64100ea"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_windows_line_terminator_test_windows_line_terminator.with_filetext_text_as_fn.assert_df_a_sum_compute": {"doc_hash": "2cbf202a53ad3f185135002da31adb4ff6d42351dce90e8467b8fc3f9c03d569"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_header_None_test_auto_blocksize_max64mb.assert_isinstance_blocksi": {"doc_hash": "f448cce687d6eb5bedb18323d7f66c7a6e28f7caa229f25313a7bdf770b5a1ae"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_auto_blocksize_csv_test_auto_blocksize_csv.with_filetexts_csv_files_.None_1": {"doc_hash": "c8132450184938802e0cf17992e9ce67979d7a71628f86556d73eb9411e0e86c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_head_partial_line_fix_test_head_partial_line_fix.with_filetexts_files_.assert_df_dtypes_i8_": {"doc_hash": "d7c4d45e81a9661e558ad8d9b371262b8234d5eb5473664b9cbf473e2fad9320"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_raises_on_no_files_test_read_csv_of_modified_file_has_different_name.with_filetext_csv_text_a.assert_sorted_a_dask_key": {"doc_hash": "59ccdc60b29b09dd123482243c8265ed9fea6d8e53ed9e379e5c6a1d6ef68711"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_late_dtypes_test_late_dtypes.date_msg._": {"doc_hash": "041a4c6431059e42c4d97badcd69b929741b2b33a2441b216022ffbcba9bf99e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_late_dtypes.with_filetext_text_as_fn_test_late_dtypes.with_filetext_text_as_fn.assert_eq_res_sol_": {"doc_hash": 
"4e56d147f95767dc9bbe23c2008bccf47c5c0b186479e719e802c20508fd398d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_assume_missing_test_assume_missing.None_3.assert_df_numbers_dtype_": {"doc_hash": "27c8b3f748b3c5f18b6354d9a5e5fb2ce0614167e9cba7796f007b8bce10ebb7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_index_col_test_read_csv_with_datetime_index_partitions_one.with_filetext_timeseries_.None_1": {"doc_hash": "0b41e95d6faefe487982510b9e2b6cbdf4b189d8d6e7bf4777bac10d2902f3f6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_with_datetime_index_partitions_n_xfail_pandas_100.pytest_mark_xfail_": {"doc_hash": "25c3845cfdf65d016c04ff13dac3edee7ba4ea3ddd38d2ef6ad06a86bbb19d76"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_encoding_gh601_test_encoding_gh601.with_tmpfile_csv_as_f.assert_eq_d_a_": {"doc_hash": "e6cb4d919d8fc629ba854bde0e283eb5233e291dc51383b7de8e2eafbd5dd3b8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_header_issue_823_test_none_usecols.with_filetext_csv_text_a.assert_eq_df_pd_read_csv": {"doc_hash": "5fa628a3a100f2efe30f65e3a4e1202548a57e5947120a17663a1ec245e4b4af"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_parse_dates_multi_column_test_parse_dates_multi_column.with_filetext_pdmc_text_.assert_len_df_len_ddf": {"doc_hash": "1405ca82e589fda0f27671ac4af0929aeb5e6c783fe7ba4fb35f360950aa6140"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_sep_test_robust_column_mismatch.with_filetexts_files_mod.assert_eq_ddf_ddf_": {"doc_hash": "8182556cfe4cf4fa9f65106f159273e0e3c5f3299891922093eb57b9cf73a618"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_error_if_sample_is_too_small_test_error_if_sample_is_too_small.None_1.assert_eq_": {"doc_hash": "0b848cbb43d12ce69b3f3d597c9095d7a90bf7009f6a7c9e6de73f0dab700789"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_names_not_none_test_read_csv_names_not_none.with_filetext_text_as_fn.assert_eq_df_ddf_check_": {"doc_hash": "81e178b2138ca454ada2c747390bf32ad391a4842007bf0954f6bae86a44636c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_None_6_test_to_csv.for_npartitions_in_1_2_.None_2.assert_eq_result_df_": {"doc_hash": "cca543f7eda01854bab286b95b678f082b142fef232eea4caa0429e0507500f4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_multiple_files_cornercases_test_to_csv_multiple_files_cornercases.None_3.assert_eq_result_df16_": {"doc_hash": "0c8f568d7a89331c5619fca00112b0eddb9042ae54ff468e0f2747ab24821f65"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_single_csv_test_to_single_csv.for_npartitions_in_1_2_.None_1.assert_eq_result_df_": {"doc_hash": 
"25c7bf2c76f6f309f0761a7627a2c1dc6ff062c414f7548babcdc9fda8643061"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_single_csv_with_name_function_test_to_single_csv_with_name_function.with_tmpdir_as_dn_.with_pytest_raises_.a_to_csv_fn_name_functio": {"doc_hash": "9a816e0953c3a3f0ea0c05096b99e02bc4c3e3115a2fb716a19ceb6ee251d015"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_single_csv_with_header_first_partition_only_test_to_single_csv_with_header_first_partition_only.with_tmpdir_as_dn_.with_pytest_raises_.a_to_csv_": {"doc_hash": "3568b22c9af8bb47f5310c6bdafd2b9d65dce062d7074b467b7399333c27b61c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_single_csv_gzip_test_to_single_csv_gzip.for_npartitions_in_1_2_.with_tmpdir_as_dn_.assert_eq_result_df_": {"doc_hash": "0e7d9d730c78923920420298a51c4fe278a9f1de03830002d61892953816a473"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_gzip_test_to_csv_gzip.for_npartitions_in_1_2_.with_tmpfile_csv_as_fn.tm_assert_frame_equal_res": {"doc_hash": "62f749b6282e253a4a33b6fbe649a503a1e0a716f9340768d6055f44d3d552f2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_nodir_test_to_csv_nodir.assert_result_x_values_": {"doc_hash": "73b3fcf06bdaa45605667b1cb64dea779c99065264af2e39dcb87ed3fe2a42d4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_simple_test_to_csv_simple.assert_result_x_values_": {"doc_hash": "d225527be6de23e9992866334e1f1699f4984418fa4c48d5a993660c65ae7bfc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_series_test_to_csv_series.assert_result_x_df0_": {"doc_hash": "06a05c0b9be00e21b7bfbe78e1f1c2d6cbb4a677d601cd15fdb9a7c36451416c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_with_get_test_to_csv_with_get.with_tmpdir_as_dn_.assert_eq_result_df_che": {"doc_hash": "197aa5a0dd4c146361730b3a0ebc4b1abc39a268031436d6959f6121a9dd5b36"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_warns_using_scheduler_argument_test_to_csv_warns_using_scheduler_argument.with_tmpdir_as_dn_.with_pytest_warns_FutureW.ddf_to_csv_dn_index_Fals": {"doc_hash": "c1d913122dbac9d1696895528033556eaa6cd623ec9b374b913437370ef1b150"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_errors_using_multiple_scheduler_args_test_to_csv_errors_using_multiple_scheduler_args.with_tmpdir_as_dn_.with_pytest_raises_ValueE.ddf_to_csv_": {"doc_hash": "0327914c997849bac56e4acc98ea4dd07c25435540a91cd94ee40d501c672940"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_keeps_all_non_scheduler_compute_kwargs_test_to_csv_paths.os_remove_foo1_csv_": {"doc_hash": "cd9c462adc46350e38894c1b61fb0486b7fef47e168f1d863f57c90119e99c98"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_header_empty_dataframe_test_to_csv_header_empty_dataframe.with_tmpdir_as_dn_.os_remove_filename_": {"doc_hash": "5afc7cc035f0ada18eb517ddecf96054959eab5cdf8c92b47af1a89cfbfa2c14"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_header_test_to_csv_header.with_tmpdir_as_dn_.None_2": {"doc_hash": "30f5faa2a77e803db57a5cb9c58a1918ac094906edab5d34237028c46aa7abae"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_line_ending_test_to_csv_line_ending.assert_raw_in_expected": {"doc_hash": "3add9e8f49b6e48571ebf2d759f83ad42e06c2a5fa6f124f404d5db868bcba58"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_block_mask_": {"doc_hash": "f0eca8a48404bbdbf477eaedc3a578362e09e17f5dd3443e918064a1c09a6d05"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_demo.py_pd_test_make_timeseries.assert_a__name_e__name": {"doc_hash": "a2a61adfa0a41c3c3e95ed1b8536fa8dc4d878618ed06ea15fbe92d6084ad42b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_demo.py_test_make_timeseries_no_args_test_daily_stock.assert_eq_df_df_": {"doc_hash": "dde638262f8dd20b1319d9bc9f4d38b279ecf77f9e5f5b0891f02f65493ccce4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_demo.py_test_make_timeseries_keywords_test_make_timeseries_keywords.assert_1_bb_100": {"doc_hash": "410f7c58e6f3544221b48b0686017ba5899c8d2d6c89c7e05ec8c2f55cbfe4cd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_demo.py_test_make_timeseries_fancy_keywords_": {"doc_hash": "4535abc0f00d9c76f13db17a335fd570563e3859f8990a0fb70614a1bf1b0a3f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_from_distutils_version_im_test_to_hdf.None_3.tm_assert_frame_equal_df_": {"doc_hash": "d4a22e61869588c4f86611e382c567561ae4c57508cfeb961713325888ab4a7d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_multiple_nodes_test_to_hdf_multiple_nodes.None_3.with_pd_HDFStore_fn_as_h.assert_eq_df16_out_": {"doc_hash": "0cc988530ec9d7da710b1576a773ede890f33dc638ef5baca31185154e08d038"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_multiple_files_test_to_hdf_multiple_files.with_tmpfile_h5_as_fn_.with_pd_HDFStore_fn_as_h.assert_eq_df_out_": {"doc_hash": "535703411b6ec05dd05dcba74c3c3249808e8394a9c812570194507a426798fd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_modes_multiple_nodes_test_to_hdf_modes_multiple_nodes.None_4.assert_eq_df_append_df_": {"doc_hash": "0bd538a691c8867ae846ad586c30b36222b071614782ed6bc7eac1218bff0415"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_modes_multiple_files_test_to_hdf_modes_multiple_files.None_3.assert_eq_df_append_df_": {"doc_hash": 
"adf09f842be5bb9b50d0d98b51718fdfb226fac73f8bc91e628f902c96991695"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_link_optimizations_test_to_hdf_link_optimizations.None_2.assert_dependency_depth_d": {"doc_hash": "5a31857f643fc4d8b33ebb56aec2d9e9bb7d87228288c9f255de4bb4c8f9542e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_lock_delays_test_to_hdf_lock_delays.with_tmpdir_as_dn_.assert_eq_df16_out_": {"doc_hash": "6c3118e2dea77def85299dccd52a528158208524267239dea634c5fe5e41aa39"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_exceptions_test_to_hdf_exceptions.with_tmpfile_as_fn_.with_pd_HDFStore_fn_as_h.with_pytest_raises_ValueE.a_to_hdf_hdf_data____": {"doc_hash": "c170370377e125edc70aec091ab129a5fa4af7c1b9bebb3cad6bb3bfdccba68c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_schedulers_test_to_hdf_schedulers.None_2.assert_eq_df_out_": {"doc_hash": "c3580870a37dad63a2a957de5f710e946affd160fe22027a56cb89c461b05938"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_kwargs_test_to_hdf_kwargs.None_1.tm_assert_frame_equal_df_": {"doc_hash": "a3b9270a49d01576fad8d05a684dae48f1413ad484cc4539f5345472e9a0c1d6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_fmt_warns_test_to_fmt_warns.with_tmpdir_as_dn_.with_pytest_warns_None_.a_to_csv_fn_name_functio": {"doc_hash": "201f6d37b23760948cb84688ef520e6ce60dfb5fb088c8722235a8e56b5ac32a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_read_hdf_test_read_hdf.None_2.compare_a_compute_sort": {"doc_hash": "667c9010e84dbb02666a651de4ce00fa34f56b9c81879e81ea0fb26121d7dfd7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_read_hdf_multiply_open_test_read_hdf_multiply_open.with_tmpfile_h5_as_fn_.with_pd_HDFStore_fn_mode.dd_read_hdf_fn_data_": {"doc_hash": "5e5beab4adafb5eea6912a37ef7d5938599c145699b72ad20816e1d803c9a340"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_read_hdf_multiple_test_read_hdf_multiple.with_tmpfile_h5_as_fn_.assert_eq_a_r_": {"doc_hash": "cd8c01976d1c28fd656c85af1a78691422453d481924e8b996ed2fa27ad10c13"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_read_hdf_start_stop_values_test_read_hdf_start_stop_values.with_tmpfile_h5_as_fn_.None_2.dd_read_hdf_fn_data_": {"doc_hash": "8aed0cfece1c2f6a1c52bd73a577e186a0ab103082b156beb72c5f67ab38c612"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_hdf_globbing_test_hdf_globbing.with_tmpdir_as_tdir_.with_dask_config_set_sche.None_4": {"doc_hash": "bcf94ea4a7ac2d9b22ebe9dcf6567298abc45b24e8fabfc4c9f2a76e9813d696"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_hdf_file_list_test_hdf_file_list.with_tmpdir_as_tdir_.with_dask_config_set_sche.tm_assert_frame_equal_res": 
{"doc_hash": "146f5757cca3915e6ae026e0bbfa97a28300b6874d5702514210f070115c949a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_read_hdf_pattern_pathlike_test_read_hdf_pattern_pathlike.with_tmpfile_h5_as_fn_.assert_eq_res_df_": {"doc_hash": "7fb67657ecd4b6f5db86649b3d46f0bd631e045abd8969b5119f2ecd49992722"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_path_pathlike_test_read_hdf_doesnt_segfault.with_tmpfile_h5_as_fn_.assert_len_ddf_N": {"doc_hash": "6eb0ffbb91625c289b7c1ef0bbaeaaca478fb224f07385ef523afbea768d6ed1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_hdf_filenames_test_hdf_filenames.os_remove_foo1_hdf5_": {"doc_hash": "0419e00f47fd4ac512080ef1d550a248f7f3730d21e2191f0ca5edab2d187a95"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_hdf_path_exceptions_test_hdf_path_exceptions.with_pytest_raises_ValueE.dd_read_hdf_tmp_": {"doc_hash": "c6fd0ba3cb06244fe2a2a9ed4561f83e9d426bf5c33c8fd7b968ad912475e8ae"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_hdf_nonpandas_keys_": {"doc_hash": "2a349f28e60822c997f892d2033847546198201f8ccd12022c0a2a044e2728af"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_np_test_meta_from_array._Should_be_5_partitions_": {"doc_hash": "f6fe44db155cc0777bd05578cb53ed198ccbb4e98a3996593c49f82bea2dd270"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_meta_from_1darray_test_meta_from_1darray.with_pytest_raises_ValueE._meta_from_array_x_colum": {"doc_hash": "5816f4ece05c8a493d0f64cd896ccc6b9bea8e3ff4f645d55f3bd7db083c369e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_meta_from_recarray_test_meta_from_recarray.with_pytest_raises_ValueE._meta_from_array_x_colum": {"doc_hash": "79c000be533d704fd616c415f193d3ce0256b9496f4af54d0df3d32ab658fa7b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_array_test_from_array.with_pytest_raises_ValueE.dd_from_array_np_ones_sha": {"doc_hash": "6641183919342883c173e3284c08f91b5c450725ef49a1993007d3d1b25a5779"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_array_with_record_dtype_test_from_array_with_record_dtype.assert_d_compute_to_re": {"doc_hash": "a2fa16e2163010f2ba0a4f1dd58055719fbf24914e9dc2020369d8042ef877a0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_bcolz_multiple_threads_test_from_bcolz_multiple_threads.pool_map_check_range_5_": {"doc_hash": "66f58cb2419ecb867fd733832da9a89ae1d6e4df278c497a1e3aedbcf9b02ae4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_bcolz_test_from_bcolz.None_8": {"doc_hash": "f80df9b0fcf8f38ea948830986118b573f303a2ed0add98573b1d1bcd80d92f6"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_bcolz_no_lock_test_from_bcolz_no_lock.assert_not_any_isinstance": {"doc_hash": "d540b5bec2caeaef40a07507b7c0330713163f24e7795329d691d754a30adb3a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_bcolz_filename_test_from_bcolz_filename.with_tmpfile_bcolz_as.assert_list_d_x_compute_": {"doc_hash": "8e76d4467ba8a1bb9a6badbbe98524ee2bcefc467141a9f1c3497f84c927ad80"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_bcolz_column_order_test_from_bcolz_column_order.assert_list_df_loc_0_com": {"doc_hash": "ca289409253147a90c0a74907210c6a4a1a660135cd6bcaba0eeee5be1310a74"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_pandas_dataframe_test_from_pandas_dataframe.None_1": {"doc_hash": "af67469e63a30ce7089997a9820b25477ec0aad1f585e76ef5a0f4de242548a9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_pandas_small_test_from_pandas_small.for_sort_in_True_False_.for_i_in_0_2_.assert_eq_s_ds_": {"doc_hash": "be41fa43548ea185034aafe429b11cf1da579fd0cfb8129e53ac8669485705a9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_pandas_npartitions_is_accurate_test_from_pandas_npartitions_is_accurate.assert_dd_from_pandas_df_": {"doc_hash": "ff6de579b1fdaf8845bf182a86cdd2b674ef0c1893255c26a70c74d66958d4e2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_pandas_series_test_from_pandas_series.None_1": {"doc_hash": "da7fc0515365b02bfe004b682852416e5a51689bd105ef54f1932e932dc6c834"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_pandas_non_sorted_test_from_pandas_single_row.assert_eq_ddf_df_": {"doc_hash": "4579b36733badea73721e512694c2318fbaa1de1b6e7916d94284690dd0f8f3d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_pandas_with_datetime_index_test_from_pandas_with_datetime_index.None_1": {"doc_hash": "350e79d60973980d011b50c60be56c237d070271d03e0a581f3fb20fd52d48d7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_DataFrame_from_dask_array_test_DataFrame_from_dask_array.assert_df2_divisions_d": {"doc_hash": "1ad22f27e9bfddfdd5181a281604ce89ee6f3a5575426c324caf9250835a753c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_Series_from_dask_array_test_Series_from_dask_array.assert_eq_ser_ser2_": {"doc_hash": "8d53ff6d6675a07c91b34b771881da6e36e950552907161e081f7bcebeabffd9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_dask_array_index_test_from_dask_array_index_raises.assert_m_match_4_2_": {"doc_hash": "0071a5526a1c86ca5cad5b1060aa776568b011c0887ab244a9f283d8d5afd0a6"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_dask_array_compat_numpy_array_test_from_dask_array_compat_numpy_array.None_3": {"doc_hash": "4f63048360e2b532d123f9f743fe5cbf9eed66b91e14167b1b4d80b985c17703"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_dask_array_compat_numpy_array_1d_test_from_dask_array_compat_numpy_array_1d.tm_assert_index_equal_d2_": {"doc_hash": "f6b44ba2b3dd2f7faa4a24c89fe32c213f430a1651d65fcd7c4b656b9a111898"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_dask_array_struct_dtype_test_from_dask_array_struct_dtype.assert_eq_": {"doc_hash": "5da6e8c7b0fc83af9a8c66c0827f58937b7d779af87886f6bc65407af1dca4f7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_dask_array_unknown_chunks_test_from_dask_array_unknown_chunks.with_pytest_raises_ValueE.df.dd_from_dask_array_dx_": {"doc_hash": "36ee3c206f18be49f47b69d9b5605988b9b6b7d721f57c0363d2d89fdd392704"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_to_bag_test_to_bag.assert_ddf_x_to_bag_com": {"doc_hash": "ee1c70da4ff1370e98f8a93e1c48a85539392291e5d4834f8cfe95ee9d1f4d8c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_to_records_test_to_records.assert_eq_df_to_records_": {"doc_hash": "f9e0d54d8552252beb9ceafa2bc8b68f374ba1497c642798694aa7681f350d9b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_to_records_with_lengths_test_to_records_with_lengths.assert_result_chunks_e": {"doc_hash": "91fcbb14b7fc071f353cf1c25bbd47b0ca5afa7ad0928d3375a8f89f8cd9769a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_to_records_raises_test_to_records_raises.None_1.pytest_fail_Unexpected_v": {"doc_hash": "1adba1988b7b45de9957b2934a7fded69436b7f16483a8639e2cdae83edd9d36"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_delayed_test_from_delayed.assert_str_e_value_start": {"doc_hash": "742cbc5a6dd733b2f7a6848f94b4923ea8b9d50a3676289b38a8e4f7b81e073e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_delayed_misordered_meta_test_from_delayed_misordered_meta.assert_msg_in_str_info_va": {"doc_hash": "719419cf9fb1b3f7aa57a8209e7e0b71a9218e4b4d6d0cac0493286765e08df3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_delayed_sorted_test_to_delayed.assert_eq_dx_compute_x": {"doc_hash": "06f807281c9b4a7338413d10ea0fdb71bdb396e7894603e10d69959c0ab2ecc8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_to_delayed_optimize_graph_test_to_delayed_optimize_graph.assert_eq_dx_compute_d": {"doc_hash": "f57fe8c3894174ffecc69a4f5cb5132bbff17b5ddbb75208408578428a7ac03d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_dask_array_index_dtype_": {"doc_hash": 
"2e65929284817857929b3971ef6b1dbaccf765d68b1d48be13ecef82bb4108b8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_json_test_read_json_basic.with_tmpfile_json_as_f.assert_eq_out_df_": {"doc_hash": "319f6bc845c5a8b2923f4069dfcf49bdc436250b4460d585d9c222a8235a62d6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_read_json_fkeyword_test_read_json_fkeyword.with_tmpfile_json_as_f.assert_eq_actual_actual_": {"doc_hash": "c4314d6bcd411f2cf9c8172061d874e7f566f02bb8322604f6561d022d970e26"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_read_json_meta_test_read_json_meta.if_orient_records_.assert_eq_res_sol_check": {"doc_hash": "adcf274b5220fd27dc39c57280e17cd4e4de3e7e78b9bfdb88f65e4be388b2d8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_write_json_basic_test_to_json_with_get.with_tmpdir_as_dn_.assert_eq_result_df_che": {"doc_hash": "a1d6eee2b3cca032ce5ae0dcdbc1639647f642247046b01d96b79f89fb8fe09c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_read_json_error_test_read_chunked.with_tmpdir_as_path_.assert_eq_d_df_check_in": {"doc_hash": "b2934c354be5512d517a59ade7c7f2f4ebb669db24f9da59492b7a24d02e0549"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_json_compressed_": {"doc_hash": "2c6069f7995920380f444871fab2cafb4e958d5f966378a2379534b0f9507eba"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_orc.py_os_": {"doc_hash": "23f8afba464b8a26972dc8211cd1c90850a4eb95602a6756f6007ac571a1c55d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_math_engine.return.request_param": {"doc_hash": "d97cbca1a90d9084db17476557d4c72fe00f8928137fcc10c50b4972299e6fb8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_write_read_engines_write_read_engines.return.pytest_mark_parametrize_": {"doc_hash": "369e43376144030b4e847f680193650215f0f7b07483c51148416aa6f6bae219"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_pyarrow_fastparquet_msg_test_local.for_column_in_df_columns_.assert_data_column_o": {"doc_hash": "48359775d3ad4c1a739d2d94162ccfa80dd3c541d756f2a82640a75bfe8ba6a6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_empty_test_empty.assert_eq_ddf_read_df_": {"doc_hash": "257501b3215af90d67e723c45f2eca38fb05e42421c0a580b0724e4e6e4dd772"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_simple_test_simple.assert_eq_ddf_read_df_": {"doc_hash": "00dd8fe1b9551cc03550ec0e25eb69b5c9be3997375423fa560c8055c9042111"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_delayed_no_metadata_test_delayed_no_metadata.assert_eq_ddf_read_df_": {"doc_hash": "fca3091abe076577b532627923e9cac441fe961bfbf874bf832f8ec74b8190c9"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_glob_test_read_glob.assert_eq_ddf_ddf2_": {"doc_hash": "995af5297713a30e55a0e7f0843379eb5268dd939178d94c110567ac120e06d2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_list_test_read_list.assert_eq_ddf_ddf2_": {"doc_hash": "2f37b160ee46615ac8cb8c7b63c461c4c3ed6c72722ace91c1d03a0b6e60c663"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_columns_auto_index_test_columns_auto_index.None_4": {"doc_hash": "900148a0cd4fb700f6d127a8d3644e137ce86849af888a0aa4a0ebb4a1e843c4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_columns_index_test_columns_index.None_6": {"doc_hash": "3dfad27c6e0eff1307f85c54f513c5bbcd72339cdba204ecb9f8032915a810f1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_nonsense_column_test_gather_statistics_no_index.assert_not_df_known_divis": {"doc_hash": "c7cdff9ed955fb2b413e07d137f681691513d5137305bb7a1b581e1c7c0dc983"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_columns_index_with_multi_index_test_columns_index_with_multi_index.for_ind_col_sol_df_in_.assert_eq_d_sol_df_col_": {"doc_hash": "2ce599a6cfdb3124237a9fd194620e0f0a79e183bd1c7723e0f24e91aea0c354"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_no_index_test_read_series.assert_eq_ddf_x_ddf2_": {"doc_hash": "2bc7236bcac1d3943a5a8d27b03b0c0eb294c7fb3226021c9cd82d76b31b2547"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_names_test_names.assert_set_read_fn_colum": {"doc_hash": "c396434a38ad94facedee8062484f3ef4424990ef3b83d34b49202cfab9ef072"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip_from_pandas_test_roundtrip_from_pandas.assert_eq_dfp_ddf_": {"doc_hash": "b50186ef25e79dd6677f0958233fbc1d1106874c3f47ab6d46748d8e8a785474"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_categorical_test_categorical.assert_df_x_ddf2_x_a": {"doc_hash": "63795c396292c3fc5a69bfc2a01519ff47e97138572a54e872b0faad6bfb43dd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_test_append.assert_eq_df_ddf3_": {"doc_hash": "a0725e76a6efa9e479cfa30372df449e4ec5e859e0b2e5e13ec7618a55e3da8c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_create_test_append_create.assert_eq_df_ddf3_": {"doc_hash": "5f53858500fd21a8c93dbe1d16b2a9b234cb5920d5dfead8c580947db18f0ce6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_with_partition_test_append_with_partition.assert_eq_": {"doc_hash": "389cbd522ca2e7071b232f80e51452101d60a2fd76cb6b2ff87a1cc514a84e47"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partition_on_cats_test_partition_on_cats.assert_set_df_b_cat_categ": {"doc_hash": "d8023e805d3b2fe448a51cae3eff4a65513c0d03f9691310d37b0027b2ab0690"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partition_on_cats_pyarrow_test_partition_on_cats_pyarrow.assert_set_df_b_cat_categ": {"doc_hash": "3c7076d81f03450a9bfb25f8b8d2906694305c22840757d15d1b4615d4599821"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partition_on_cats_2_test_partition_on_cats_2.assert_set_df_cat_categor": {"doc_hash": "f3547f88ee189a242d2ff788b75a14be3bb60e7b63e88b1bdb09364a3f1b2d07"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_wo_index_test_append_wo_index.assert_eq_df_set_index_f": {"doc_hash": "f47c802635fb5365bef246d7c38ed058c07e3408b2b7bb851401095c1d8d7679"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_overlapping_divisions_test_append_overlapping_divisions.ddf2_to_parquet_tmp_engi": {"doc_hash": "c114d364f87801cb1c6d20a08b04fadf576bd2b88b9f88d3b9b7f763ad427b24"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_different_columns_test_append_different_columns.assert_Appended_dtypes_": {"doc_hash": "1fce1bc967f5b030ffbd710f0a7e974d01e6f03f2f0ca17f0594c6d1fd38dbe2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_ordering_test_ordering.assert_eq_ddf_ddf2_chec": {"doc_hash": "341e9fb94c254f44729adb84635ea7f06741f184d48c377e7527a39004dec84c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_parquet_custom_columns_test_read_parquet_custom_columns.assert_eq_df_f_i32_": {"doc_hash": "9d595b451cdd67f4aa7452be6faef1564cbb32679d5063e0b39fb45b85e800f5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip_test_roundtrip.assert_eq_ddf_ddf2_": {"doc_hash": "7695ef5eb9faea63e7494cc6f52f547c40261ffe71d3140a2f91b3c3664d4818"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_categories_test_categories.with_pytest_raises_Value.ddf2.dd_read_parquet_fn_categ": {"doc_hash": "b60564999fbd822299bc9085345a36c228fbc0f82c82e6cb45181a46029df34d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_empty_partition_test_empty_partition.assert_eq_sol_ddf3_chec": {"doc_hash": "6cebf1cb52066117c97e9749a5599e9f647183f7dc80f2a5e925f53da4f2057f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_timestamp_index_test_to_parquet_default_writes_nulls.assert_table_1_null_coun": {"doc_hash": "c76278284859c2742a0058149948eeda40de914ba889cdf38790ec6cd74b56b2"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_fails_by_default_test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_fails_by_default.with_pytest_raises_ValueE.None_1": {"doc_hash": "b362b43b1b875a6deeb50b9b22afb4fdaec23491e69447d45eba325e41523c75"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_succeeds_w_manual_schema_test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_succeeds_w_manual_schema.None_1": {"doc_hash": "2fb1ffb29389d3bf19883156faeba326e71578c38b8e12d73853814d35ba448f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pyarrow_schema_inference_test_pyarrow_schema_inference.if_index_and_engine_f.else_.assert_eq_df_df_out_": {"doc_hash": "7eb9de1056cef18ac72a474adefbbef9a644b0140180148abe8c49ecf98a2457"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partition_on_test_partition_on.for_val_in_df_a2_unique_.assert_set_df_b_df_a2_": {"doc_hash": "4cc22aa0796d433bc8a82bc042fcceb9297f0ba4054ac11a360b3e0028841a88"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partition_on_duplicates_test_partition_on_duplicates.for_root_dirs_files_in_.for_file_in_files_.assert_file_in_": {"doc_hash": "1e3d700d2f55c4629cf813dc990e6042f106d1da7e60f53c87a75ca930d80897"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partition_on_string_test_partition_on_string.for_val_in_df_aa_unique_.assert_set_df_bb_df_aa_": {"doc_hash": "ca7fae00d56908e7de6d3a4189f71e25e7e363f9c5a8059d0b0b7653e9f6baf4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_filters_categorical_test_filters_categorical.assert_len_ddftest_read_": {"doc_hash": "d7c0d2d5cd120bd683a1e6becc1d4878e8f79eb28b29f6930cdae8f1b99b2cf0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_filters_test_filters.assert_e_x_2_e_x_": {"doc_hash": "2464046c1b78ede042a71c854ff50ea7bceb59778ac38594160dea3d5f1973dc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_filters_v0_test_filters_v0.assert_len_ddf2_0": {"doc_hash": "44dc537b585ad5c931dc6c9adee9178de5464248694368ac0a5d51f4a5f7627b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_fiters_file_list_test_fiters_file_list.assert_len_ddf2_0": {"doc_hash": "f722ae46db50bb22f58a23b362a3eb00f967a2ee30ec7d180a2885d1540639a2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_divisions_read_with_filters_test_divisions_read_with_filters.assert_out_divisions_e": {"doc_hash": "392639d60375401d83f551e7d1224f682b05d0c02cf097995bf7bc96fce46876"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_divisions_are_known_read_with_filters_test_divisions_are_known_read_with_filters.assert_out_divisions_e": {"doc_hash": "14879a7ebde170c7b48e1f26867709dcd2152a491faa479647cf92d5453ace19"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_from_fastparquet_parquetfile_test_read_from_fastparquet_parquetfile.with_pytest_raises_Assert.out.dd_read_parquet_pq_f_eng": {"doc_hash": "822be7de8aa6c624264aefa767421f4376e11503f9fdc6a7377cbb39b269915a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_to_parquet_lazy_test_to_parquet_lazy.assert_eq_ddf_ddf2_": {"doc_hash": "65d315ebfecb52b1c677ce4f92225c7c3003cf7d167c4918ec1ddfab4e5cc5f0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_timestamp96_test_timestamp96.assert_eq_out_df_": {"doc_hash": "7e06ac0b3a9a4c2f99646602850ea9b157dd812683063225f50c5b7c98aa30ea"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_drill_scheme_test_drill_scheme.assert_np_unique_out_dir": {"doc_hash": "9f12741efe0600b8d5893da5beb588a7f843f259a660bea87c2e83b960c3757d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parquet_select_cats_test_parquet_select_cats.None_1": {"doc_hash": "fde3b9a5a2ef68837fa2c09126e20c33e1d58bb88ed197a9095b57388dd8fd65"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_columns_name_test_columns_name.assert_eq_result_df_": {"doc_hash": "308a927f5b75c5be11d73cc10bd8c2832e926f1f97f7c745c000fd2d5a7206bc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_check_compression_check_compression.if_engine_fastparquet.else_.for_i_in_range_metadata_n.for_j_in_range_len_names_.if_compression_is_None_.else_.assert_": {"doc_hash": "460299fdbe5be32db1ae526158adfbf7a536b4732cf13462225c6e2c3e34d100"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_writing_parquet_with_compression_test_writing_parquet_with_compression.check_compression_engine_": {"doc_hash": "289c80cee2c30a88c83e1e902ac317a3957cc7a443414eee1005c29b45f18dfd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_writing_parquet_with_partition_on_and_compression_test_writing_parquet_with_partition_on_and_compression.check_compression_engine_": {"doc_hash": "37276d2eea40dffd8e868288e7a74f79bc1fd87825189e39976c6ebf196035f9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_pandas_metadata_pandas_metadata.return.request_param": {"doc_hash": "fc479da81e5986846dc5e3d2994034b7b264fbfdab3a9b35362c9570db6be52a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parse_pandas_metadata_test_parse_pandas_metadata.assert_isinstance_mapping": {"doc_hash": "2467e3e6a4782eec8730cd926d9e8ab992ba2644eb819d3a3fb3026aac31e9d6"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parse_pandas_metadata_null_index_test_parse_pandas_metadata_null_index.None_9": {"doc_hash": "09c63fcb7374f9562b856dbdede17e95fc03838fad32508fc4f1df5e3602e310"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_no_metadata_test_read_no_metadata.assert_eq_result_expecte": {"doc_hash": "2565b3b7df0826eb1815a127de310705d3c48ce5e6c602a739fc14b3e615b37c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parse_pandas_metadata_duplicate_index_columns_test_parse_pandas_metadata_duplicate_index_columns.assert_column_index_names": {"doc_hash": "d7d87ad0c81e8ddfa3620b53d56e6e0bef1b9cfb2240919021297ecddf914aa1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parse_pandas_metadata_column_with_index_name_test_parse_pandas_metadata_column_with_index_name.assert_column_index_names": {"doc_hash": "d789468f551f4530eef7a43d0b0250d7a08bd1ba62d1dc54564bfc93a6c67961"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_writing_parquet_with_kwargs_test_writing_parquet_with_kwargs.for_val_in_df_a_unique_.assert_set_df_b_df_a_v": {"doc_hash": "489395db80227833998214829ce5e0963779fb387b577272a2b3bdec74a1db2f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_writing_parquet_with_unknown_kwargs_test_to_parquet_with_get.assert_eq_result_df_che": {"doc_hash": "bb6f4b5870a31c11f716237099200cea6ea2577530241330169a66c84f63dbc1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_select_partitioned_column_test_select_partitioned_column.df_partitioned_df_partiti": {"doc_hash": "67a735cf5f9387241935ee0a7ea61aac10f04ded08cb0ae84bb0951dfc5eeeeb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_with_tz_test_with_tz.with_warnings_catch_warni.if_engine_fastparquet.assert_eq_df_df2_check_": {"doc_hash": "d24e4f4368b366d846dfc6f12fae3ebf96ce87375fecd17c1fe8402261cae6cb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_arrow_partitioning_test_arrow_partitioning.ddf_astype_b_np_float": {"doc_hash": "b9c279c99b943d024db5361ae81fc278f2798d4312abe1dba51a6ae56020638a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_sorted_warnings_test_sorted_warnings._still_may_have_some_arr": {"doc_hash": "9372daaf00cafcc2370c6831f4e2f0d12c2e3812c1330aedb4d4314dd4d9127d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_informative_error_messages_test_append_cat_fp.assert_d_x_tolist_": {"doc_hash": "7fdc1a0c9efe947ad589eb1a4f693338ce327c13807e7f35221dbe42f604b9c7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip_arrow_test_roundtrip_arrow.assert_eq_ddf_ddf2_": {"doc_hash": "e4fecc4451ae6b97f5fef3bb22830fd00305bdfac8877f1e6098a4c079a7d1bd"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_datasets_timeseries_test_pathlib_path.assert_eq_ddf_ddf2_": {"doc_hash": "60d5ffb40ca1ba155edf169a6a27373dc14ef143095530d71a8d46af93f72fa1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pyarrow_metadata_nthreads_test_pyarrow_metadata_nthreads.assert_eq_ddf_ddf2_": {"doc_hash": "05302691fd22665c095efaca8e5575321692fd635c3688949f2bd6ed92ff218a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_categories_large_test_categories_large.assert_eq_sorted_df_name_": {"doc_hash": "abad7436e8111e000e5356bb57127fbb40b8390515d50b400e2f618bf0cf34ab"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_glob_no_stats_test_read_glob_yes_stats.assert_eq_ddf_ddf2_chec": {"doc_hash": "8d4bfff66ea43ed6c78206e42e7b41adff51853d6303679e69c8d718f7792f07"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_dir_nometa_test_read_dir_nometa.assert_eq_ddf_ddf2_chec": {"doc_hash": "689f7c32f835a37c29f30224160b6ec059615ccca951b78c74859a2814741ead"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_timeseries_nulls_in_schema_test_timeseries_nulls_in_schema.if_engine_pyarrow_an.with_pytest_raises_ValueE.ddf_read.dd_read_parquet_": {"doc_hash": "e3e0bbfbbbc6e9ac570b230b918b9c2de7ef97d0e73095ce6d5f68da285f99a2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_timeseries_nulls_in_schema_pyarrow_test_timeseries_nulls_in_schema_pyarrow.assert_eq_": {"doc_hash": "007458f199ae3385c4392e32b46c01619b88ad8de9217d7099c7189acff6dc93"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_inconsistent_schema_pyarrow_test_read_inconsistent_schema_pyarrow.None_4": {"doc_hash": "e31f3664ef08c9c41d4e99422fb47f8686df5c3e2a4a397ca6a5b9bcaaf773df"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_graph_size_pyarrow_test_graph_size_pyarrow.assert_len_pickle_dumps_d": {"doc_hash": "8e200e06668722ef39b60ad4e0e020c268ba3998455ab342067e8bca007a5f3a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_getitem_optimization_test_getitem_optimization.assert_eq_ddf_compute_opt": {"doc_hash": "525044200ae4a5cd3a2f23711b2499e242a9f878e836f9e3298609e712eb86d5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_getitem_optimization_empty_test_getitem_optimization_empty.assert_subgraph_columns_": {"doc_hash": "b8d6d2b32bf67de9ead815a25ea1419d4414b40214f77fc97d721d3372a7e35d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_getitem_optimization_multi_test_subgraph_getitem.None_2.subgraph_name_3_": {"doc_hash": "7b698c2939f062837ceb437a868a2ec6765ff81cfc9895a9032781effae55664"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_split_row_groups_pyarrow_test_split_row_groups_pyarrow.None_3": {"doc_hash": "8599985a92f88d85a0edc877bd360d7650299d0d524bfed7f2e79e9c97446d2c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_split_row_groups_int_pyarrow_test_split_row_groups_int_pyarrow.assert_ddf2_npartitions_": {"doc_hash": "8b0221c7a9628d7b0bf9fb91354234171aaef6146ebf32f111d17eea4addd36b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_split_row_groups_filter_pyarrow_test_split_row_groups_filter_pyarrow.assert_eq_": {"doc_hash": "d70e40afe16f180ab6ace9ec88edfaeb5343007898e2313c27362d35bb2aa2c5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_optimize_getitem_and_nonblockwise_test_optimize_getitem_and_nonblockwise.df2_a_b_rolling_3": {"doc_hash": "fa1379972ad103a48f58625646608e149e79b4f08df1ec925c4dbb768bdb276e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_optimize_and_not_test_optimize_and_not.for_a_b_in_zip_result_e.assert_eq_a_b_": {"doc_hash": "dfe86c48e50be5f8e45aa0e55c9f2758e8ae1a61d9831e71815de84cf8768289"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_chunksize_test_chunksize.if_not_chunksize_.else_.assert_ddf2_npartitions_": {"doc_hash": "f9505e4f8a71c5746da41df1e89b444b089c7d9a3f334a34defbce5aef475b78"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip_pandas_chunksize_test_roundtrip_pandas_chunksize.assert_eq_pdf_ddf_read_": {"doc_hash": "cc0534dca939cb9a25664b46416f11f9b77b461ec2b44ba6421c4cb07619b85b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_pandas_fastparquet_partitioned_test_read_pandas_fastparquet_partitioned.assert_len_ddf_read_compu": {"doc_hash": "22bd67136ff0f421e25636e5f755a0ec4d175d8b1efb521c43686c9a90c2962a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_parquet_getitem_skip_when_getting_getitem_test_read_parquet_getitem_skip_when_getting_getitem.a_b_dask_optimize_ddf_": {"doc_hash": "02b1593cdd2277c860fe877025216e2f8b3a16da91f392c7ca139c250ece2d48"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_filter_nonpartition_columns_test_filter_nonpartition_columns.assert_df_read_time_ma": {"doc_hash": "270eca13cc9b0dcb1ccddebf205c943fdf8231ff33d65a68d2d4fdd81aef2d4d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pandas_metadata_nullable_pyarrow_test_pandas_metadata_nullable_pyarrow.assert_eq_ddf1_ddf2_che": {"doc_hash": "e56b63e02bce6c503be9a0381b1b6e381d7e5c9468509885fab547dc691a4247"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pandas_timestamp_overflow_pyarrow_test_pandas_timestamp_overflow_pyarrow.from_dask_dataframe_io_pa": {"doc_hash": 
"d933f29c76b8fee20252ad9b0886650c250e51fe5bcbe3c38c54371d9cae036d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pandas_timestamp_overflow_pyarrow.ArrowEngineWithTimestampClamp_test_pandas_timestamp_overflow_pyarrow.dd_read_parquet_str_tmpdi": {"doc_hash": "06e3f8b1220fe4991fdf19d35ba9bdd1beba4e3d1edc5f347d2b47b77f5b42bf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partitioned_preserve_index_test_partitioned_preserve_index.assert_eq_expect_got_": {"doc_hash": "3caf4b2e6327e47a4957fdc9ffa0396b5769ebd3012e69e27bc3a55d08fd2501"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_from_pandas_preserve_none_index_test_from_pandas_preserve_none_index.assert_eq_expect_got_": {"doc_hash": "0e02d2e1ccb31626c263d074b318bb0097851d3b481f5c701123c5d8bf7d6d79"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_from_pandas_preserve_none_rangeindex_test_from_pandas_preserve_none_rangeindex.assert_eq_df0_df1_comput": {"doc_hash": "59d2159c9690a411d249fc783870f9f9f12ab03446dd7236bd56120b173255e7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_illegal_column_name_test_illegal_column_name.assert_null_name_in_str_e": {"doc_hash": "aa26800d6ddd01c4c62c6b00df7a69b731f4f87250535aa394680d2f853bf7f3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_from_contextlib_import_co_db.with_tmpfile_as_f_.yield_uri": {"doc_hash": "999497aed48eaef2943fb225e2e2a33ac8d2d74ee0a941aefb7049989179bd41"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_empty_test_empty.with_tmpfile_as_f_.assert_pd_dataframe_empty": {"doc_hash": "b24886ea299503778d7f0a20341ce070c19e23b103227509e6d113be7415f525"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_empty_other_schema_test_empty_other_schema.engine_execute_DROP_SCHE": {"doc_hash": "969c92d07522933615eb5664468af8b11654fd7f28932ad6cc4bc72e339fa80e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_needs_rational_test_needs_rational.with_tmpfile_as_f_.None_4": {"doc_hash": "fbd40a5a602c5c0b952e15368c3ca40017a02d13025e93df7ef34dc04449e153"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_simple_test_npartitions.None_6": {"doc_hash": "2c15f86ff6eeec823996516ba5e30ed204ef49e38386d529bf20e36d36424ee9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_divisions_test_division_or_partition.assert_eq_out_df_": {"doc_hash": "691dceb07ba1b9a1cf9c89345e5d3bdde54971bb37d1ec75f2b2cc333646096a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_with_func_test_with_func.assert_d_index_d_ne": {"doc_hash": "cc692a8d02daa5bdcf8383b1a81d5af7be71e397311cd3c9830eacf6046492c0"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_no_nameless_index_test_select_from_select.assert_eq_out_df_name_": {"doc_hash": "e8acbd48b2f699aeb1c4f970e2b15011ca409dc9b204860adb265dd8bf1d1743"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_extra_connection_engine_keywords_tmp_db_uri.with_tmpfile_as_f_.yield_sqlite_s_f": {"doc_hash": "aa8e41301ab6c88dc52ad97671e24a2945b1223a66b4670ded9d49d19984d7c9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_to_sql_test_to_sql.None_5.assert_actual_npartiti": {"doc_hash": "585eb593e113d6935a71451de552ce31b6504ef4d8a6ac64f3f4797ee7a2867c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_to_sql_kwargs_": {"doc_hash": "7e691ca4f68f46b83655ce3c3f2b3c5f89c66136068d558cc436582d9a727d35"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/utils.py_pd__get_pyarrow_dtypes.return.dtypes": {"doc_hash": "f5a8dc790ba399bde19d437eea4f3d1e91d416fbb68f2737c0548190ae66dd19"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/utils.py__meta_from_dtypes_": {"doc_hash": "82bdc0ca8b5054c15e88abed08c2d02f15b67b198a9a6b6c6d5f311a9b558b44"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_warnings_try_loc.try_.except_KeyError_.return.df_head_0_loc_cindexe": {"doc_hash": "e3c1edaff84d0f890d610e14cb38db039133a6b571d54e06b5977bd224db3a5b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_boundary_slice_boundary_slice.return.result": {"doc_hash": "1c05e71953dd75262617c5e1b92ec305d2462ac71ac64231dd2a2e47c1fe91a1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_index_count_describe_aggregate.return.pd_concat_values_axis_1_": {"doc_hash": "3f9191d38e6f34a056ce63dc8ce97562b360fbeeb0fcab161f9ed87dbd0f0b60"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_describe_numeric_aggregate_describe_numeric_aggregate.return.result": {"doc_hash": "6228498ac8e2427fe97014091a29647922199d3e04d8326c632f548cf27ac5b6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_describe_nonnumeric_aggregate_describe_nonnumeric_aggregate.return.pd_Series_values_index_i": {"doc_hash": "f8ccc8653706d65444fe17c9de2a1a367a1ed980fabf27799aad74dd073af362"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py__cum_aggregate_apply_concat_dispatch.Dispatch_concat_": {"doc_hash": "6f53face46a64176580938b2af3b2ebb439e242c530f746ca3f7f99199b7727e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_concat_concat.if_len_dfs_1_.else_.return.func_": {"doc_hash": "6456e2e383ff975cea0eef9396d87889f0474602c0719b4f66f9103bed571274"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_concat_pandas_concat_pandas._Concatenate_the_partiti": {"doc_hash": "bd7286a5c7522a5aaa5a008884c3820976e1f7986fa87d01756d0ffb6788cc40"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_concat_pandas.if__": {"doc_hash": "c260f05c4aa43e59f81bd65183779a943e234142433a00b3544ce1a9dcd60b81"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py___M": {"doc_hash": "7b6566b1475fbe2939bdd8867babd5956ba9655c958175239e51e2e0e376b9fe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_align_partitions_align_partitions.return.dfs2_tuple_divisions_r": {"doc_hash": "f94c1da1d99c67c01bdf6f993283a3e5a8004e30d480f8758ecb0b4fc92980fc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py__maybe_align_partitions__maybe_align_partitions.return.args": {"doc_hash": "ae7d74c9a0947735e1b50de2a40790df7d631103997c5d779ea3fda0fca614ed"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_require_require.return.divisions_parts": {"doc_hash": "7312709933437668b0a2180ce502bd18e5c412bd415b4e3ff835c6464c4aef99"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_None_1_merge_chunk.return.out": {"doc_hash": "8f6cd87b1dff190268b7922418b6c632651eb12b399012acc54477be4498df3a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_merge_indexed_dataframes_shuffle_func.shuffle": {"doc_hash": "66d4531717cc08332052c6377494dd31355dafa140d88e56aa4ba5a948014202"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py__name_sometimes_conflict_hash_join.return.new_dd_object_graph_name": {"doc_hash": "c079871e270b4ace6d7fafd884778e7c6e196a0ca7a967acd55fa0d8a4c52aeb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_single_partition_join_single_partition_join.return.new_dd_object_graph_name": {"doc_hash": "b0a416e9820f04ad4616b389d6020c133f0a3797c5eebfb68cfc835ddcdda9b7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_warn_dtype_mismatch_warn_dtype_mismatch.if_all_col_in_left_column.if_dtype_mism_.warnings_warn_": {"doc_hash": "680da0b29348d1dad9f75487e8099d5bb1ac1b8b07e9aeceaa612485581f7d41"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_merge_merge._Both_sides_indexed": {"doc_hash": "cc8ca1d815c53d57ebd5e703b9a1df2404cd6d0ada3ab2745ef46431c789ca5e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_merge.if_merge_indexed_left_and_merge.if_merge_indexed_left_and.else_.return.hash_join_": {"doc_hash": "753728b324d368f5b33327f87652f9745c7abc3d7b142a74f055e40320c4787f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_None_5_compute_heads.if_by_is_None_.else_.return.suffix_reduction_most_rec": {"doc_hash": "88c7ed7730ed3b944e0034904712f3ad8c49bbf0875f27472658bde9163fec0a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_pair_partitions_pair_partitions.return.result": {"doc_hash": "b96d83e8ee219a2fd263cf1536b2e6e536d58ebaa8c2fe77a1c5e4ae8841d2a4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_merge_asof_padded_merge_asof_padded.return.result": 
{"doc_hash": "59c7b7d2447f0eea27137f5812d2dc6ca2939767a20d0124aac9ee8db63cea10"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_get_unsorted_columns_concat_and_unsort.return.pd_concat_frames_columns": {"doc_hash": "229b763988070ac73282144d71da2be65188093ceca5a578005e02b3a6b74a9c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py__concat_compat__concat_compat.if_PANDAS_GT_100_.else_.return._": {"doc_hash": "6bb528b0914f16769df85520a5f7d136e3126646ebeb282bf7b7d64108dba1af"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_merge_asof_indexed_merge_asof_indexed.return.result": {"doc_hash": "639db23966275c9a773befa97c5b22061f6fd413fb13e54d7d42f82656f5aa01"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_merge_asof_merge_asof.return.result": {"doc_hash": "324562d026294cc9d17b6f58b095d6e9ffa3cc1be50a89798d94273b4ec9bd15"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_None_8_concat_unindexed_dataframes.return.new_dd_object_graph_name": {"doc_hash": "90e7b0692ae895d622cdcc8f27ffdabcb60c55c29e45822b54a6466ad91c4227"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_concat_indexed_dataframes_concat_indexed_dataframes.return.new_dd_object_dsk_name_": {"doc_hash": "1b1d356dec52bdf9fab3577c36ba860259e3b417847890e36753a849dae88b62"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_stack_partitions_stack_partitions.return.new_dd_object_dsk_name_": {"doc_hash": "41918b67660aff004665339fe961fa0b6a676a1b1aa95058f2756d2acf2578ef"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_concat_concat._Concatenate_DataFrames": {"doc_hash": "c8b5a89d98f4e5727ed89917db5765554e8816c6af8aed260ed39391b376d92b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_concat.if_not_isinstance_dfs_li_": {"doc_hash": "6cddeb78124b0137d60d1496bafb9e20065b0c0cc8151d1e0ffa07684c402d61"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/numeric.py_pd_": {"doc_hash": "2a142b7c3b3a82348aedfd3bdc7614e913af6483c5b734f5be227c0333853f4a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/optimize.py__Dataframe_optimizatio_optimize.return.dsk": {"doc_hash": "ff50ab141777558c865694b16826dd0b97fe5690413847313c44bfaa72c8606c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/optimize.py_optimize_read_parquet_getitem_": {"doc_hash": "2935962ef90a3a35fca4a0832b067d933d5177916b7f953ef651a4fba360c631"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py__Determine_new_partitio_math": {"doc_hash": "334bd96cea6d0b7a7d94a0f0816efa69518e4398c431281c8de674bd50775ab5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_np_sample_percentiles.return.qs": {"doc_hash": "b8713aea577f0da1fb3468867d5a3bc678c6e374658faee43f705f489e3e4d49"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_tree_width_tree_width.if_to_binary_or_num_group.else_.return.num_groups": {"doc_hash": "66eb22ff94aaf9d79bd9227029debb76d8f10504e0cb8147ffb0aaffd47ce808"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_tree_groups_tree_groups.return.rv": {"doc_hash": "d2c8499c9e970fc53d080a41e59138ba19efbc63c18fa5eaaaafab63ab3afde4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_create_merge_tree_create_merge_tree.return.rv": {"doc_hash": "8c952f3295fd53dd74ec1205ae3b85953b3916b8b4c1c0874c6156d4044e5242"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_percentiles_to_weights_percentiles_to_weights.return.vals_tolist_weights_to": {"doc_hash": "1209a14b73fbb306bbf65ded2f9faccd53d9b56363c218656a3f1b8b82624533"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_merge_and_compress_summaries_merge_and_compress_summaries.return.vals_weights": {"doc_hash": "ae6d2afab3ceba92bc3a7404513f9087ab0a0e8518b42d2ec8499e0ee946f90e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_process_val_weights_process_val_weights.return.rv": {"doc_hash": "219c560d1f844739dc56ba87e9fba5fab190be5507014a594ec593760846a853"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_percentiles_summary_dtype_info.return.df_dtype_info": {"doc_hash": "26f9fe0f1d01d5f2cd8110a1fa94881384060db8e37629a819209f9d54a68416"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_partition_quantiles_": {"doc_hash": "5c313d7e1e4852d8c8e791809c032b1395ad7002547f233fd1267809e4955f6e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/reshape.py_np_get_dummies._": {"doc_hash": "320b6eb077cb1644e03b2b7fcef118596da3e3a84f02c16bcf5d8b8c90cee691"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/reshape.py_get_dummies.if_isinstance_data_pd_S_get_dummies.return.map_partitions_": {"doc_hash": "8b64fb46c5e0778378569c9fa2f2b62d2d3cf0453c8be5c96a3b5f09ef308cae"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/reshape.py_None_3_pivot_table.if_aggfunc_sum_.else_.raise_ValueError": {"doc_hash": "5c55bd1700e471824c9b1faea53eb7ead49b97ed00230854c3cd754b1558b6ea"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/reshape.py_None_6_": {"doc_hash": "f8b12f9935d31676f2d267aed4db0e464c47e5ccc2db4f71e5b6fe34053fd388"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_datetime_overlap_chunk.return.out_iloc_before_after_": {"doc_hash": "84581243745c586c44085f4c67e24505f9648b4a9cbf6499a41deb282e52aa79"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_map_overlap_map_overlap.timedelta_partition_message._": {"doc_hash": "b0ffa6aaf49b6d2e3441a1e8d7598a92cc47bf27c761da8c3b8ac18d7bb03838"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_map_overlap.if_before_and_isinstance__map_overlap.return.df__constructor_graph_na": {"doc_hash": "8cbb11a8ccc34e83bee4661e2d74112a2742363634f6324a9977b86f964f01c3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py__head_timedelta_pandas_rolling_method.return.getattr_rolling_name_a": {"doc_hash": "df6fa0115c09c6db9c775ba53df50ac339311af4c585110291aa3da446c10dc0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_Rolling_Rolling._has_single_partition.return._": {"doc_hash": "19220193e55dd0386676fc80cf7997682f91f5fde8f0b514e53999f6a0a2be33"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_Rolling._call_method_Rolling._call_method.return.map_overlap_": {"doc_hash": "ba8ff0a803f823a0ec9487d521186203e7f354b5ec9163f53a948e322a621bd8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_Rolling.count_Rolling.quantile.return.self__call_method_quanti": {"doc_hash": "18e2aecb65984a18ff5d045fb48669f0214a0de25c223321b771e12145c3681c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_Rolling.apply_Rolling.apply.return.self__call_method_": {"doc_hash": "9740a2a42132e95247345cd179185bd1176d6f95f425a3cbc8e97e459d1c28db"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_Rolling.aggregate_": {"doc_hash": "d0705f88ea786ecba41701653f7a39a6f6133fb7e64999d30b3534abb3d0f5be"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_contextlib_logger.logging_getLogger___name_": {"doc_hash": "2f5a219e87e82678cf4a9e57fc0db701978ef3b1ae7ca760ac8a9d7556ce2797"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_set_index_set_index.return.set_partition_": {"doc_hash": "47b046e4f0cfe901225563b86f221d50270548928d57b5bbaed4062264776aa2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_remove_nans_remove_nans.return.divisions": {"doc_hash": "ccd74e9904c18611ccbe201d506293c82f0e2fead40597e130ee37714fe6726a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_set_partition_set_partition.return.df4_map_partitions_M_sort": {"doc_hash": "dbafb3156d499ba8b1477c62ffd099a6baa18dfab34a694a9d0db47fccd3ad31"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_shuffle_shuffle.return.df3": {"doc_hash": "20b28c5405a17ebf88c589c6f289ce4500be30563ccc247dec775f0054f0d897"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_rearrange_by_divisions_rearrange_by_divisions.return.df3": {"doc_hash": "e9d48a9f536b33307a39ecbf67582a6fd6aae23231fad34ff4089788717af780"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_rearrange_by_column_rearrange_by_column.if_shuffle_disk_.else_.raise_NotImplementedError": {"doc_hash": "1922dc3f415048a2cd1ec56692923e7d957e6da93b49cceeeff004f29b68dda4"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_maybe_buffered_partd_maybe_buffered_partd.__reduce__.if_self_tempdir_.else_.return._maybe_buffered_partd_F": {"doc_hash": "339119ec3d1949cc14618a6ce9697799ad713dd926c5faa88092159aae65169f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_maybe_buffered_partd.__call___maybe_buffered_partd.__call__.if_self_buffer_.else_.return.partd_PandasBlocks_file_": {"doc_hash": "e46e5aaaecf6a399248c7f0f82fed7869a62c8b4ef95015b806c8ad0dd06c80e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py___partitioning_index.return.hash_object_dispatch_df_": {"doc_hash": "516156ee0146cb9b06bcb504d8fb134a92d5ae3a57f78e58f9e4d1318283c71c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_barrier_set_partitions_pre.return.partitions": {"doc_hash": "e95ad0186b31d84a8da1f9d235727522a27c590faf85c860ebce9b5b94a6d115"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_shuffle_group_2_shuffle_group_get.if_i_in_g_.else_.return.head": {"doc_hash": "3a0c5aa57c515ba40f8c7f4f8954578c167c09ce66888256ce210676d4791d0d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_shuffle_group_shuffle_group.return.group_split_dispatch_df_": {"doc_hash": "5941d0b1a4af3b4d9f07974ef5889aa18963836ea119dabf54594a6165822af1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_ensure_cleanup_on_exception_ensure_cleanup_on_exception.try_.except_Exception_.raise": {"doc_hash": "c644d2737240e62dd245912fdf8ed360f83ac92a5fa96c8b715b620f9d4040c1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_shuffle_group_3_get_overlap.return.df_loc_index_if_index_": {"doc_hash": "cff74768816b7d825885434cb212d9d08d55002e81523553be81860fdcdc49e5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_fix_overlap_fix_overlap.return.new_dd_object_graph_name": {"doc_hash": "86a556d26680c6b99cc3da3f2111d361c70df13cc9459eab3be1f1962aa9c58c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_compute_and_set_divisions_compute_and_set_divisions.return.fix_overlap_df_overlap_": {"doc_hash": "dc673f3b6065cd3b834749ac5d86751588adc340aa763a68d012e4ba86a9b94c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_set_sorted_index_": {"doc_hash": "2638a2eaa1f76b136eb2608bf9331cd378208fc78bdecfe4940ada801d2dfd47"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_contextlib_MyAccessor.method.return.self_item": {"doc_hash": "4ea444cc8d671971db09dc15ee85d52f3775767cf9af590064c39d43577fcda9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_register_test_accessor_works.with_ensure_removed_dd_Se.assert_b_mine_method_": {"doc_hash": "5d866894c442da9317ed3e5bd811e3e9eebb36888facd9b3f9cf7343eb343f00"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_df_ddf_df_ddf.return.df_ddf": {"doc_hash": 
"1b8f86350ad54792624c242dc808fec8d93768164da0542341624d31c2acfa12"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_dt_accessor_test_dt_accessor_not_available.assert_dt_accessor_in_": {"doc_hash": "adae725be97812f3c2f2d7b36c7101f98e33a68612a552140f4bd129d5ea91b7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_str_accessor_test_str_accessor.for_regex_in_True_False.assert_set_ddf_str_col_st": {"doc_hash": "db9e48c5440b32f268a44088120419523892675003571881c5eab94c8784dc7d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_str_accessor_not_available_test_str_accessor_extractall.assert_eq_": {"doc_hash": "33ed514b6b733d48a58035b4656756c516b61a3faf1a9b0843c83b47d9b6de36"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_str_accessor_cat_test_str_accessor_noexpand.assert_ds_str_split_n_1_": {"doc_hash": "98e8269f8396a7ca6f60c5f73822ae6070cfea80ded5a7dda1219f29a2d4f504"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_str_accessor_expand_test_str_accessor_expand.None_2.assert_eq_": {"doc_hash": "df1b897c10e95d3f7a36d893f15ebf0afc8bcad6a6042c6d5b1b08320893f17d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_str_accessor_expand_more_columns_": {"doc_hash": "1122c22381ef7c3e0d155069f2a256e0d9fa97eddf851f748bb38292cf4b01c5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_from_datetime_import_date_test_arithmetics._Arithmetics": {"doc_hash": "ce870b43074430c30f7da7ca47d8b1ac604db3dfb2883f71a2aacdbf1eb46bd2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_arithmetics.cases_test_arithmetics.ddf8.dd_from_pandas_pdf8_4_": {"doc_hash": "4f98a906fe18ac98b84ca092f1c04059af4b58ced7fcba949484dd240fb1622e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_arithmetics.pdf9_test_arithmetics.None_1.check_frame_arithmetics_l": {"doc_hash": "5ec5c7a4f133649ff0948cfe51e002c81dbb4f715ae46549b9a35a1655f64518"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_deterministic_arithmetic_names_test_deterministic_arithmetic_names.None_2": {"doc_hash": "f99d6a65e0c8788251cd26fa827c061aecb8c6d99648a0bf69d2f902834e347e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_arithmetics_different_index_test_arithmetics_different_index.ddf6.dd_from_pandas_pdf6_2_": {"doc_hash": "a7b875111deb85e378db952492391f82532f31254dc4791f9253aa0bcf17a59b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_arithmetics_different_index.cases_test_arithmetics_different_index.ddf8.dd_from_pandas_pdf8_2_": {"doc_hash": "49b384c6aacf81dde10d88505b52f040c13bda62766f54a127747d23e9fcac22"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_arithmetics_different_index.pdf9_test_arithmetics_different_index.None_1.check_frame_arithmetics_l": {"doc_hash": "ce9e6c53fb1182129abe2d0ff496e8a218f225827b3b63ab2d65fb62ee2e4797"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_check_series_arithmetics_check_series_arithmetics.None_1.assert_eq_l_r_el": {"doc_hash": "827abcce2382a274948bd71383b3371ef8a83c3413a4e0f650adf221ac83ac84"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_check_frame_arithmetics_check_frame_arithmetics.None_1.assert_eq_l_r_el": {"doc_hash": "2f789443e1e3b9bd02db2a0caf4dae2a79597c19c1bc30910e1368be6b8b39c5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_scalar_arithmetics_test_scalar_arithmetics.assert_eq_l_r_el": {"doc_hash": "777f93f7e9b2dbd2c39e4398fcbe06c43ad19ecd48de1be0d9817ffc9c895434"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_scalar_arithmetics_with_dask_instances_test_scalar_arithmetics_with_dask_instances.None_7": {"doc_hash": "a3b07c2d7b1e47bcef4f2027188c2925a4ad6b45571d3627960bc9be539b94b1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_frame_series_arithmetic_methods_test_frame_series_arithmetic_methods.s.dd_core_Scalar_s_0_": {"doc_hash": "ff83f635105d72160e3ebf53f028b0de39b5284f88e3268bb46f11efbb0c03b2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_frame_series_arithmetic_methods.for_l_r_el_er_in__test_frame_series_arithmetic_methods.for_l_r_el_er_in_.None_1.assert_eq_l_rmod_r_fill_": {"doc_hash": "ac49eb080bcd90e203ce260d4b36cdd636e485b61f62eafc8e3b12e0c6d26abc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_frame_series_arithmetic_methods.for_l_r_el_er_in_ddf_test_frame_series_arithmetic_methods.for_l_r_el_er_in_ddf.pytest_raises_ValueError_": {"doc_hash": "82b8658b72fa65adfc3154f7230fd01c8c0244c054a7ea000b58de24345e2a64"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_frame_series_arithmetic_methods.None_2_test_frame_series_arithmetic_methods.None_2.for_axis_in_0_1_index.assert_eq_l_rmul_r_axis_": {"doc_hash": "03c6fcad459da82de55ecc8d15276b5dacc2c1718fda8caea2a7c68909827583"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_test_reductions.boolds.dd_from_pandas_bools_2_": {"doc_hash": "fd965bf862b25117f4fb003fb485381aa64043999f04d3d76b4cc451112695c7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions.for_dds_pds_in__test_reductions.for_dds_pds_in_.None_17": {"doc_hash": "747e3916726f9abaed7a8acadece1fe107b34d372288e08600ec9faee3f08f82"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions.assert_dask_graph_ddf1_b__test_reductions.assert_eq_ddf1_index_coun": {"doc_hash": "86032984ce60a70cf2adfee07303f879efe58606fea729be5acd1099d96bda3c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_timedelta_test_reductions_timedelta.assert_eq_dds_count_split": {"doc_hash": "113f4e0d2fdc2e4073ead66b10b50837841f2aae73e3935532ea07f7f66b98ab"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_out_test_reductions_out.None_4": {"doc_hash": "3daab12ec97b452a87393c0998d415aef743fd02bd3ab5c976208d6da92d6de0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_allany_test_allany.None_19": {"doc_hash": "19f3db16841ceab3b6f09c936577042c0a7897f2af560b496813baf1017db352"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_deterministic_reduction_names_test_deterministic_reduction_names.assert_": {"doc_hash": "555f4b4a5b761327f9459844c2be5169dfe915315f1fa1b2b05cade0cc6b7346"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reduction_series_invalid_axis_test_reduction_series_invalid_axis.for_axis_in_1_columns_.for_s_in_ddf1_a_pdf1_a_.None_8": {"doc_hash": "2e56d38cb15b25d080f901c62601aa0c6c8e4fb39132429a96151cb35c41e828"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_non_numeric_dtypes_test_reductions_non_numeric_dtypes.None_13": {"doc_hash": "80ee85fc3b00038963e6e7200a21de37526e26bf111ed1ae806f4929e2ae566d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame_test_reductions_frame.assert_eq_ddf1_mean_split": {"doc_hash": "690424583ee09c400888216787a5585ff473fc197e40673ae73b03bbf5d0d2df"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame.for_axis_in_0_1_index_test_reductions_frame._axis_1": {"doc_hash": "c35878af0f5f3edee1cae6af93ae8d5ccafac48a40cd313f759ead0dd2ceaa06"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame.None_31_test_reductions_frame.None_39": {"doc_hash": "1f7ca155cee57d640cf59044779584961766b7ecdc959bb6c866163930e536e7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame_dtypes_test_reductions_frame_dtypes.None_3.else_.None_1": {"doc_hash": "7319355078eefc8b251fd76c20cb0fbb39c5d2881e81d831f1bcf87c7e9ffd2c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame_dtypes.assert_eq_df_sem_ddof_0__test_reductions_frame_dtypes.assert_eq_df_numerics_var": {"doc_hash": "860239183964138617667b89229cad9e2bc2d262b5be429b110c4881f07b942d"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame_nan_test_reductions_frame_nan.assert_eq_df_mean_ddf_": {"doc_hash": "d5f0f22bd3233dcdeef998157c73c31c4c62a72c4114b32b237d388eaef77888"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame_nan.None_1_test_reductions_frame_nan.None_1.None_21": {"doc_hash": "ac75972928907c0a8757bce94dfc4676e488b05db729278e9e2912f140057afb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_series_comparison_nan_test_series_comparison_nan.assert_eq_": {"doc_hash": "398365e3ff529e10b9c5158aa3496454e0c43ec826e3248aea4bedb7ce29fb17"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_skip_if_no_intna_test_divmod.None_3": {"doc_hash": "56dbf3ebeaf953b3aed191ee86feeea52ed7385685e917ff63e274cbbcab12b4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_moment_test_empty_df_reductions.None_1": {"doc_hash": "9a207695ef2d84e0c93630bca1c9f61e224c787463195e33b144972ce628ce76"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_series_agg_with_min_count_": {"doc_hash": "3d0a30f30da9f19a5e786cf910334de645cc66bbcd25b95bbcfa1a323d80a31d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_boolean.py_pd_test_meta.dd_utils_assert_eq_": {"doc_hash": "7b06fc7b5a3b4f233761c496d4bc86fdfc36805c91185c67f0fbee1835c4cfff"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_boolean.py_test_ops_": {"doc_hash": "488fe3a1d13b57cbba6636bf7b404f610c6ea88122b0cf22dc25873db39ef944"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_operator_frames6._i_set_index_i_y_i_x_": {"doc_hash": "b952c683f8d41fa40579c8aaaf05805c60ee75bf080b9ebfbacd3a426370b9b0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_concat_unions_categoricals_test_concat_unions_categoricals.None_8": {"doc_hash": "f0d11539fa8cab276e9343b3c295e6d465e4e4688aef3c1547c98000fadfd53b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_unknown_categoricals_test_is_categorical_dtype.None_3": {"doc_hash": "fc43475a4f568ffb17e6d9c9c9f2b6c569625327dd17c994f6b031116d95ba50"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorize_test_categorize.None_2.ddf_categorize_split_ever": {"doc_hash": "707a51be02f0d6be8a8c281bb6d786c633dbb199a8d4c87764a61e52afa34ec7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorical_dtype_test_categorical_dtype.None_5": {"doc_hash": "51ee7aa209ad84c46901223e44414b8f0c98c8054fd2df5586431dfdd4e1a4fb"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorize_index_test_categorize_index.assert_ddf_categorize_i": {"doc_hash": "c13d71593aa75a29ea3198d61ec61cf35d50a5e171b501d7811726c8c26cf04b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorical_set_index_test_categorical_set_index.with_dask_config_set_sche.None_8": {"doc_hash": "396be4b80e4b30783c19a99869cda56ff17080ef9808e23599de378e6cd2c03d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorical_set_index_npartitions_vs_ncategories_test_categorical_set_index_npartitions_vs_ncategories._Test_passes_if_this_wor": {"doc_hash": "fd8145630dbcd888e77851f56fc8e0d5060068210155358364397eb7810d8d5a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_repartition_on_categoricals_test_repartition_on_categoricals.assert_eq_df_ddf2_": {"doc_hash": "b4a5e28f0a999891056190e8fe889b1115666cf71b624de9eba9b0314e5f1c0a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorical_accessor_presence_test_categorical_accessor_presence.assert_not_hasattr_ddf_in": {"doc_hash": "62eb9f8c34927ce7557a4b7da7192958fbc4e10771f8c54ea0ba601758e94c2f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorize_nan_test_return_type_known_categories.assert_isinstance_ret_typ": {"doc_hash": "823c90631d1648b8fc053b832493b6dba91f5a39a803acf32f92e83f23773b5d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_TestCategoricalAccessor_TestCategoricalAccessor.test_callable.None_2": {"doc_hash": "5e8848f857de277e9ea5ae5a2798c9443b9b1ba622a81ad49f335aa69a14eb96"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_TestCategoricalAccessor.test_categorical_empty_": {"doc_hash": "711d787134f4038ddb2d39d988829f3f4f99919db5ab2c0fc16fcaff14a21922"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_warnings_test_dataframe_doc.assert_disclaimer_in_doc": {"doc_hash": "34e1f10453ac1a7c5a96af0ba18a0877be1b5ffee0738229423f508719c702de"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dataframe_doc_from_non_pandas_test_dataframe_doc_from_non_pandas.try_.finally_.del_dd_DataFrame_foo": {"doc_hash": "cdc41c1f1a5331665ce60432902cc57316b2ff8e797c82494af64b3f9e9acb60"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_Dataframe_test_Dataframe.assert_repr_d_": {"doc_hash": "725328faca936925a9f9ce034c58ed255a0f0811ee57aa45d02af4956612233d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_head_tail_test_head_tail.None_3": {"doc_hash": "54de0932a9d11cceb50b6ebff03edbf3d20671d65f813334aba09b3525d85135"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_head_npartitions_test_head_npartitions.with_pytest_raises_ValueE.d_head_2_npartitions_5_": {"doc_hash": "ef5ba091322b14ecf091c745229502ab69a79febff24f31fd9349ba1d1649d62"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_head_npartitions_warn_test_Index.for_case_in_.pytest_raises_AttributeEr": {"doc_hash": "0fffb81e4aaf40b8aae753312eda1fbdd17c5df299df9e75fff9af68cc163d62"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_Scalar_test_scalar_raises.with_pytest_raises_TypeEr.bool_s_": {"doc_hash": "6a54869976d938eef84db74c33c1b474aed51e5d7641d3c828d9634296242a8d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_attributes_test_attributes.None_1": {"doc_hash": "fb244849b780aa5165f5b31cc27a7e00794c9e0eb2eea539defec5ef62938857"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_column_names_test_index_names.assert_ddf_index_compute_": {"doc_hash": "fd32732776990475340dc6e5acf8af4c210c12c3ec57bdda18970edd95ca9132"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_timezone_freq_test_timezone_freq.assert_pdf_tz_0_freq_": {"doc_hash": "2df77f68e82961a84c0044346294884fd7ad011f4e106c598fb09c761250e895"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_rename_columns_test_rename_columns.None_5": {"doc_hash": "871cea156ff4bd81d85c1bf6494238fa20cdedfdf19e0c6fb92e339184db6dac"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_rename_series_test_rename_series.with_warnings_catch_warni.assert_eq_dind_ind_": {"doc_hash": "4d9d408b0269cb7590666899acde5d5951a38ab7d6320d5f968a476ea3ddd708"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_rename_series_method_test_rename_series_method.assert_eq_ds_s_": {"doc_hash": "7ee5066f4e099d29d2564c86b02bfb4d1dcc072a25a2d50eead076dc03d408e7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_rename_series_method_2_test_rename_series_method_2.assert_eq_ds_s_": {"doc_hash": "7c852e0815982aca79ff5ba3441dc3b76f6b3bd9afa8afc07a519f134559bd61"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_describe_numeric_test_describe_numeric.None_6": {"doc_hash": "a087f134db12563319dd56078e0a8234aa78c20e5b81116241bb4bdf5a2c2da4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_describe_test_describe._Act": {"doc_hash": "e3a97c1ff6e62717cdfa49dccc91a9d1deab7982e3a7d000a9bed6998d9a32b4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_describe.desc_ddf_test_describe.if_subset_is_None_.for_col_in_a_c_e_.assert_eq_": {"doc_hash": "a17429f3d4c91244d7b8e49a3fd5e57c38f3819c890a39733f556a05818dbaa0"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_describe_empty_test_describe_empty.None_2.ddf_nocols_describe_perce": {"doc_hash": "e4707820bac263eda75e0fa4c35e02b7ab390ec323cde885833abd945de2b8af"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_describe_empty_tdigest_test_describe_empty_tdigest.with_pytest_raises_ValueE.ddf_nocols_describe_perce": {"doc_hash": "68d5fd80cd3b3acc7a5fbfe349f57ac2964c95a25379a2a5b6db5b2498da854f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_describe_for_possibly_unsorted_q_test_describe_for_possibly_unsorted_q.for_q_in_None_0_25_0_.for_f_convert_in_list_t.assert_eq_r_75_75_0_": {"doc_hash": "d8451b86494b40294336aaebf672d0e7672af6e3dea28bfb73f41939c59d1255"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_cumulative_test_cumulative.None_43": {"doc_hash": "dd9c216d3d11d6f5fcbe2ab6d79eaf416ed059eba59fa2bf1ceae1adf497e6b0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_cumulative_empty_partitions_test_cumulative_empty_partitions.None_1": {"doc_hash": "e5a8d10558f58b91ce49d5d12057432e72b13919e97a231a761051a59a8aa0e2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dropna_test_dropna.None_17": {"doc_hash": "69f0c31c74a105e516c3ac3ff59954074280fe0e1f0664854090ece7b6a2a1a4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_clip_test_clip.assert_eq_ds_clip_upper_u": {"doc_hash": "426bf8a56ee330b507b3a672d09aa5239d679f9570c2be427282387130e7bac6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_squeeze_test_squeeze.None_2": {"doc_hash": "ebbeea38e32beb1b99c1f66b2b0161be5bf50060f7469826e805048a7b39ae97"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_where_mask_test_where_mask.ddf6.dd_from_pandas_pdf6_2_": {"doc_hash": "c7d714dbb8261de30a7eaa59dd0fd5d946bcddbf8b6dcb0291789db6bde4c1df"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_where_mask.cases_test_where_mask.for_ddf_ddcond_pdf_pdc.None_9": {"doc_hash": "afc23dd8d6d6bd5bf7a1ac3c155bc66cee741a7b8cf76ff926d2be0b212b14b5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_partitions_multi_argument_test_map_partitions.assert_result_dtype_np": {"doc_hash": "6a22f33b51c549820da38b8efc3ac4eb61151b039933ea2a8866f58797198b98"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_partitions_type_test_map_partitions_names.None_2": {"doc_hash": "859d0e3c73c26f9fd64e7f773f01caac352d7fd3d4907e9f24f983572bc36316"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_partitions_column_info_test_map_partitions_column_info.None_6": {"doc_hash": "3a471086deabf3afdae427343fa48ddc27474582224577d407da879db0050c3f"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_partitions_method_names_test_map_partitions_method_names.None_5": {"doc_hash": "a9f7ce1a7462fa4f59899f98443bf291ed3f87f4ac61c1108f75b16ca5bc0fb0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_partitions_propagates_index_metadata_test_map_partitions_propagates_index_metadata.None_1": {"doc_hash": "dd12e0e98dd1e9f3ffba8d547e7c16787dc40d5258cb326b27f079d6c8dc4cf4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_partitions_keeps_kwargs_readable_test_map_partitions_keeps_kwargs_readable.assert_a_x_map_partitions": {"doc_hash": "f1f36a383528f87c286dec27ce16e9e0ed096ddad8c64a35187de53129a26be0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_partitions_with_delayed_collection_test_metadata_inference_single_partition_aligned_args.assert_eq_res_ddf_": {"doc_hash": "cc4a13da158aed378a6e88631f43e6ee791b7e84c900e9f18f2d02d8a8ab9ade"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_drop_duplicates_test_drop_duplicates.with_pytest_raises_NotImp.d_drop_duplicates_keep_Fa": {"doc_hash": "90153c8e53ce749d0b88aa8ed674b4d9658a1251a69ea4cff1a1036c952ee49a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_drop_duplicates_subset_test_drop_duplicates_subset.for_kwarg_in_keep_f.for_ss_in_x_y_.assert_eq_df_drop_duplica": {"doc_hash": "5726d5d34914d497948cb37e745755e7685ddf262ebe7778810151926514ec91"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_get_partition_test_get_partition.None_1.ddf_get_partition_3_": {"doc_hash": "ef47f3a026a2663788f2b0ef3804bc52efebc3d6ee90ecb5e781bba69df9503f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_ndim_test_value_counts.assert_result__name_re": {"doc_hash": "d36a0a89cd387fca2ebf1577d803dc23c49a4ee6ccf4d63df4090f4bd391c1a8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_value_counts_not_sorted_test_value_counts_not_sorted.assert_result__name_re": {"doc_hash": "253764737b31f1d729f9efa0b0794589586c57ddd1d9fabbaa91c744d3e0de79"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_value_counts_with_dropna_test_value_counts_with_dropna.assert_result__name_re": {"doc_hash": "5e55672516983ef1420aae4599e10ffd270483c778892b428bc2939761a63eb7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_unique_test_unique.assert_ddf_x_unique_split": {"doc_hash": "b1c13e39f48b5d7633592a41fac90c3f6425296aee120a5aacdbb5b700e3ff73"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_isin_test_isin.for_obj_in_d_f_series_.with_pytest_raises_NotImp.d_isin_obj_": {"doc_hash": "f3937298088992afc8f9f3aeec98d8528780e67427b63a26f02bf97ad1ecf806"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_contains_frame_test_size.assert_eq_d_index_size_f": {"doc_hash": "9ed5eecbf01b4a6838bbb8de0de6b7c61ecd4d256d068acf925a4a18e7115880"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_shape_test_nbytes.assert_eq_d_index_nbytes_": {"doc_hash": "36e85ab63a39057387805d4cb231bf80969dbac88aaac36371b39a48349a043e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_quantile_test_quantile.assert_result_expected": {"doc_hash": "07a24c9eb0a8f37973dc473789a0e25604cd99a84653f3cb7ba24a14a076a88a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_quantile_missing_test_empty_quantile.assert_eq_result_exp_": {"doc_hash": "7274b1178ef38efb88bc107d9281c901f819e3a569b300e716d6a65fe0f4041b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dataframe_quantile_test_dataframe_quantile.pytest_raises_ValueError_": {"doc_hash": "29ce668c1c9feea5d730823f64fef1e3a140161f11306700e75c18c02d4c53c5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_quantile_for_possibly_unsorted_q_test_index.assert_eq_d_index_full_i": {"doc_hash": "b8b0c8c63916bf86780a0b26e01775dfef20a265ffa4f9487456d3de58ae3e98"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_assign_test_assign.None_3.ddf_assign_foo_ddf_unknow": {"doc_hash": "c62a3e7034e19cc9e9d7e4d9bea40b6a8876cd649d2cead3630078f61e36c3eb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_assign_callable_test_assign_dtypes.assert_eq_": {"doc_hash": "3e4fd22046ae9d0a2fe408fe8a463e9d6c3f8feb8d7057b912f01dfa72388f11"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_test_map.None_6": {"doc_hash": "4399403f8905ef2b064229539ac5148493ab806f7bce80a107e76c252c61ec67"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_concat_test_known_divisions.assert_not_df_known_divis": {"doc_hash": "7a55851514ab3fc54438e6ae86c86383ece154e600b78e7ce28b55db75efa5b2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_unknown_divisions_test_unknown_divisions.assert_eq_d_a_d_b_1_": {"doc_hash": "9725f40d9cf79c396ba3e530d90b6bb18af4c28fb7ad25ca48bbda97b9dfb05e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_with_min_count_test_with_min_count.for_df_ddf_in_zip_dfs_d.for_axis_in_axes_.for_min_count_in_0_1_2.None_1": {"doc_hash": "a49143de9e8a24256dc9eb8aad3f7721fab0df64e31bf9387fa21711e402f736"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_align_test_align.None_15": {"doc_hash": "894de5570951a745f984554212402b038483e67bfa442f0ab2af379f65c7d8a6"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_align_axis_test_align_axis.None_1.ddf1a_A_align_ddf1b_B": {"doc_hash": "37beea4ebd0be84b11296a09d3d717726048e158f9f89b8f0a3585917680dc7a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_combine_test_combine.assert_dda_combine_ddb_a": {"doc_hash": "9d83bdd95be426db9b718da57e85e11794ac84c9978ab3a77bcdc62622181311"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_combine_first_test_combine_first.None_5": {"doc_hash": "66c8867f750fe3883ad61cf1eb51cbc684ead1910866c37529348fd223c5d658"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dataframe_picklable_test_dataframe_picklable.assert_eq_s_s2_": {"doc_hash": "ab3cdb514106a0b0bb0b9c4d135a1ad74c1500e9cf6499cb5f9147dcfec02749"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_random_partitions_test_series_round.assert_eq_s_round_ps_r": {"doc_hash": "bb2f8b6457a77ac6eef5173a9c70d560880083ddce14a5d79c4ef90a7d8f1c3d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition_test_repartition.for_p_in_range_1_7_.for_div_in_5_10_2.assert_eq_pdf_x_rds_": {"doc_hash": "87a889f13b5741555c25e47abf2a2283d9e5e4c8024c5be85bba623f03770f0c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition.pdf_4_test_repartition.None_2.for_div_in_list_Yadijm_.assert_eq_pdf_x_rds_": {"doc_hash": "e011d144b761de75248c978a59c3ef06c629f31297912a30ce2b9edd4db37472"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition_divisions_test_repartition_divisions.None_1": {"doc_hash": "e867c0c8f90b29cceeb9ff2fa79a7228a33dafbe11c1237de3dbda707e029ce8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition_on_pandas_dataframe_test_repartition_on_pandas_dataframe.assert_eq_ddf_df_y_": {"doc_hash": "3c4dbd461d1e9fc867013398c8f6ca1a6e56a53d0e128a08db423f21057f69e7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition_npartitions_test_repartition_npartitions.assert_all_map_len_parts": {"doc_hash": "8fc931802d16a1ace76528684597f3f831d95905d473a5e43e1eda85fba21a13"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition_partition_size_test_repartition_partition_size.assert_all_map_len_parts": {"doc_hash": "d25229d47ae0222c412f6cec21f94ad3c4f07e49addfeb8b9d1a636829e790dd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition_partition_size_arg_test_repartition_npartitions_same_limits.ddf_repartition_npartitio": {"doc_hash": "754da2e61fc5f2f5371f1d67d3b17afee7ce105876f7af36449187db75a466c7"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition_npartitions_numeric_edge_case_test_repartition_object_index.assert_not_b_known_divisi": {"doc_hash": "7eb4913b2f961521d955fc88953e8986b47a6a59720eecb1e09cf8fed82685f0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition_freq_test_repartition_freq.assert_eq_ddf2_df_": {"doc_hash": "f4b885bf106644bad7e62e4867e7dc3417e73fb1f4169be175618a81bcf0e7e8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition_freq_divisions_test_repartition_freq_divisions.assert_eq_ddf2_ddf2_": {"doc_hash": "deaae062b3eb7925d1900cc0c57dcb653ff4e57fab0433b5e708b4baaab13654"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition_freq_errors_test_repartition_input_errors.None_1.ddf_repartition_npartitio": {"doc_hash": "255d5022781d0dece6e8701eae8e146e09b2243c3b7f959c72e1b62fc5115b5b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_embarrassingly_parallel_operations_test_embarrassingly_parallel_operations.assert_len_a_sample_frac_": {"doc_hash": "a7fff95ca9f1ad4ca5bfc6d87f5abd36c4460aa9221c0d947d27ae7dfb964924"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_fillna_test_fillna.assert_eq_df_fillna_metho": {"doc_hash": "6f78f012e37d0d48193506d1f84ce62e2df2a72b68687f2efea70661e478edcb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_fillna_duplicate_index_test_fillna_series_types.assert_eq_ddf_fillna_fill": {"doc_hash": "fd4b6bfb10bf5e0d5990d33515e72a56026a480de75205ee47ce239aef0649dc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_sample_test_sample.assert_a_sample_frac_0_5_": {"doc_hash": "645db31d6e1ab5f10eae96686705dd7327ba3ad142d831a8b05d5a65ab4dc1d4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_sample_without_replacement_test_sample_without_replacement.assert_len_bb_len_set": {"doc_hash": "1c3e46ba27024ba6a4a6a1bcde70d3dedfa40fe06f1ffc684de75dbeb197adbb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_sample_raises_test_sample_raises.None_2.a_sample_frac_None_": {"doc_hash": "00313406d1914ad3567f6d010afe915e8ef9c036a71c8e377dda8c26492fe684"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_empty_max_test_query.assert_eq_": {"doc_hash": "53d80f6fda15b19f0d5ead5d5952ad2ad10ca142098082e06a476469e581475b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_eval_test_eval.with_pytest_raises_NotImp.d_eval_z_x_y_inpla": {"doc_hash": "c8e803d1395b8a98fe87bd54bec10daf479438cc6ad6b64c9bb64546571b1ad6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_select_dtypes_test_select_dtypes.if_not_PANDAS_GT_100_.with_ctx_.tm_assert_series_equal_": 
{"doc_hash": "43a85d49820244542003e8e3269d88dc0812c8e2189f6b6be5f0febfd4406817"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_deterministic_apply_concat_apply_names_test_deterministic_apply_concat_apply_names.assert_eq_res_df_x_sum_": {"doc_hash": "c0b84f01c1feef9269bd29daae758a7cd3c1b186fd61bfac95bba01fd98ab8e0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_aca_meta_infer_test_aca_meta_infer.assert_res_compute_d": {"doc_hash": "8f5498be7174ab8abe5f94d9ed8647f86db274c7fe8a952ae1a64e4ad461ec6b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_aca_split_every_test_aca_split_every.None_1.aca_": {"doc_hash": "bb74a07b94ca51e69fff7698ed1c9ab86d61adf4cbbc14987edf13607789051d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_reduction_method_test_reduction_method.assert_eq_res_pd_DataFra": {"doc_hash": "a8787a8e4cb5c32c7ac517f72c84941343c590a58e736a8aaa692be83a3a917d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_reduction_method_split_every_test_reduction_method_split_every.None_1.ddf_reduction_": {"doc_hash": "8f9caf81e72623af52815b2723eaf44808fa484a3aa61b905ff3def6f927d237"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_pipe_test_gh_517.assert_ddf2_index_nunique": {"doc_hash": "ef4cb7b7974bd00451eafa054d579a52848996299c2c2e5bba22c784a81e613b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_drop_axis_1_test_drop_axis_1.assert_eq_ddf_drop_column": {"doc_hash": "ee61f650a7416b656f0849c76855bba07bb726d35476ace106264d8a47a1e28f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_gh580_test_rename_index.pytest_raises_ValueError_": {"doc_hash": "248f4348f8061487f25b51fab3ec45ef309de2829ce154d2800d5257d91c9ae3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_to_timestamp_test_to_timestamp.None_3": {"doc_hash": "3f7107fa36b03d3dd76d9f6bb5d92c62b2ec787945fe114c01dcb5c406a22ede"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_to_frame_test_to_dask_array_raises.None_2.a_to_dask_array_5_": {"doc_hash": "7b84b87db4f3fcb1e12ab4df70697a7407bd56bacc383efaa18b45c6816d2f2e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_to_dask_array_unknown_test_to_dask_array_unknown.assert_all_np_isnan_x_fo": {"doc_hash": "086e0e405bd0555b78a240926ddce1b8f547415f6e60a4d87e84f878d5b39a1f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_to_dask_array_test_to_dask_array.assert_result_chunks_e": {"doc_hash": "2e925d34c8d24194a34bc0e7a89aadbbd78630fd3cb6ae111cce9f1398ce180d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_apply_test_apply.None_4.ddf_apply_lambda_xy_xy_": {"doc_hash": 
"090d9043071877ba9d924b3310fd71bf7f5905c83fe8bf86e42b0bd90e527b1d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_apply_warns_test_apply_warns.assert_int64_in_str_w_0": {"doc_hash": "eeb23a7c6982ed24b20a13dc85f8bdb7e8d17703060360d81360ba333198b3b3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_applymap_test_applymap.None_1": {"doc_hash": "93131f8973e3bce9f28b9bddb7d9c00afb077b244e394b15a9604e5fc0cdf099"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_abs_test_round.assert_eq_ddf_round_2_d": {"doc_hash": "b9b894def99cfc511ae1194cd336de76ef7d35e7ffbd087cf7212f4760b4e74d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_cov_test_cov.None_7": {"doc_hash": "9d62460c1e4089d70f739852c75ca25a8d3aad83c47fe27924ae1d3146cbad71"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_corr_test_corr.pytest_raises_TypeError_": {"doc_hash": "3c7bfa2957d4740bfd29a25d536803eb19b87148d304adee8d55c6f02982d108"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_corr_same_name_test_corr_same_name.assert_eq_result2_expect": {"doc_hash": "6ce2446c9cb8332c5d2396d288ff00e0287b4346f3a499015a430e95912b9836"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_cov_corr_meta_test_cov_corr_stable.assert_eq_ddf_corr_split_": {"doc_hash": "8efa97aeb8044621e622263598c262a18f4a7541d40e8f7ee4e2c2b461474f2a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_cov_corr_mixed_test_cov_corr_mixed.assert_eq_ddf_cov_split_e": {"doc_hash": "3744203ffeeb4a04f28411e46cfd8632063e1003cd117cd99f170bfd4372eb9d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_autocorr_test_autocorr.pytest_raises_TypeError_": {"doc_hash": "a427b9039b45d3e6338565b6808f650191221a9da05be235bff0ef7b530761e9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_apply_infer_columns_test_apply_infer_columns.None_5": {"doc_hash": "6dd7946e4ade8ed50ef552886d772b48fdd3b51f9f3da9d35c9d74da285643e0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_index_time_properties_test_nlargest_nsmallest.for_m_in_nlargest_ns.None_2": {"doc_hash": "764c571d2070dbc2e4c8b749a895e78e847f65c63b4d0d5eb350d0dc42d372e8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_reset_index_test_reset_index.None_3": {"doc_hash": "20a7bea6f49044e41386519b73a21221ea65113f5b263940ba0094879958cb78"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dataframe_compute_forward_kwargs_test_dataframe_itertuples.for_a_b_in_zip_df_iter.assert_a_b": {"doc_hash": "8ad7ad83f0f5b8ce4229002a00977166af070f113c6021869144bdc5e2c925c1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dataframe_items_test_dataframe_items.for_a_b_in_zip_df_item._column_values": {"doc_hash": "42ebdf3d60c4cedea5443992353746581b9601747d2fc38dc6a837d1a48b8abf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dataframe_itertuples_with_index_false_test_astype.assert_eq_a_x_astype_floa": {"doc_hash": "0807170d14e43482999bdb8cd17478f8a3a9d24c623ca0aec4b773de19c19fa2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_astype_categoricals_test_astype_categoricals.assert_dx_compute_dtype": {"doc_hash": "83981dda45199074fcc9668061b76579df2967f50fdbe75263487a819cca530f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_astype_categoricals_known_test_astype_categoricals_known.for_dtype_known_in_ca.assert_dx2_cat_known_k": {"doc_hash": "ae27cf70f84c5f21f753925ea04586a0a48c508110f5b37e9e2da1964f79469f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_groupby_callable_test_groupby_callable.assert_eq_a_y_groupby_ise": {"doc_hash": "bfcdc0d86557a265d3d90d949bee2582b7c81ef01e7c90a4e2added45a0574aa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_methods_tokenize_differently__assert_info.assert_stdout_pd_stdou": {"doc_hash": "c906003d1e3db4585e3934f970f12d446702bd7add231c02c885cc9784be782c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_info_test_info.assert_ddf_info_buf_None_": {"doc_hash": "7cbebdb45aafed1cad20db46347ce4f04ee1a742cd3397cd14122cb839fbee95"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_groupby_multilevel_info_test_groupby_multilevel_info.None_1": {"doc_hash": "5f7f1fc85469721d4d05cd816884e84a836e019724610bdf091c3a9af8413743"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_categorize_info_test_categorize_info.assert_buf_getvalue_": {"doc_hash": "d13aa35da163721ba59b5da9713d48d842451db2df535f3b6eedc3d63ba4f7ae"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_gh_1301_test_column_assignment.assert_z_not_in_orig_co": {"doc_hash": "f6a3ac45ea30e279bc41a818e58fddc555ccb96cc4d3c297507f1865995f3c06"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_array_assignment_test_array_assignment.None_1.ddf_z_darr": {"doc_hash": "102b09f892b285bef752a8dedeb4cf60f38831329082270e284b48712ceacfea"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_columns_assignment_test_columns_assignment.assert_eq_df_ddf_": {"doc_hash": "93d07cb3d20c10ded5461b73398cb041c9fcc8dc5a8bdb1acfae3c08d01e9054"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_attribute_assignment_test_setitem_triggering_realign.assert_len_a_12": {"doc_hash": 
"de74c6df5fc284c900b28fa0288e77565476a549ca45464e82d5a930c63242d1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_inplace_operators_test_inplace_operators.assert_eq_ddf_df_assign_": {"doc_hash": "e1413ed8a74ca887ee5d51b6d430129360f92653d7cc20b8380e65e23e34f186"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_idxmaxmin_test_idxmaxmin.with_warnings_catch_warni.None_3": {"doc_hash": "b39fc3073fdfe840e514ea619489064e99f18ccdc7807f17ce76132f1923162a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_idxmaxmin_empty_partitions_test_idxmaxmin_empty_partitions.None_2.ddf_b_idxmax_compute_": {"doc_hash": "bc7b77423ecde279ec67d2b41cb21ac229cff5cc443ac88360ba8bb3e01de600"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_getitem_meta_test_getitem_multilevel.None_1": {"doc_hash": "48c52cd58e0dc7f6dfcb07adbf7a558a47d833840881043df851a1165ba1ab6e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_getitem_string_subclass_test_ipython_completion.assert_c_not_in_complet": {"doc_hash": "15eee8fc37a83c94da375d7f3e800ae85838acbac34d78b806d327a87f9c384a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_diff_test_diff.pytest_raises_TypeError_": {"doc_hash": "1199764247b7b6638d3ac85ff3d35bf2b2c2222d5df2e32597f316687bbf6f9c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_shift_test_shift.with_pytest_raises_TypeEr.ddf_shift_1_5_": {"doc_hash": "4c35f3d3da199e3d19dae22c4dbecaa4623171301c2b9a345080c5b6ae0c28f4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_shift_with_freq_DatetimeIndex_test_shift_with_freq_DatetimeIndex.assert_res_known_division": {"doc_hash": "5842d207264e6bb4a3426009c0eda13a328ad92ee2e514ac0e5cfdbe9b0a6392"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_shift_with_freq_PeriodIndex_test_shift_with_freq_PeriodIndex.with_pytest_raises_ValueE._freq_keyword_not_suppor": {"doc_hash": "75f96370f06da6d812203db5fc15ab16cc837b0399b9c34f65bb7623fa9d32b8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_shift_with_freq_TimedeltaIndex_test_shift_with_freq_errors.None_2": {"doc_hash": "81b8e61310b8b34c79a4296adf17d7fa63617ad57a82678b61519aafdd539e11"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_first_and_last_test_first_and_last.for_freq_in_freqs_.for_offset_in_offsets_.assert_eq_f_ddf_A_offset": {"doc_hash": "cc46fa1a1478063333d1860d5fee68bb18c78328eb84ac882cee1114d150c5d1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_hash_split_unique_test_hash_split_unique.assert_sorted_dropped_com": {"doc_hash": "ea88b7ce364ca28f68faf87fd61a1e40f726c007eaef2ee246875accddabd19b"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_split_out_drop_duplicates_test_split_out_drop_duplicates.for_subset_keep_in_produ.assert_eq_sol_res_": {"doc_hash": "e69b12da1f01b1369b1e5ef1aacef3ce610f7db1898d1b90525416c7087470e4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_split_out_value_counts_test_split_out_value_counts.assert_eq_": {"doc_hash": "0ef68ac621f4b90cafafd27588035cfc206884928daf5c9b0a70b6a87257ac90"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_values_test_values.assert_eq_df_index_values": {"doc_hash": "13eaf1a6041f5017e3036eb7a0fb47d50ff9ff5e3860cd9ea8e9f7c9a3f7af9c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_copy_test_del.assert_eq_a_df_": {"doc_hash": "511a274182e050abfec190d5b9eab267d9e0b8c5c60f56a00bc697d026ca7a19"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_memory_usage_test_memory_usage.assert_": {"doc_hash": "3f9a8e4112f669654dffddfa5821f28915699295cbb1d793e2297366066eb439"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_memory_usage_per_partition_test_memory_usage_per_partition.None_1": {"doc_hash": "9a4ea76ffeb1bc30df514495d66750f4cb602cc7927518eb16742c624100da3c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dataframe_reductions_arithmetic_test_dataframe_reductions_arithmetic.assert_eq_": {"doc_hash": "db9be274083dfbb586e6fa69190b536cf3c932c933f136a1353985c00ee132f8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dataframe_mode_test_dataframe_mode.None_2": {"doc_hash": "c743e1abe81ff37505579698cafed409aca95ce253476f7abfe974b78fee52d6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_datetime_loc_open_slicing_test_datetime_loc_open_slicing.assert_eq_df_0_loc_02_0": {"doc_hash": "cb8db85e689d7cfaaaa764d5b226d72adb02d369d8fc226a387c997d900443a3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_to_datetime_test_to_datetime.None_2": {"doc_hash": "880c1b99a8b2e9c7bcb88dffffc8e176ba5d26178f953870c36778764eed81b1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_to_timedelta_test_isna.assert_eq_pd_isna_s_dd_": {"doc_hash": "ed67dc4586630bf7d9f8a053ac4cbfb5321126a23dc38bb7ed530b837d1fdec3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_slice_on_filtered_boundary_test_slice_on_filtered_boundary.assert_eq_result_expecte": {"doc_hash": "d1a601f0747b53d1dbd8df1d77dd634bf9f02ce62fe2296c9572797381fa07d9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_boundary_slice_nonmonotonic_test_boundary_slice_empty.tm_assert_frame_equal_res": {"doc_hash": "5ddadc972c474fe4f91d0874a7a61fff1d2c9611a8fe99971e0027de24c2308c"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_with_boundary_test_with_boundary.tm_assert_frame_equal_res": {"doc_hash": "52ebb08725b4baaaa1a54b1b21382c3f00c13c270f97a662f971fbe25feed776"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_boundary_slice_same_test_boundary_slice_same.tm_assert_frame_equal_res": {"doc_hash": "82bc9a1eb48ecaa5edb73391b43e0bd7a51d262249c82304b5311260796a1fc5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_better_errors_object_reductions_test_bool.for_cond_in_conditions_.with_pytest_raises_ValueE.bool_cond_": {"doc_hash": "c7952fcdaed136f0e08cf36de6227760ff8ccd32d34e1ffc41e044f85c60a35c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_cumulative_multiple_columns_test_cumulative_multiple_columns.assert_eq_ddf_df_": {"doc_hash": "ee805f19eff084d6d4a886857b0355f0f5223301d21690ad5e023c121a3ad300"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_partition_array_test_map_partition_array.for_pre_in_lambda_a_a_.assert_x_chunks_0_np": {"doc_hash": "ed1a82140c9a4388b2ad348fcd512e8394e64c19dc9043b59ef20ff67cea1b16"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_partition_sparse_test_map_partition_sparse.for_pre_in_lambda_a_a_.assert_computed_coords_": {"doc_hash": "274b145ff237ff047a44f1d093f3c0d2cb611ed53b60862f363a0be774c677b7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_mixed_dask_array_operations_test_mixed_dask_array_operations.None_4": {"doc_hash": "e2775176027170b016feed28136154330bbfc52254bc3b48ce4650c2356a14c0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_mixed_dask_array_operations_errors_test_mixed_dask_array_operations_errors.assert_add_in_str_info_": {"doc_hash": "341bae8e997002f42f237988f7da720b352707e715c72310571315bd428009af"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_mixed_dask_array_multi_dimensional_test_mixed_dask_array_multi_dimensional.assert_eq_ddf_y_x_": {"doc_hash": "503275182ef591eabd48df65e8a0a69d29e7f8b611eda60e8c243fcb11e222cd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_meta_raises_test_meta_raises.assert_meta_not_in_str": {"doc_hash": "130ec219e4dfad13829b414dc65132be6741744a0baa70d749ad6dad2c624423"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dask_dataframe_holds_scipy_sparse_containers_test_dask_dataframe_holds_scipy_sparse_containers.assert_all_isinstance_v_": {"doc_hash": "26bcef508e37ec1f3367672c87944513e0114d05a852686a765a629f9e069436"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_partitions_delays_large_inputs_test_map_partitions_delays_large_inputs.None_1": {"doc_hash": "8ad45731f907376e8a9822339c9cff2d7115b5404641991ccd4f04972bd2613f"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_partitions_indexer_test_partitions_indexer.None_2": {"doc_hash": "5fb81f9334865750723f737b059b88313e8848d2263e81368e4ff8c0657d7b36"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_mod_eq_test_meta_error_message.assert_pandas_in_str_in": {"doc_hash": "b2b01be11270b5ab9278762eb0297cde90361bae448b80571a6c895695f9f7dd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_index_test_map_index.assert_applied_divisions_": {"doc_hash": "2ad89942ec0d0a6d7298dd6d95b122308c8d3445bcbe4c0e7984e65442193fdc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_assign_index_test_index_divisions.assert_eq_ddf_index_df": {"doc_hash": "287ed2bc75ea891dbc437ad92112a2e93abf754f17d820c5b49850dfac009fdc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_replace_test_replace.assert_eq_df_x_replace_1": {"doc_hash": "9c2812e830f3124c78616b08b9071c7826fd43c9e42418051be4f1e208f86ad9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_partitions_delays_lists_test_map_partitions_delays_lists.None_1": {"doc_hash": "18ec3751a8ea40cc65e2ca8caa22aafac58e17e31285e03f0f4ba991c65a1a07"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dtype_cast_test_dtype_cast.None_7": {"doc_hash": "366b36038830b79f5ee02071bd2a48aa900ff43e541d5dabcb843e21fca8f0d4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_series_map_test_series_map.dd_utils_assert_eq_expect": {"doc_hash": "173b47a953ed4f0d295c75571b39e27a417e33f3731c40dfb93e17a9e7ab98bb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dataframe_explode_test_dataframe_explode.assert_eq_exploded_ddf_co": {"doc_hash": "d79cd69ada52ba3c9d3b76f1a8570145a2508d36a90199f6bb18c329eeb0842e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_series_explode_test_pop.assert_eq_ddf_df_x_": {"doc_hash": "2fd40f450242ff72cc14b782a5b20f5afcff34c4cb4b4c9ed33973cea2351865"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_simple_map_partitions_test_simple_map_partitions.assert_v_0_M_clip_or_": {"doc_hash": "857d1e3d77a6bfbc9e1b05da3dbf86d05fa66d400ab85594025ff3b2fed3e749"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_iter_test_dataframe_groupby_agg_empty_partitions.assert_eq_ddf_ddf_x_5_": {"doc_hash": "2b16af7bb73297503dc3e291472c7982f253dc70de9f217df4db887e168e7c20"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_extensions.py_from_decimal_import_Decim_test_register_extension_type.assert_eq_df_ddf_": {"doc_hash": "cd58120e3e2dd4bc39195e23419fd51c1ce436c79826abda7516c628f7454f3e"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_extensions.py_test_reduction_": {"doc_hash": "9aafbabb178e3e15242934502e0f99ce11cd5e515b83c65d39e4af40cae7f43d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_pd_test_repr_meta_mutation.None_1": {"doc_hash": "7999c6d91a5025b2241eb6b90ed904ca3bf6150a4c7e985e7f3d709852b6ce56"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_dataframe_format_test_dataframe_format.assert_ddf__repr_html__": {"doc_hash": "f57e99a879df03674084cee09d9f03cb19b7cfa0d51a6193d304c218d4fe7385"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_dataframe_format_with_index_test_dataframe_format_with_index.assert_ddf__repr_html__": {"doc_hash": "68db2ee0241298a6cf41214ef4a0def9d247bc8892372f655217eece65d6986e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_dataframe_format_unknown_divisions_test_dataframe_format_unknown_divisions.assert_ddf__repr_html__": {"doc_hash": "7d60a1b42922ac28e23100030ffff04a493a9e9e7ef061ebd49bedfd6d9f5412"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_dataframe_format_long_test_dataframe_format_long.assert_ddf__repr_html__": {"doc_hash": "3f9c95c2599615ba70e74767a0e3a671f5ef71329d957400eba08786512b9807"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_series_format_test_series_format.None_4": {"doc_hash": "a8b78ad98ae61d662f258f7e8684cf9dce48c650678656e7b7015f05ed7d0322"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_series_format_long_test_series_format_long.assert_ds_to_string_": {"doc_hash": "4a1ab90dcc84ee221a3b1536fa3435c1d4d6d7abded448c10e5cb06543820033"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_index_format_test_index_format.None_3": {"doc_hash": "b253f00668e61b5a6b6fb4541c992ffa9c8e08c04929abfc85bf37bb83d9de44"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_categorical_format_test_duplicate_columns_repr.repr_frame_": {"doc_hash": "03a199dad409f31019057e9f2e59bef941fe0acb4e82d692a7a3d3fdcfddd59c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_empty_repr_": {"doc_hash": "a89cb4b6a2eb7a37276a7086262c153969ad9e793bf73e7687bdc1ab5a43bc3b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_collections_agg_func.return.request_param": {"doc_hash": "9e40b36a1795c1e34bcdb63b289d573cd1037a4071287ee96a1f63afe8cf1e74"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_internal_repr_xfail_test_groupby_internal_repr_xfail.None_1": {"doc_hash": "3e2c6df0669ad8e0b4802efae5c721f395a8aaa7b6f4e4df929fd09219425798"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_internal_repr_test_groupby_internal_repr.None_2": 
{"doc_hash": "adacae37ba987694348c2bef094c7c2a7090ea30108fad82124b06bed883f1f4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_error_test_groupby_error.None_1": {"doc_hash": "1133e03e880b0e928d5c398e2ab108c6b3dc64da53a225f136a804255ad98b5b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_full_groupby_test_full_groupby.with_warnings_catch_warni.assert_eq_df_groupby_a_": {"doc_hash": "72d0dac458dab6af9dfb54bfe8336faeee0e3ad615ded55e88ede6c4baabd705"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_full_groupby_apply_multiarg_test_full_groupby_apply_multiarg.None_1": {"doc_hash": "f1e23d941c0d2b027c3ffdbdaf2a041154e91947f85ff80b2718f3d17fb612b6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_full_groupby_multilevel_test_groupby_dir.assert_b_c_d_e_not_in_d": {"doc_hash": "398e7936683ac603a43ab635ea1ba0343296ecbc249fd1fd53d2d15da33fc849"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_on_index_test_groupby_on_index.with_dask_config_set_sche.with_pytest_warns_None_.None_4": {"doc_hash": "c518920d637b976782cc72f9329ecae5f99af899fcf5738fc703bf93bf3b25eb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_multilevel_getitem_test_groupby_multilevel_getitem.if_agg_func_mean_.else_.assert_eq_a_b_": {"doc_hash": "b93f75763e86c28e696954435fcffca865b4018e66820fb549876eda130a96ea"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_multilevel_agg_test_groupby_multilevel_agg.None_2": {"doc_hash": "809498d84c88debd6bb8921fc8017b530fe8ed9997baa0adfdc84ac4c7b980b3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_get_group_test_groupby_get_group.for_ddkey_pdkey_in_b_.None_3": {"doc_hash": "9e870cf3082732be652ab5e1fd89d1dfe24e283151f59eb02190c5827764f1ba"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_dataframe_groupby_nunique_test_series_groupby_propagates_names.assert_eq_result_expecte": {"doc_hash": "89e2d705d7bbe6cafb4e793a51386f4e8bff77877036245bb6f7bbc11ab230d3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_test_series_groupby.for_dg_pdg_in_dask_gro.assert_eq_dg_prod_pdg_": {"doc_hash": "64878d2a15f68043acbfe1bf5653a0e09a070d7b699456c5cef755c47a73a4d6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_errors_test_series_groupby_errors.None_4._dask_should_raise_the_s": {"doc_hash": "2ca7e97f65d5939e36e260f2115726095e780efea703de6b44f6b66e4068d926"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_index_array_test_groupby_set_index.pytest_raises_TypeError_": {"doc_hash": "a5b51c917ba30ef27a296df3e6b4caa9464fa030eb2d60771cccb2786604ea0d"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_split_apply_combine_on_series_test_split_apply_combine_on_series.for_ddkey_pdkey_in_b_.None_1.None_1": {"doc_hash": "19b67ac5040e0194acc4eda3936a7c6439b943bf077666aef026b06eecdc6912"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_split_apply_combine_on_series.for_ddkey_pdkey_in_ddf_test_split_apply_combine_on_series.for_ddkey_pdkey_in_ddf.for_ddof_in_ddofs_.None_1": {"doc_hash": "138d413191b547f3b7c0ce846aaf882c0eb3ebcd993c7ea44d8786fd8bc57e58"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_split_apply_combine_on_series.for_i_in_0_4_7__test_split_apply_combine_on_series.for_i_in_0_4_7_.for_ddof_in_ddofs_.assert_eq_": {"doc_hash": "7b023559c1a74dd106db2621796eaacac4cf5d69412ed572c0d93705606cdd5d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_split_apply_combine_on_series.for_ddkey_pdkey_in__test_split_apply_combine_on_series.None_7": {"doc_hash": "04c08ba996147d342f097e2ceb800adf9fc921c17b61754d6c86da028ef95240"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_split_apply_combine_on_series.None_8_test_split_apply_combine_on_series.None_26": {"doc_hash": "36d5db3db9acf474316dbb7c2b28b6225b10656bc85ac7f8dd40a2037fa8075f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_reduction_split_test_groupby_reduction_split.assert_call_ddf_a_groupby": {"doc_hash": "d5b267c49a5bd2b3b673a767bf1348d0caa48c46540edfce41a7f0d8106c4857"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_apply_or_transform_shuffle_test_apply_or_transform_shuffle.with_pytest_warns_UserWar.assert_eq_func_grouped_pd": {"doc_hash": "f660623f18c290b60b1bfe94b62e4f1c0910c2586e53168e917d7222bc6f6544"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_apply_or_transform_shuffle_multilevel_test_apply_or_transform_shuffle_multilevel.with_pytest_warns_UserWar.None_2": {"doc_hash": "e5e5dbcb3b6460a36011a62c902685cbf1dc00459ffb2fa6190dd5d3ccc2ce4b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_numeric_column_names_test_numeric_column_names.assert_eq_": {"doc_hash": "4eaa1d950490c13d2a5b895caf25d9357e3bf5a917835f450fd2f2ee0dd0b06a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_apply_tasks_test_groupby_apply_tasks.with_dask_config_set_shuf.for_ind_in_lambda_x_A_.None_1": {"doc_hash": "1727ec1242320fa581c12bab5f4cf0f5c14ea32acb3d5df24e51a18b7c610c91"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_multiprocessing_test_groupby_multiprocessing.with_dask_config_set_sche.assert_eq_": {"doc_hash": "659a8f471dcc1dc2fb3752f7fe5c2ba4ab2bda17a25df9653b3d4204fe2a2138"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_normalize_index_test_groupby_normalize_index.None_5": 
{"doc_hash": "a44eaa15c20d5778ecec1279e39a1d8dec812d7e4821127475d6cfe3310ed56a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_aggregate__examples_test_aggregate__examples.if_not_PANDAS_GT_100_.with_pytest_warns_None_.assert_eq_": {"doc_hash": "ef8d89f5bb2f8bd6843ccdc6b4b98ecff230595a3bb9aa618d0e93d78166fc4d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_aggregate__examples_test_series_aggregate__examples.if_not_PANDAS_GT_100_.with_pytest_warns_None_.assert_eq_": {"doc_hash": "13207c2c472f7e426cb18960e75a7fcb700c0d363195516f1d5aa8f722b67778"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_aggregate__single_element_groups_test_aggregate__single_element_groups.assert_eq_expected_ddf_g": {"doc_hash": "31f23211e94bcedefb5b7c0737076594b981210b9e42e44877b129a0d18d645d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_aggregate_build_agg_args__reuse_of_intermediates_test_aggregate_build_agg_args__reuse_of_intermediates.assert_len_with_mean_fina": {"doc_hash": "dceedbd9ffb9ac84389a6e8ff0c12d4a081a88d6ce46e27ef2ffbb7cf5f8e365"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_aggregate__dask_test_aggregate__dask.for_spec_in_specs_.for_other_spec_in_specs_.None_1": {"doc_hash": "59ec12beb903b1b9c60ecacc59f1a6deb4c3e4fcdd888f16baf30ed3e50fa278"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_dataframe_aggregations_multilevel_test_dataframe_aggregations_multilevel.if_agg_func_nunique_.if_agg_func_in_cov_c.else_.assert_eq_": {"doc_hash": "635610e1b3de9967c7dba73a009d6360f044a80c7a47686dc82ea8bb084262ed"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_aggregations_multilevel_test_series_aggregations_multilevel.assert_eq_": {"doc_hash": "a3215c790401c5cb199cfc9328ed268ecea23b934bdda1be6dd20ed1df1a0f5f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_meta_content_test_groupby_meta_content.None_1": {"doc_hash": "af7963a1aab7deca021905133fcbe21fbac9a59bc192234e271ad5ec6c98fc67"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupy_non_aligned_index_test_groupy_non_aligned_index.None_4.ddf3_groupby_ddf7_a_": {"doc_hash": "663e22fb581fa1b617a86d9d70a82d5318029cbf286355c347f72b54857d806b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupy_series_wrong_grouper_test_groupy_series_wrong_grouper.None_3.s_groupby_s_df_": {"doc_hash": "1fe4c617894b2c3b49d601b0d0fe1414305f4698755554b4cf95c5a788617f8a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_hash_groupby_aggregate_test_hash_groupby_aggregate.assert_eq_result_df_grou": {"doc_hash": "dd76cdc27872c5e1829a653a37b435662b2e780cc217f82574ae1dcd7753e5e6"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_split_out_multi_column_groupby_test_split_out_multi_column_groupby.assert_eq_result_expecte": {"doc_hash": "667b04a19c6e41aa65246476d18da3c1a288ec5e553b4be0c178bf6a194f2d45"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_split_out_num_test_groupby_split_out_num.with_pytest_raises_TypeEr.ddf_groupby_A_split_ou": {"doc_hash": "9d0231def38ec1eeae576797c869d297c36499dbb163b0921058771ffa2eaf58"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_not_supported_test_groupby_numeric_column.assert_eq_ddf_groupby_ddf": {"doc_hash": "31319f08f49ec1b9fde82479d35e1d642ebe2d904ad0e8b99425b7a6c9da99b3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_cumulative_test_cumulative.assert_eq_getattr_g_func": {"doc_hash": "0b4ebb31d9075731ce72949e44bb277e888ef137dfb98857bbfdba487680cfda"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_cumulative_axis1_test_cumulative_axis1.assert_eq_": {"doc_hash": "08cea69c9eed6cdbb94d8836717c1bb95139f7bdd2218df54a30e901ce838d28"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_unaligned_index_test_groupby_unaligned_index.for_res_sol_in_good_.assert_eq_res_sol_": {"doc_hash": "ea38c2667034ce94467b2d6de465773c3afc04a76a192181287fbb09e607678e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_string_label_test_groupby_string_label.tm_assert_frame_equal_res": {"doc_hash": "5d92f31eb9c463beca411bcddecd00a514c33972bbeda7582241fd5c93718d92"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_dataframe_cum_caching_test_groupby_dataframe_cum_caching.for_op_in_ops_.assert_res1_a_equals_res1": {"doc_hash": "e3d6eb6b844a56d3eee8e19167c6dfd84b564bbfb6c52ad88e0e740e7e3cbc44"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_series_cum_caching_test_groupby_series_cum_caching.for_op_in_ops_.assert_res1_a_equals_res1": {"doc_hash": "e377b596772bd16e2759d51aa36077c165a7d36a22c62e3b4f0912c6d798fff3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_slice_agg_reduces_test_groupby_agg_grouper_single.assert_eq_result_expecte": {"doc_hash": "68c9e936b361d2f9d8da7f4961251dbfb6f74a9d676772b7df1c862c58cbb8aa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_agg_grouper_multiple_test_groupby_agg_grouper_multiple.assert_eq_result_expecte": {"doc_hash": "3380a0f8e912ea6fb01c6af96abf213923ec5db0deade244d51317287aba6101"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_column_and_index_agg_funcs_test_groupby_column_and_index_agg_funcs.None_5.assert_eq_expected_resul": {"doc_hash": "36d8ea4e76312c63ea1b7a9024136e7dba0222550070d16711cc212ea0056071"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_column_and_index_apply_test_groupby_column_and_index_apply.with_warnings_catch_warni.None_3": {"doc_hash": "4ff5b9026f2c5b9ba20def009751712d13876992e14f0a812be3f7d32079d5cd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_custom_mean_test_dataframe_groupby_agg_custom_sum.assert_eq_result_expecte": {"doc_hash": "41529c50237a3d5c52895a7f820ab44f48be5f2a9f48dee631b943ffe55bd22b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_agg_custom_mean_test_series_groupby_agg_custom_mean.assert_eq_result_expecte": {"doc_hash": "eb575caf00ab507373e03ce643ae359540fc9f7b82353abf60842b0396307678"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_agg_custom__name_clash_with_internal_same_column_test_groupby_agg_custom__name_clash_with_internal_same_column.with_pytest_raises_ValueE.a_groupby_g_aggregate_": {"doc_hash": "dc1117d107f6cdd497bcb3c2dce67bc8eadbde655d1770d3248137140f218d67"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_agg_custom__name_clash_with_internal_different_column_test_groupby_agg_custom__name_clash_with_internal_different_column.assert_eq_result_expecte": {"doc_hash": "0b41d759feb27ba3975884f21f0f70ba34dfc6638dbfb6576377cd3d88769609"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_agg_custom__mode_test_groupby_agg_custom__mode.assert_eq_actual_expecte": {"doc_hash": "1033208e755c1b7a471be8a29f4902b6853937e37cdbe38cf32e2d6e507e5098"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_select_column_agg_test_groupby_select_column_agg.assert_eq_actual_expecte": {"doc_hash": "720546784cf0c1fe026e34c822b88cad4a5b35fde440d2b9b280a6ada89c9c73"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_std_object_dtype_test_std_object_dtype.assert_eq_func_df_func_": {"doc_hash": "c28e8d03461026edcc1755a5b4dd52a70b8f2d2b6c0f74e7a132bcc3001bdf45"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_std_columns_int_test_timeseries.assert_eq_df_groupby_nam": {"doc_hash": "8a970303d785291e15d7e15a7f39b21bf507e8b0190728e343f7d507f8ecba4d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_with_min_count_test_with_min_count.for_df_ddf_in_zip_dfs_d.None_1": {"doc_hash": "7efaa1f8221ee4a2fa950f6efa6311a0fdb397824c284e1d65cf1dd22cc6b65b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_group_keys_test_groupby_group_keys.None_1": {"doc_hash": "8d41877d484a5d6edaa04f09b175d15d10f27f31892b9d1e184b48b1fc2e4a96"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_cov_test_groupby_cov.if_isinstance_columns_np.else_.assert_eq_expected_resul": {"doc_hash": "e96b1e0103d790cf73a1ce9864ecde403891377e4eac1b0b541e1ef7888f3b43"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_df_groupby_idxmin_test_df_groupby_idxmin.assert_eq_expected_resul": {"doc_hash": "6ad0f11a5851aef1d06c59bb4b253ea0af892851e3fcc6b26ddcc3cf0181b8d4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_df_groupby_idxmin_skipna_test_df_groupby_idxmin_skipna.assert_eq_result_pd_resu": {"doc_hash": "40591b5e173e96b362dc2fa7a1c9fed696cfe29605627fd43319048bd0207949"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_df_groupby_idxmax_test_df_groupby_idxmax.assert_eq_expected_resul": {"doc_hash": "2cbedaf61109ef2207ad0699c259edc74d3991d87a1d19159e44e5b29441e2c7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_df_groupby_idxmax_skipna_test_df_groupby_idxmax_skipna.assert_eq_result_pd_resu": {"doc_hash": "ee9cfc198f0de2baa3c8feee2c09f033682979485e4b5d5ff80829df4ab2c939"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_idxmin_test_series_groupby_idxmin.assert_eq_expected_resul": {"doc_hash": "3bdf85163af14c18f9c0a58e911d52bb90a050f41716aa879f5ff12edcd2dbaf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_idxmin_skipna_test_series_groupby_idxmin_skipna.assert_eq_result_pd_resu": {"doc_hash": "0d211391d76fbb1cae447cc6e79a6036f5ed77c15ee8b6a77dc0ddfaeff59ba4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_idxmax_test_series_groupby_idxmax.assert_eq_expected_resul": {"doc_hash": "b28b9d7cab6051915d30c16eae18b566d7619be13029167350fc8b75d1a1bfc4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_idxmax_skipna_test_series_groupby_idxmax_skipna.assert_eq_result_pd_resu": {"doc_hash": "93f4211fee82dd74f3de292ad995fce3d50f52b9bcac0a534748bc28ab65c361"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_unique_test_groupby_value_counts.assert_eq_dd_gb_pd_gb_": {"doc_hash": "34590894164b0b17af67544fce8a8b6ec796f68cc66bf73e6666cd1b838a310f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_transform_funcs_test_groupby_transform_funcs.with_pytest_warns_UserWar.None_1": {"doc_hash": "e90328b5586c6ab47c51e486de7f602da1c746f399b819447da4d2aa5890e012"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_transform_ufunc_partitioning_test_groupby_transform_ufunc_partitioning.with_pytest_warns_UserWar.None_1": {"doc_hash": "6a93de237c8caba4e69caa8aad5333307f3212c7de85e5743a6511ab8d3ee9b2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_aggregate_categoricals_test_groupby_aggregate_categoricals.None_1": {"doc_hash": "29e254bc6d8891860ce0059c48e57b63f41f98505101df31a91e0184450dedce"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_dropna_pandas_test_groupby_dropna_pandas.assert_eq_dask_result_pd": {"doc_hash": "feb2c1ff0f4e3ebed56e04f7ed8811c4a6e04e1d0ae046e7e59bec137de318ec"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_dropna_cudf_test_groupby_dropna_cudf.assert_eq_dask_result_cu": {"doc_hash": "e010dc9cfe830164e161f1c3127128b4b3ea31076ace6d1299f08330b7a606ea"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_rounding_negative_var_test_groupby_split_out_multiindex.assert_eq_ddf_result_ddf": {"doc_hash": "acaede15f5a58619e3ed5ec312810f24592a4de633f0370d0557645f361257c1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_large_ints_exception_test_groupby_large_ints_exception.assert_eq_": {"doc_hash": "d9a97a53a3c42ac71d8192df701f2eaea449ae0c1f2b45e50aa30143a953439f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_sort_argument_test_groupby_sort_argument.if_agg_mean_.else_.assert_eq_result_3_resul": {"doc_hash": "1b0c7a40778856c7feb3c6c047affc319748d9729feff4096ccd4fc37c216e5a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_sort_argument_agg_test_groupby_sort_argument_agg.if_sort_.assert_eq_result_index_r": {"doc_hash": "22b0d92aac7679c2589e167f5509e638dbec23799136e2e42afefaeadd96ce24"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_sort_true_split_out_": {"doc_hash": "cb7c28eda3c9010c50c256e65e2f9621ab1d5545bebffa789e67d997059a5792"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_hashing.py_np_test_hash_pandas_object.if_isinstance_a_np_ndarr.else_.assert_eq_a_b_": {"doc_hash": "de9ea04e19d701460efbf4245a4605a7e2fe8200f8582b52f74199e0499c98a2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_hashing.py_test_categorical_consistency_test_categorical_consistency.for_s1_in_.for_categorize_in_True_.None_1": {"doc_hash": "73b13ba4d010aec72ea654e90834e0536cf733320793ed6b59a26333e3cb4946"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_hashing.py_test_object_missing_values_": {"doc_hash": "8f3b15d511eafc38afd205c20083de4ddadd8ae39abcc794b9cf42835621603a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_hyperloglog.py_dd_test_basic.assert_abs_approx_exact": {"doc_hash": "bea2d442c50d2489a2a46e25ace2e849055379d2aafe099d033da98eefc0a073"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_hyperloglog.py_test_split_every_": {"doc_hash": "25ecfde19f2cf1e1d6d7d1ee6107bfb7422556165fca16e6ca9c8214b85d6fdc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_pd_if_dd__compat_PANDAS_GT_1.CHECK_FREQ_check_freq_": {"doc_hash": "7485abec7a762533c89fe091efd06018f32a20e6548e627e1b18cea216edd44a"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_test_loc.None_4": {"doc_hash": "e68050560866a6ea0c2523c607cbca347eafda5397a6b96352c64755e6011eef"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_non_informative_index_test_loc_non_informative_index.assert_eq_ddf_loc_20_df": {"doc_hash": "3b3d84cd601d205d8fa71e7b5da22a763f8c2a5cc32bdf3e93e1cd191685b24e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_with_text_dates_test_loc_with_text_dates.assert_len_s_loc_2000_01": {"doc_hash": "3637d0d5e3091babbfd3c6abaa30fd4f02e402b8e39be32a4f40a8c4e08f5c0f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_with_series_test_loc_with_function.assert_eq_d_loc__col_l": {"doc_hash": "e68b5cb43e497473c6b51892fa453aa855c3fa82b82196c2677cd7d81242ee65"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_with_array_different_partition_test_loc_with_array_different_partition.with_pytest_raises_ValueE.ddf_loc_ddf_A_0_repar": {"doc_hash": "517d484eeea45eb0bf34b8ceaaf20d079beb92b1d5a05eabda5c4f7bfe1e5418"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_with_series_different_partition_test_loc_with_series_different_partition.assert_eq_": {"doc_hash": "d56cf6212890ab1e70777367f47322bfe560b287b460d31b61544058f6aca737"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc2d_test_loc2d.None_3.d_a_loc_d_a_2_0_3_": {"doc_hash": "d81afdc744098518707f0ec2d84b9cbd77fa2591eb82b6cc17353193140fa17f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc2d_some_missing_test_loc2d_with_known_divisions.assert_eq_": {"doc_hash": "a1447a02ce0a07ee6953791e5fa61db465ba2fdda55bb8b77410f5ddab562de2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc2d_with_unknown_divisions_test_loc2d_with_unknown_divisions.None_3": {"doc_hash": "6ef932474299d0df90c454de7f42ce460a6d14d244bce4c7318292f5e55384ac"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc2d_duplicated_columns_test_loc2d_duplicated_columns.None_13": {"doc_hash": "ee6f0c35ee88ff1ff75494e78186559cfc86480b039bb764d6071f4fc6e635c3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_getitem_test_getitem.None_13": {"doc_hash": "e3078e7b03cc902150430632f8226c58baaf9a96a13978ccfc880005ce385829"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_getitem_slice_test_getitem_slice.assert_eq_ddf_f_df_": {"doc_hash": "5a1b46d91b4d043006908c26d45ad073c33ddeb156265395218983d08ca86c5f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_getitem_integer_slice_test_getitem_integer_slice.assert_eq_ddf_8_df_8_": {"doc_hash": "f5ebf4f355788e9af5b5a86b046af9090b7cc2ccf1fe251819c620fbd6562b41"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_on_numpy_datetimes_test_loc_on_pandas_datetimes.assert_eq_a_loc_2014_2": {"doc_hash": "8d07a78b9208ede7a6fbe3c563d27dc4e1dbd8dce41eb95ebdb7a26916011470"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_datetime_no_freq_test_coerce_loc_index.for_t_in_pd_Timestamp_n.assert_isinstance__coerce": {"doc_hash": "1ddd663e4babbff915fe1c59365a7d3ee38100bc380548fb5ef63274f9ac68c1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_timestamp_str_test_loc_timestamp_str.None_15": {"doc_hash": "4077426eb62dfe39b37dca71d37675a1d1aabe9933856106365d7a882b44d8ce"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_getitem_timestamp_str_test_loc_period_str.pass": {"doc_hash": "6b0002d4ec54f1de1e9a81b4c9cec01f4117978537252d1ad3fcc0159618c0ef"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_getitem_period_str_test_getitem_period_str.assert_eq_df_2011_2015": {"doc_hash": "2a0609801c1fbaf17602c0a1457738408f6c48f04e65abe8bf423f4529bfca01"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_to_series_test_to_series.None_1": {"doc_hash": "086bef35ebecd11cc9bb7afe0234b248d3be4e616fd4f3e507908a301ca4e294"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_to_frame_test_to_frame.None_1": {"doc_hash": "3a71bc5e0ecb741f913d9ace7efb56cdc074d18d28e62c05a160590fb489310a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_to_frame_name_test_to_frame_name.None_1": {"doc_hash": "173745f77743dfcb2ab34ac1663d8874ebd575cf2d6237140d12dd67eb3fa3a2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_iloc_test_iloc_series.with_pytest_raises_Attrib.ds_iloc_": {"doc_hash": "5de2d4e568d357dc74753f2174b1a52a274734cf7759dfd93c88231849c8e540"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_iloc_raises_test_iloc_raises.with_pytest_raises_IndexE.ddf_iloc_5_6_": {"doc_hash": "25383d0be50a8c24d36b315ae9275bf9c66605e7570e2de436459afdfce3a233"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_iloc_duplicate_columns_test_iloc_duplicate_columns.assert_eq_select_negative": {"doc_hash": "0c06b6da6d2ffe6a76742fc7570582f4f0196927c1ba7664ca43b8514819718d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_iloc_dispatch_to_getitem_test_iloc_dispatch_to_getitem.assert_eq_select_negative": {"doc_hash": "a0f1ebf7a5ea6dff7b4c72e9a6dd7056991482d678f3f0e19c802d1527eb9e82"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_iloc_out_of_order_selection_": {"doc_hash": "fb0e2add33d2a8c1be5112f7fe45aac114540d39ee605aa976f8f401e33f3a57"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_dd_df_left.return.pd_DataFrame_dict_idx_idx": {"doc_hash": "bd699314b453d8344264870aa91d435d1cb66c35ffedb256f8d869a7296f6866"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_df_right_df_right.return.pd_DataFrame_dict_idx_idx": {"doc_hash": "7f068968f9baa31e668e8d8573d6a6015bce1f359c6c7ba79548f5659a3f3b26"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_ddf_left_on.return.request_param": {"doc_hash": "ab50cbc15a8e15cfaa6708a3c457f99460ef2454363bc2eaff74662330b34654"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py__Tests_test_merge_known_to_known.assert_len_result___dask_": {"doc_hash": "fe86340cdffc73b6b04978cdd8e88a54400d26b91345c95b3bfe556f54b7f7bf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_test_merge_known_to_single_test_merge_known_to_single.assert_len_result___dask_": {"doc_hash": "b7023fbd0852e4c4c96357a3cdca857af9d0aa2ce83cfeaebab723f784b8b319"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_test_merge_single_to_known_test_merge_single_to_known.assert_len_result___dask_": {"doc_hash": "90385d64364140b2b3647a7c09a19c15517f7214dcf1ba5ff96fd198dec62316"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_test_merge_known_to_unknown_test_merge_known_to_unknown.assert_len_result___dask_": {"doc_hash": "e3a4a474a50017dfbe173ad30cafeb0e4c5cd0375088911cfe52ba8edd168d26"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_test_merge_unknown_to_known_test_merge_unknown_to_known.assert_len_result___dask_": {"doc_hash": "d5aa58f3aea3ddf9540bc191800fa808ab24419106dbcdcfdd9a2ba8ce6615aa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_test_merge_unknown_to_unknown_": {"doc_hash": "2f648dbeadbb0df2029ef30e647aad51a3167a2843d0f1a97c5ce2cb4f406e35"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_warnings_pytest": {"doc_hash": "9c0167a0549c49793d2473b34c3c3798aa27c9e4857930311bff46adcac0a310"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_align_partitions_test_align_partitions._different_index": {"doc_hash": "b490752b68f19efc81fa2f0fbc13869e0a84f5c5b3731ee2ecde877ffebc8561"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_align_partitions.ldf_7_test_align_partitions.None_1.assert_eq_rresult_rdf_": {"doc_hash": "0412c074a8b9e6631298890265643cd4f1c972bf219ba9c03257b630722f8499"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_align_partitions_unknown_divisions_test_align_partitions_unknown_divisions.None_1.align_partitions_ddf_ddf": {"doc_hash": 
"bb84054a5e435665cf6dcfa65840a475525972f06caefa97e52fe0f61e0258ae"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test__maybe_align_partitions_test__maybe_align_partitions.None_1._maybe_align_partitions_": {"doc_hash": "886b23dbbd9c503486dffdb2b51243770c05ffe5266358300592fa5f95bf7b02"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_indexed_dataframe_to_indexed_dataframe_test_merge_indexed_dataframe_to_indexed_dataframe.None_9": {"doc_hash": "e75366e702fa37bf554a4134dba3e862f6f9434489867f1ac8f2071f6269fc07"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_list_eq_list_eq.dd__compat_assert_numpy_a": {"doc_hash": "ffdceb54b41400152405c6e84e4f3c683c397897081a42a00f4b6957e0b5fedd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_hash_join_test_hash_join.None_2": {"doc_hash": "57bbe2dbd63be178d72aebc2f7061abf11f1238d38c1690a1995821afbb2c4dc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_sequential_joins_test_sequential_joins.assert_eq_multi_join_pd_": {"doc_hash": "dedef0d135978f6607a780bc0c339b74949fd62ad7df70af605528d197fe77ee"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_indexed_test_merge_asof_indexed.assert_eq_c_C_": {"doc_hash": "0caa46ec40bc69658c83a584b5609415f03b44b5e6496e058961c67368527e6d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_on_basic_test_merge_asof_on_basic.assert_eq_c_C_": {"doc_hash": "d9b52f8aad39c41020323d56088445d84297e75d7b3413e1393387b434ec2ea3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_on_test_merge_asof_on.assert_eq_c_C_": {"doc_hash": "ca14093514e89d2a6dbaeb46a8cf3b3f5779411941ee05eaf5a45ea9f448408c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_left_on_right_index_test_merge_asof_left_on_right_index.for_nparts_in_1_2_3_.for_a1_idx2_in_.assert_eq_c_C_": {"doc_hash": "c1f7a1bf2c5ba3a6c56ec45bb7e37990fe1d0505bdf53dd17f5157be6aaff18c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_indexed_two_partitions_test_merge_asof_indexed_two_partitions.assert_eq_c_C_": {"doc_hash": "023ce7e0ea32d9ec2a3d0f48a3a4d8daa34fc85e70cece8ff1c0659dc439802e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_on_by_test_merge_asof_on_by.assert_eq_c_C_check_ind": {"doc_hash": "ee6f0d573a3894f2ea30395211b465edc927bfda41fc8b57bf63ad7a945586d9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_on_by_tolerance_test_merge_asof_on_by_tolerance.assert_eq_c_C_check_ind": {"doc_hash": "579af33d63b0dbc3714e7770488b936f704b17319dbcf81a2820ac20b35245be"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_on_by_tolerance_no_exact_matches_test_merge_asof_on_by_tolerance_no_exact_matches.assert_eq_c_C_check_ind": {"doc_hash": "3b3cd6c4165ccbb9aba75180188097813bfb2441d7469bc12a756ef7dca167ba"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_unsorted_raises_test_merge_asof_unsorted_raises.with_pytest_raises_ValueE.result_compute_": {"doc_hash": "327fa78dcce370deff2fe6cd8ead75b570d5366cb080216a12c8412ec39958c3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_indexed_concat_test_indexed_concat.with_warnings_catch_warni.None_1": {"doc_hash": "f8436bf018c56ed5b7797cc762120271b0345e0c13c6464e3d37dcbb2655f489"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_test_concat.None_1.assert_eq_result_expecte": {"doc_hash": "6d0522b63702c1c2c5737fa7122784d76cce607858e801cf486169199e0dc92d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_different_dtypes_test_concat_different_dtypes.assert_dask_dtypes_pa": {"doc_hash": "8e97b5d002dac12177f7377ce35a293e460bb67d8fe472e11d2b31f91461f9f3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_columns_dtypes_test_merge_columns_dtypes.assert_has_nans_and_warn": {"doc_hash": "75b3c5abc0f75cedf5720d79894479d1ccb6ce71476d40d7b0e90b8d8495c81c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_test_merge._pd_merge_A_B_": {"doc_hash": "a2087230ce5f36ec4856b4b1ee02caaeaec908e8cb342c358c1e461c474603d8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_tasks_semi_anti_cudf_test_merge_tasks_semi_anti_cudf.assert_eq_result_expect_": {"doc_hash": "b6ab8e417feb6ccb3bc497b387d0eebfbc52dc486025880f766307c42868ceb2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_tasks_passes_through_test_merge_tasks_passes_through.assert_not_any_partd_in": {"doc_hash": "a315e5df72628496ccee4d2d1c6d2d385d6d0af4bbf489d3515a337ad517bec1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_by_index_patterns_test_merge_by_index_patterns.pd_merge.return.out": {"doc_hash": "566de5cc6c3e9dbc48dd980d7897ce21b6e73d370ce6972eca3dbf1ed8765cfd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_by_index_patterns.for_pdl_pdr_in__test_merge_by_index_patterns.for_pdl_pdr_in_.for_lpart_rpart_in_.None_15": {"doc_hash": "ef45e04c06abb31ed42bc6620e946330cc4cb3dbd457c88f2faa35e0c121cb50"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_join_by_index_patterns_test_join_by_index_patterns.pdf7r.pd_DataFrame_c_list_": {"doc_hash": "422fbcb1c7f8b71e416666a36c1e59b10aea9e2c7e7a22521ce86e63548540dc"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_join_by_index_patterns.for_pdl_pdr_in__test_join_by_index_patterns.for_pdl_pdr_in_.for_lpart_rpart_in_2_._": {"doc_hash": "1e44cf5e7adf49c29b7a04948f71357bb38798519bdea02aa090164cef2299c2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_by_multiple_columns_test_merge_by_multiple_columns.pdf3r.pd_DataFrame_": {"doc_hash": "7442a429023e846f1b0c02dd1d03702e09a8329921305819f45ed99b745ebcb6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_by_multiple_columns.for_pdl_pdr_in_pdf1l__test_merge_by_multiple_columns.for_pdl_pdr_in_pdf1l_.for_lpart_rpart_in_2_.None_8": {"doc_hash": "ee4d90bb1a007a395adebd92454c5a0377e6a41b47da5777a1c5260e64413c98"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_melt_test_melt.None_9": {"doc_hash": "9f2aba53f761935d63ef37318a56e68e682d4b57357eede12a11e6c024c985e2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_cheap_inner_merge_with_pandas_object_test_cheap_inner_merge_with_pandas_object.list_eq_da_merge_b_on_x": {"doc_hash": "afcd7b03b6d1cddefb5dcfe07afdce17928b404aacf68db479be1f0aab9631cb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_cheap_single_partition_merge_test_cheap_single_partition_merge.list_eq_aa_merge_bb_on_": {"doc_hash": "f08e557771394c8e7beba29e6c298881c82b0c2aea32dacb309ecfb1ff22540b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_cheap_single_partition_merge_divisions_test_cheap_single_partition_merge_divisions.None_1": {"doc_hash": "095c178b5f3b5be49af495de99818a2297fe79f10e5110a1b6a9506046a914f6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_cheap_single_parition_merge_left_right_test_cheap_single_parition_merge_left_right.None_1": {"doc_hash": "141c0c629d5aedb88088386580942f0e92c7a14d6a4a807189286dc9e3ecb11c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_cheap_single_partition_merge_on_index_test_cheap_single_partition_merge_on_index.None_1": {"doc_hash": "b40346ca17e49db75c6ff1109a876b77f20f6ecaf87fd40f6db64fb12c42493f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_maintains_columns_test_merge_maintains_columns.assert_tuple_merged_colum": {"doc_hash": "9a67a32265c3d95304b4fde64cdf4765e7774d1d09a9239b5721ac15ad72cc99"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_index_without_divisions_test_merge_index_without_divisions.assert_eq_result_expecte": {"doc_hash": "e7d4f93555196e4f91bf97f5c604dbad917d0997db7b863124749bacf66e0b0c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_half_indexed_dataframe_avoids_shuffle_test_half_indexed_dataframe_avoids_shuffle.assert_len_cc_dask_500": {"doc_hash": "97334d5dd9c678731afcc5f61d1decc0c3236d267f03712e0fc2d79fca38c916"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_errors_for_merge_on_frame_columns_test_concat_one_series.assert_isinstance_c_dd_D": {"doc_hash": "62b07c8c8fc16d9161eb167726f953bb77d45f0ab1b8a2bcbd20948986b0df71"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_unknown_divisions_test_concat_unknown_divisions.with_pytest_warns_None_a.assert_len_record_0": {"doc_hash": "d347a5b00915a70bf5cdca2c7f92410054f01d61ce427f3b107541a1e25705ca"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_unknown_divisions_errors_test_concat_unknown_divisions_errors.with_pytest_raises_ValueE.with_pytest_warns_UserWar.dd_concat_aa_bb_axis_": {"doc_hash": "2aabb716f63c01a4e1e6cfda61ce688d0eb37034c3bb2a6e3b8d33933442cee4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat2_test_concat2.assert_dd_concat_a_is_": {"doc_hash": "0d83e977563732a64a4646a9669bccdd8d00ead16be95cdcae8b77e9e982c150"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat2.for_case_in_cases__test_concat2.for_case_in_cases_.None_5.assert_set_result_dask_": {"doc_hash": "76207abe0191a1f882af26b862f6ef915a1f3a0e1842b60c38c8dc711629bbdb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat3_test_concat3.None_5.assert_eq_": {"doc_hash": "239c88ad03f821dcf59cae96523cf82b3995b16bc0e7f5e46cbd47bad44983c1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat4_interleave_partitions_test_concat4_interleave_partitions.assert_msg_in_str_err_val": {"doc_hash": "f6d6944cb5395d4c080996da46519359d5dfd9dea8faaf2fe47836f77cb52a89"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat5_test_concat5.cases_11._": {"doc_hash": "c8f3e6fe22e41048c8f6473896e34f620c32c4482a85e287712b7f02a2353fa4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat5.None_1_test_concat5.None_1.None_3": {"doc_hash": "62a830d75435dfda6d2389a18c349488c6a40fe892b5e0144a68d47dbdfb2fc8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_categorical_test_concat_categorical.if_not_known_.dframes[0]._meta.clear_known_categories_df": {"doc_hash": "899c3a37ad52fa46e60b88c40237c7dc145428f8d35f0dcd5f37a8a864fa68d2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_categorical.check_and_return_test_concat_categorical.check_and_return.return.res": {"doc_hash": "a713d3d167fde1de65fb9a0f3b5dda047e1a2329a28ed80f430b25aa7390bf91"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_categorical.for_join_in_inner_ou_test_concat_categorical.for_join_in_inner_ou.None_4": {"doc_hash": "47bfa3ffbadc08ad6ba104781cbe3776c091b62f0a8b5f54e3b3e155eaf823b1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_categorical_mixed_simple_test_concat_categorical_mixed_simple.assert_eq_result_expecte": {"doc_hash": "58e7b18775dd2d3a4efb55c311f8b398a3dc5dcb18e649a90c3c23bd69a82fad"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_datetimeindex_test_concat_datetimeindex.assert_eq_result_expecte": {"doc_hash": "2ebdf9aa614fbdcb8de0ed94636252bff299583399b7b671f0906a836f10cd4d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_append_test_append.None_8": {"doc_hash": "365166169b4bfa8a04de2bc3988325d691c796b20bd34e764d9c8cea4e5b6eff"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_append2_test_append2.None_7": {"doc_hash": "a367c0f9b4bae34e2c8a9b3f874224400a8fc38e4c1c1c2790ad17074f27b07b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_append_categorical_test_append_categorical.for_known_in_True_False.None_5": {"doc_hash": "7894885ea2d19c202a00431fe6bae41c302d8bf492e7a1083c458c0a685f62a9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_append_lose_divisions_test_repartition_repeated_divisions.assert_eq_ddf2_df_set_in": {"doc_hash": "0dea3f62af8d2f02ef63dc85860b76c0d7eb2658decf750bf2a47a58ee310603"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_multi_duplicate_divisions_test_multi_duplicate_divisions.assert_eq_r1_r2_": {"doc_hash": "42fd93a1215d94370968c5bc45e52d6c8594858bf4a9ce229bbfeb6d9879a151"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_outer_empty_test_merge_outer_empty.for_x_in_range_0_k_clust.assert_eq_": {"doc_hash": "5120b072f17db994b5b9ce6a87ddf3c779f2350c467e69dbfde2400af323c5dc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_dtype_equality_warning_test_dtype_equality_warning.assert_len_r_0": {"doc_hash": "9639bb547643e8c66c58169e4b2e700895f582d820b7c26d81b144598e695846"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_numeric.py_pytest_": {"doc_hash": "0f1c8670f065c90f4c8492811f23b9638d27624ef3918246ab3efcceed341d8c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_optimize_dataframe.py_dask_dfs.list_dsk_values_": {"doc_hash": "3e66af7e4aa716070a1b2d63d618927c6178de0fcf09756f6bd8bccbd8b8fbdc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_optimize_dataframe.py_test_fuse_ave_width_": {"doc_hash": "fbce4f4df15b911629334590d05abe6649d37c5a1e507c7cc9caef5d66cae24f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_np_test_get_dummies.tm_assert_index_equal_res": {"doc_hash": "1923d632bd34dd5f708f846610c99e3e6549d02ba39c29f7896719b329ff1ffd"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_get_dummies_object_test_get_dummies_object.None_2.dd_get_dummies_ddf_colum": {"doc_hash": "169a38ad553299c64a4b040be5f17e287065b09a3187a94c27f763457beff852"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_get_dummies_kwargs_test_get_dummies_kwargs.None_7": {"doc_hash": "9dc61fd582f4bbb4f492cd797cf66574454a1690ea7779869ffc2f2a5e0b5992"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_get_dummies_sparse_test_get_dummies_sparse.None_2": {"doc_hash": "645244b77596205b36b469823a76c8ab92b96ba9ae42ecd6bc9e1c05e034cdf3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_get_dummies_sparse_mix_test_get_dummies_sparse_mix.assert_pd_api_types_is_sp": {"doc_hash": "7ece4f8ac0b2d0b463192773990e4ff10ac80f391124a107b03c09c311fe72a5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_get_dummies_dtype_test_get_dummies_dtype.None_1": {"doc_hash": "15678f1d22844501cecc10843fba622d53e42a47210827fbb62ebaa839f5c9fb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_get_dummies_errors_test_get_dummies_errors.None_3.dd_get_dummies_ddf_x_": {"doc_hash": "ddd2d7ba99d632752d2f840e3c522bf28056cf5ab0990a1a16430458f2d39ec7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_pivot_table_test_pivot_table.None_1": {"doc_hash": "b440dddbfe0668b6032f282610fc2a0046b9b6a095a167509ccd5372c8e73fe9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_pivot_table_dtype_test_pivot_table_dtype.assert_eq_res_exp_": {"doc_hash": "d05815d671ac1df6786d170f10cbc853d1f324930349add522b8f521fc3116dc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_pivot_table_index_dtype_test_pivot_table_index_dtype.assert_res_index_dtype_": {"doc_hash": "aeb8ef76490505ecc04145e04a84765ca00b8d0d67b90bd95896f69ac03a5967"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_pivot_table_errors_": {"doc_hash": "57de7b7df747ed4f3d55dad412eebb0a75c6979c20b9f63a93963888832e2d3f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_from_distutils_version_im_shifted_sum.return.df_a_b_c": {"doc_hash": "cae42371d5dd10ba7a941385e0f9df44a34f1b2fbf02cfdf01b63006424a9de7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_map_overlap_test_map_overlap.for_before_after_in_0_.None_1": {"doc_hash": "238610693aa2146ec9161c8111adb1c7c41fc76a0f06224dee9e2352f67a8e2d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_map_overlap_names_test_map_overlap_names.assert_res4__name_res_": {"doc_hash": "1e30c120e101ebc911a343ff67ba87da4369528ff4eeb899c8061824cd31f3d0"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_map_overlap_errors_test_map_overlap_errors.with_pytest_raises_TypeEr.ddf_map_overlap_shifted_s": {"doc_hash": "9a7d7bfae14eb33e78af5d1e4b08b3b140d38f0f153eb2d2d67466f2a10df421"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_map_overlap_provide_meta_test_map_overlap_provide_meta.assert_eq_res_sol_": {"doc_hash": "662a35b8588bc2549bf8da58efd62e6cc703fc53634761c94288e22d522e2b53"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_mad_rolling_method_args_check_less_precise._": {"doc_hash": "48bcb8b6341a571839a9c07b7a9f2f366f8c1d723dda7522b3826f4e510cca9c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_methods_test_rolling_methods.None_1": {"doc_hash": "ab71070aeffbe764cd2409ae180151bd1a7bdf88c36c407c8477f20c88261746"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_if_PANDAS_VERSION_0_2_test_rolling_cov.None_1": {"doc_hash": "67e798c25e210d7729149e38912fb9b2183e1abfdce33cf30086ac97ae17acc6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_raises_test_rolling_names.assert_sorted_a_rolling_2": {"doc_hash": "055afcaac0963a09bf86c65c26dfa72bf6558d2ac841ac692cf17a115cc4012d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_axis_test_rolling_axis.assert_eq_s_rolling_5_ax": {"doc_hash": "782776aa96a8f854257b7153fb9ca7907b4e9ae6c8b214f9367a333d52b35962"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_partition_size_test_rolling_partition_size.for_obj_dobj_in_df_dd.with_pytest_raises_NotImp.dobj_rolling_12_mean_c": {"doc_hash": "e1a579d80ac5b157bf149592ef9a9b2bef3307f0d1b8db491ec38a08628b72a0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_repr_test_time_rolling_constructor.assert_result__min_period": {"doc_hash": "7f354c0753621528699db9864671babb6bbd599826b9122ebbe0ca4ca3068688"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_time_rolling_methods_test_time_rolling_methods.None_1": {"doc_hash": "0596526146baa44ac6f6c0f66a40efbca533aaebd26231f2eaa7e4c5fb9dc42c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_time_rolling_cov_test_time_rolling_cov.None_1": {"doc_hash": "37b301862086dcca6a86541a9d1890e8a58f2d7ef899dc47b050e2e3a5300b6c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_time_rolling_large_window_fixed_chunks_test_time_rolling_large_window_fixed_chunks.None_2": {"doc_hash": "55fbd0f4e1c275d36f5b58b133b0b3c3fc24a5b2863daad76873c53ed7611ac6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_time_rolling_large_window_variable_chunks_test_time_rolling.assert_eq_result_expecte": {"doc_hash": 
"feabd8c2c4cf3cdab6a441d0ac610214e69e0f826b7a2541404229bdfa6518ee"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_agg_aggregate_test_rolling_agg_aggregate.None_4": {"doc_hash": "938459439fa07eb10c7e97f8eff6003affeec08150d99697b7cad118cc5c34f6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_numba_engine_test_rolling_numba_engine.assert_eq_": {"doc_hash": "227e6811def3d4b9ed55ddf91927388775650a4453ca8ee9d2ae6777b96f3b15"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_apply_numba_raises_": {"doc_hash": "abaab1ff4ef6532371d3ed10c53e54b4860fa408f80dc411516e0d4543e25a38"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_itertools_shuffle_func.shuffle": {"doc_hash": "fdb0425bac5ef436dda7ae30c985ca267931e30018d793137a3aa1bb86388d46"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py__conflicts_with_keyword__test_shuffle.assert_shuffle_func_d_d_": {"doc_hash": "3e434e2af9ceea15fca6bc0d5037ed08bae82e7e40d6de6c789270e8e8b1b679"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_default_partitions_test_shuffle_npartitions_task.assert_set_map_tuple_sc_": {"doc_hash": "5f3b7e84d778180f6fbf5ddb466b0803a455b76b8805f2aa03eeccc1a0ffe565"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_index_with_non_series_test_shuffle_from_one_partition_to_one_other.for_i_in_1_2_.assert_len_a_compute_sche": {"doc_hash": "08c5311a7010849d57ebb09d281b5d22d8246e28c04dc852dbd8867415469f01"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_shuffle_empty_partitions_test_shuffle_empty_partitions.for_p_in_parts_.assert_s_columns_p_col": {"doc_hash": "c416cc002515f2deae996ababb81f0b5df6384123a7f93bb1ba954f1c626d47b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_df2_df2.pd_DataFrame_": {"doc_hash": "f2d31838ea0565f0ef352b4a5fa9d7dc64eccdab0aaf647036b4f61d27eed428"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_partitioning_index_test_partitioning_index.None_7": {"doc_hash": "95e01189dbf1c179d30e09f03b540b9f69b9230ce5f786c3ede32d6d576c824b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_partitioning_index_categorical_on_values_test_partitioning_index_categorical_on_values.None_1": {"doc_hash": "b5739c62acf6dd4f61298e7620fb480855ec6bcc71222634a1d88808ce1823ff"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_tasks_test_set_index_tasks.None_5": {"doc_hash": "a65bbe2a77820d20df068225e41b25393316c9cd849d549696182947cb2a5de5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_self_index_test_set_index_self_index.assert_eq_b_df_set_index": {"doc_hash": "777504922ab4ba2dd96c43833ad12b48b2e0d35db97c176199e10aaf5e9d6573"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_names_test_set_index_names.None_3": {"doc_hash": "e31ac534ae129dd94ae9830d6972fbf0e06d8345f20c1b35f3acf272f4acdc37"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_tasks_2_test_set_index_tasks_2.df2_value_sum_compute_s": {"doc_hash": "7794f934a874f2dd29c835bf456310fdfdbad228679b38ab089904a732a44c8b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_tasks_3_test_set_index_tasks_3.assert_ddf2_npartitions_": {"doc_hash": "9e2c6a44ed3bff3da2dd9e42b6be3dad4bab5706836460f6e2152f85173fd694"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_shuffle_sort_test_shuffle_sort.assert_eq_ddf2_loc_2_3_": {"doc_hash": "b9cb7ec4744a4c38d3ea44181c1afb11dd5123d88f712baec7a3fbe42473dfdd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_rearrange_test_rearrange.for_i_in_a__partitions_dr.assert_sum_i_in_set_part_": {"doc_hash": "4ac82aaf241783f514756309ed7d9a59d4e1f81103a5194c271ddaeec9aff1c3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_rearrange_cleanup_mock_shuffle_group_3.raise_ValueError_Mock_ex": {"doc_hash": "819f44a25fb7f2e346cd710ea57a7a465c5e640e4e332a6ee41e2d42bcb97895"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_rearrange_disk_cleanup_with_exception_test_rearrange_disk_cleanup_with_exception.assert_len_os_listdir_tmp": {"doc_hash": "a445c75b72107ed42d9314df8bde6ebe657798005dd52d40442c321e95de7c7b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_rearrange_by_column_with_narrow_divisions_test_maybe_buffered_partd.assert_isinstance_p2_part": {"doc_hash": "c94b57c4aa1392ba1708bc4875e653d12f6c50042cc44544f7b57a12889f65ec"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_with_explicit_divisions_test_set_index_with_explicit_divisions.with_pytest_raises_ValueE.ddf_set_index_x_divisi": {"doc_hash": "bc95fcf5926e10990a102d01a364f2d1b5c7904a5e689c6b6de973df0cf59b67"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_divisions_2_test_set_index_divisions_2.assert_list_result_comput": {"doc_hash": "58fb9dac426ff7e0fc88b7b0f007216c0d9dc406ace607cd5eaad0a43faeb4b5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_divisions_compute_test_set_index_divisions_compute.assert_len_d4_dask_len": {"doc_hash": "2f7410c575d49270308029e55d3dbc6d9bdedf825900ffec1d6d8e65392615d5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_divisions_sorted_test_set_index_divisions_sorted.None_3.ddf_set_index_y_divisi": {"doc_hash": "2a9a0548e7dad60210c89a9f52609a9c48faf08419f14f22424218c3e17dba31"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_consistent_divisions_test_set_index_consistent_divisions.assert_len_divisions_set_": {"doc_hash": "30f6568f1f43c83c30cc84ecc8ba42b762c307656e9e566f0b214c9003b79f4b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py__set_index_make_part.return.pd_DataFrame_x_np_ran": {"doc_hash": "c741a35d2f75e79288bab70a5727506623d20320204d3232000c0fc3f1ed4ac8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_reduces_partitions_large_test_set_index_reduces_partitions_large.assert_1_ddf2_npartitio": {"doc_hash": "7993117a595a9edb95a232e3c4f206161a0c7446e6b8d2cfbe0c46f97df8cb5e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_doesnt_increase_partitions_test_set_index_detects_sorted_data.assert_len_ddf2_dask_d": {"doc_hash": "ca37b4e1d66c1464e3733a56f78ccd7fc28c7521e5b7e4599b13879968338de3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_sorts_test_set_index_sorts.assert_ddf_set_index_tim": {"doc_hash": "88d7a7c76691e7fa60671f89d43ef3b9c3f0c39d62709f9ef67e02dd149cb4cc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_test_set_index.assert_eq_d5_full_set_in": {"doc_hash": "f4021bf7a5e71e251fb9e057e8a85545db3b9f79c8bb81110431d45ae46d8927"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_interpolate_test_set_index_interpolate_int.assert_all_np_issubdtype_": {"doc_hash": "e179456ec8610b040e8c3d42811f1f2808822c897f9fa888c05bc5c3df7149d3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_timezone_test_set_index_timezone.with_pytest_raises_TypeEr.d2_divisions_0_s2badt": {"doc_hash": "3344819f30235ae06172a11263476ca0348144d5deaff81e60565cbab7c27fa6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_drop_test_set_index_drop.assert_eq_ddf_set_index_2": {"doc_hash": "4c4bc8ec37fd59351f43fb558f737bfadc33b5e1d4da447ad15df9fc330bc4a6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_raises_error_on_bad_input_test_set_index_raises_error_on_bad_input.None_2": {"doc_hash": "a3a057cb02d2185f10adb90cfb7b31fdf3d65c8cd1c6a8f7a2aa59faa2b1d32d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_sorted_true_test_set_index_sorted_true.with_pytest_raises_ValueE.a_set_index_a_z_sorted_T": {"doc_hash": "079307848146ea2fd222164e142bb4f3c1998335c4ae31bdfa24cb8f87af8f73"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_sorted_single_partition_test_set_index_sorted_min_max_same.assert_df2_divisions_": {"doc_hash": "f366a5626c5a8695c6614761d857541c1dba5b97b071b2e94494ad86b53052e7"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_empty_partition_test_set_index_empty_partition.for_conv_in_converters_.assert_assert_eq_ddf_set_": {"doc_hash": "83960d50e19132a6fb3cd5e2aa8fb5eec1f49023a2de8fed80ff7d2029218350"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_on_empty_test_set_index_on_empty.for_converter_in_converte.None_2": {"doc_hash": "89209b944e83cd5107caaddb6fdf1c0404405be2b230fcf94d7d2ee0c357ed4f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_categorical_test_set_index_categorical.assert_categorical_equal_": {"doc_hash": "9bf2ce13e3e5d3456af2dfc235e2f53b38457d65fb4706c8e61b4ef3621b3edb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_compute_divisions_test_compute_divisions.assert_b_known_divisions": {"doc_hash": "17cd7841c7445f2f8fc6efe05672a5a46480f842eaca9499836f73d8a22439a9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_empty_partitions_test_empty_partitions.None_2": {"doc_hash": "44f2c7856e5f2c1d6e4df3ab47ac3824231229f5f2dc05bc50f5adcc9c84276a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_remove_nans_test_remove_nans.for_conv_none_val_in_con.for_inputs_expected_in_t.assert_remove_nans_params": {"doc_hash": "015550f57283ac393a80d356c3fb7bd98f0b1b90960b8b33a0a60b8ac84bd575"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_gh_2730_test_gh_2730.tm_assert_frame_equal_res": {"doc_hash": "d3fd6cc3e14ae3a317547ce58332f17cd754fa4198434a84939e5935a10d00ca"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_does_not_repeat_work_due_to_optimizations_test_set_index_errors_with_inplace_kwarg.with_pytest_raises_NotImp.ddf_set_index_a_inplac": {"doc_hash": "8b9fa48d57ee9eda51570896fab93acb9727302cda1d16eccbca84fe0ab77111"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_timestamp_test_set_index_timestamp.assert_eq_df2_ddf_set_in": {"doc_hash": "735efb3b5b59c96bbd1a55646c30dee71da24909a4a6a85ab182671ada5a6cf1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_disk_shuffle_with_compression_option_test_disk_shuffle_with_unknown_compression.with_dask_config_set_da.with_pytest_raises_.test_shuffle_disk_": {"doc_hash": "b7a28140e945d745b83c42cdc618a4029ce79cb790f85baf2f5d06c49e20d21a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_disk_shuffle_check_actual_compression_test_disk_shuffle_check_actual_compression.assert_len_uncompressed_d": {"doc_hash": "ac7e87b1c91101869c7b9871a557bbf407b1422ac69c2388081c68388ab21a97"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_pytest__BASE_UFUNCS._": {"doc_hash": "a4a5a492fc03aa90984c4e1ae3672a27d5f853c6d8e02a0e5d59edb7646d308e"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_ufunc_test_ufunc.None_4.assert_eq_dafunc_pandas_i": {"doc_hash": "a39dc42f23f7f26ec7a5557cb35a04a41120caf5ff354924d16943e75b6190d1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_ufunc_array_wrap_test_ufunc_array_wrap.None_5": {"doc_hash": "b0a549342573bbaa932e93e4307741a0c65a0ac0a7df9ff486b7496b27eaaf9d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py__UFUNCS_2ARG__UFUNCS_2ARG._": {"doc_hash": "682cea3989ff75991dbb648a9ea24ca5470d236992f7db748ff0867e6160510b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_ufunc_with_2args_test_ufunc_with_2args.assert_eq_dafunc_pandas1_": {"doc_hash": "9ce5b68d787dc90833dee78b28493198c216b04f19fdc1efbc6333a5f6f32d64"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_clip_test_clip.assert_eq_da_clip_pandas_": {"doc_hash": "1d9e4d0fc6071c7a8216d77ca79cc1cfa47903019484054a39bbb8cdf7e80557"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_frame_ufunc_out_test_frame_ufunc_out.None_1.assert_eq_ddf_out_np_exp": {"doc_hash": "51f51f4b3f4310f2cc233a1616fd501873f85a7c3ac269053bc2580a8a17bf58"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_frame_2ufunc_out_test_frame_2ufunc_out.assert_eq_ddf_out_expect": {"doc_hash": "15322ea73f43e36a47db974eb8f8a314b4e0188d95bc33b0c56fb54805378252"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_mixed_types_test_mixed_types.assert_eq_dafunc_arg2_ar": {"doc_hash": "c2405024918f9ab24ef6d2cceeb3b8318441dcfc10b7386fe7485a9270719a5c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_2args_with_array_test_2args_with_array.None_2": {"doc_hash": "d03d823a589954a8e5e84f71ee76fe8d94d83a3e0a28451d785616d48124df5f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_ufunc_with_reduction_test_ufunc_with_reduction.with_pytest_warns_None_.assert_eq_np_redfunc_np_u": {"doc_hash": "5c96ffa313daf366bf5857d63178d5e605b60f543c4bbe47b8d604e8a36fa3ab"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_ufunc_numpy_scalar_comparison_": {"doc_hash": "bfc62c17ee426ab9a7d9f1e5c8584fb7073685f8a1fc8100e031e1772bafd31c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_re_pytest": {"doc_hash": "781b6b8e6ce12b620f9be8880723ad699831c2c38e602c334e14c3a07358df2d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_shard_df_on_index_test_shard_df_on_index.assert_list_result_2_ind": {"doc_hash": "c67c2aafbd2aae11e0c527063ac5f4fc728b970c236b6e80f97194ea000961bf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_make_meta_test_make_meta.assert_pytest_raises_Type": {"doc_hash": 
"a8a963128e41b9ee35681131c5a74ae0bee0247e8911f0de236c45f0dbd972f2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_meta_nonempty_test_meta_nonempty.assert_df3_A_s_al": {"doc_hash": "13230b7c775ad08858c74c289c5baf5ebae768b205d3ae537a582fad040b384a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_meta_duplicated_test_meta_nonempty_empty_categories.for_dtype_in_O_f8_.assert_res_name_s_name": {"doc_hash": "1f3a52d252c271806427c68b72d5fa398bc5b187f12b9dd9401ba5942b278862"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_meta_nonempty_index_test_meta_nonempty_index.None_26": {"doc_hash": "3d3627056d623c91914ea8c3a4791d452798c48818f9784900da11e1e037c4f9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_meta_nonempty_uint64index_test_raise_on_meta_error.None_1.else_.assert_False_should_hav": {"doc_hash": "37b9aa63cde4ad06fa4f6cdcb98231f1a00a6cbd29d3f710c3b872b48c55d72f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_check_meta_test_check_meta.None_6": {"doc_hash": "775c454f4c087d5da82cfb4763c50d58103a8f45f9055e9c674d1babc62e857d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_check_matching_columns_raises_appropriate_errors_test_check_meta_typename.assert_pandas_in_str_in": {"doc_hash": "cd3895d69cf041cbf8423bfe06bd089805f4cd1d94fcf02ec0ef3866120dc870"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_is_dataframe_like_test_is_dataframe_like.None_20": {"doc_hash": "64351e5b69038ad75e9f98252658fe48ec1b55dbb5baa5b7768a9c163073babb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_apply_and_enforce_message_": {"doc_hash": "b23fd7366f21f1088784b2c4e6da474d3c8b2c745b8e2bd6deb833bd42b1b5de"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/resample.py_pd__resample_series.return.out_reindex_new_index_fi": {"doc_hash": "e2db3c41737f9c72b4bbd7b05707b91363e00022ea29348973fdfae9d2d8fe1d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/resample.py__resample_bin_and_out_divs__resample_bin_and_out_divs.return.tuple_map_pd_Timestamp_n": {"doc_hash": "869b00d8282915e55ebc3dbb1f187b58e7a850dc969e0d097c13e9d548571fd6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/resample.py_Resampler_Resampler.__init__.self._kwargs.kwargs": {"doc_hash": "7f843037d162c814c2f8364975f1b6cd4462a0f52c9fc0558876a986737e8a67"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/resample.py_Resampler._agg_Resampler._agg.return.Series_graph_name_meta_": {"doc_hash": "efff48b3dc3c378381cf0ad2eb7d273b6d11d3e7dc7a71014a6280d933526afd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/resample.py_Resampler.agg_": {"doc_hash": "6d38f41b9c93728a1cb9d302ed818b9c880ba8089d6247f04f8dc46b68faddbe"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_from_itertools_import_pro_test_series_resample.assert_expected_index_1_": {"doc_hash": "945c70a17842c6a4b831fdb8619ff170d7202fc6332a50584a91d3c5cac19f85"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_resample_has_correct_fill_value_test_resample_has_correct_fill_value.assert_eq_": {"doc_hash": "109900deb1970e39bdb2e81dd060a8fb9d6e4b31caa3a5a92e7cd9d12662d04e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_resample_agg_test_resample_agg.assert_eq_": {"doc_hash": "25ea79888b69df3d8597310f635ab59cd761215503e4eb0d02bfaa9f20379907"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_resample_agg_passes_kwargs_test_resample_agg_passes_kwargs.assert_ds_resample_2h_": {"doc_hash": "161e0d8ad5fa7c66d42a51c17185f930923ef5cb18ac6ba7a82c3eaab8c5a686"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_resample_throws_error_when_parition_index_does_not_match_index_test_resample_throws_error_when_parition_index_does_not_match_index.with_pytest_raises_ValueE.ds_resample_2M_count_": {"doc_hash": "b9f958fa14cba0a78f2884a5001afbf0fcde9ccc74f0ea17b9eb07306efdfdcc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_resample_pads_last_division_to_avoid_off_by_one_test_resample_pads_last_division_to_avoid_off_by_one.assert_eq_actual_expecte": {"doc_hash": "a4d3fbdcf9c52da10bd44391e5446502751d287b951cfa6126cd6db7865e6088"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_resample_does_not_evenly_divide_day_test_resample_does_not_evenly_divide_day.assert_eq_result_expecte": {"doc_hash": "98369f31a8d3d7ffe67656441f61fdb72e69c8202f2414d920ee4bb294e2a8fe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_series_resample_does_not_evenly_divide_day_test_series_resample_does_not_evenly_divide_day.assert_eq_result_expecte": {"doc_hash": "380b49a67165115f960604729859dd409b8c72619ca7f1448527166e94111498"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_unknown_divisions_error_test_resample_index_name.assert_ddf_resample_D_": {"doc_hash": "c00ae061eb16e00300f9ef10977f64ada3acd34ec8906f911d49003da2b2b7dc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_series_resample_non_existent_datetime_test_series_resample_non_existent_datetime.assert_eq_result_expecte": {"doc_hash": "c6c0cfe80508dd72c94a78601a6aa19f0e89d29ca633da35e3e5d6a04f71ece9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_common_aggs_": {"doc_hash": "84820aa09fe0d85e591db2e0f91026f2951054deee1a58160e813219ce71da96"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_math_is_integer_na_dtype.return.isinstance_dtype_types_": {"doc_hash": 
"434fe3a1b7994804e9ca8c3414749367a94be63b8cf77acebfbe0976487ba326"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_shard_df_on_index_shard_df_on_index.if_not_len_divisions_.else_.yield_df_iloc_indices_1_": {"doc_hash": "939ee5fe17dc2dcb257c2ac3e2cab1f720ceb854c106558b74d7b810c01494fd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py__META_TYPES__META_DESCRIPTION._": {"doc_hash": "e21341d6e3d2d65be790f0ea3ddf5d92e3986dbcea27286f6708d120838d0375"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_insert_meta_param_description_insert_meta_param_description.return.f": {"doc_hash": "9682b72efec8f512ab58210c9c5eda7390db6a71a8839e77fc86f949cbf61bfc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_raise_on_meta_error_raise_on_meta_error.try_.except_Exception_as_e_.raise_ValueError_msg_fro": {"doc_hash": "af747d5b49de939b3d8a2d890f78f55f77b6521a0507a04b260b1d4f9dc58468"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_UNKNOWN_CATEGORIES_has_known_categories.raise_TypeError_Expected": {"doc_hash": "f1c28ebc6d2a7f2659afcbe67626c13b31f3923186e4261420cc0ff152b9f3d9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_strip_unknown_categories_strip_unknown_categories.return.x": {"doc_hash": "400fe89337935786071caa790c430cb8fb8d60c339bacebef3bddff4c0c960c9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_clear_known_categories_clear_known_categories.return.x": {"doc_hash": "6fc3cb9f7f2da9c54899d35e65d66b59fde3f7275f49e890e652a7f2008cd216"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py__empty_series_make_meta_index.return.x_0_0_": {"doc_hash": "38503d3fc9966cbb4ebffc0ddc482894439e75ae9ba11ea4efc63bdcc90289c9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_make_meta_object_make_meta_object.raise_TypeError_Don_t_kn": {"doc_hash": "95ed03b86c9a66caae08f89c8a1922655e09ea2f81434678f7c038ffd5fd3132"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py__numeric_index_types_meta_nonempty_object.if_is_scalar_x_.else_.raise_TypeError_": {"doc_hash": "3c20a0df689b532d76955b9c5136410e869ef5c44bd4169122f07196cbd16a17"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_meta_nonempty_dataframe_meta_nonempty_dataframe.return.res": {"doc_hash": "0923bb1c28acb85a70ab11c79c0202de8592d044904da6ae901e5956e87f971b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py__nonempty_index__nonempty_index.raise_TypeError_": {"doc_hash": "f38251e19021c31982ddd1bea4dd59e317a75c33ac4a3d3a15c9b1d06587df4c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_hash_object_dispatch_group_split_pandas.return.dict_zip_range_k_parts_": {"doc_hash": "9bd08e8ff1a116bbf36acea65c802347ee6b84cf8dbe6c4bb08645bf2bcfb48c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py__simple_fake_mapping__nonempty_scalar.raise_TypeError_Can_t_ha": {"doc_hash": 
"a8cfb8942ad429783454513494a86e8d917de0c9604cd6ee091c41bac4cebb46"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py__nonempty_series_is_index_like.return.dask_is_index_like_s_": {"doc_hash": "da80c1c8b36645fbbb23c9057914ee1776d8a78649f93bd12fd1a54ac74cd07f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_check_meta_check_meta.raise_ValueError_": {"doc_hash": "b1c757445bda673db7485c6bca477a752d755426cc01a4a7e0f6b9e2254381dc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_check_matching_columns_index_summary.return._entries_format": {"doc_hash": "c9ccacaa258044d2bd8826901c123c6d21b4d1713c2880f2f75a0b2324f773a1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py____check_dask.return.dsk": {"doc_hash": "d6037ede8f4b047c35b89d64ec5453bcf79f80bf4977c04862db5383b3714ccc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py__maybe_sort_assert_eq.return.True": {"doc_hash": "cccfc367016d85eec969ad26612caa3768c8b91eac1346b36c8e88d9c8469d7f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_assert_dask_graph_assert_sane_keynames.for_k_in_ddf_dask_keys_.assert_k_split_0_is": {"doc_hash": "613bea6067acda560fb53fcb97652f620c77ecd6d0516c9394bd67421691c6cb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_assert_dask_dtypes_assert_dask_dtypes.if_not_is_dask_collection.else_.if_hasattr_ddf__meta_dt.else_.assert_type_ddf__meta_": {"doc_hash": "b90b6f1ce40303720b3b056eb5610851fd9fad3d44c352a4a85181a2cea26ee3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_assert_max_deps_": {"doc_hash": "1463bd2ee1a551f4337abc5d20a767c5532cc42c5f455a9aef76671155eb276c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/datasets.py_random_timeseries.return.make_timeseries_": {"doc_hash": "844bc834bf97fe5fd8a5b6103c1b47d13fc52a930b3b0c4383b86d823f2c952a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/datasets.py__generate_mimesis__make_mimesis.return.db_Bag_dsk_name_npartit": {"doc_hash": "f8de8f86dd61d58b63021e0c6658cf5002e42f0c31fdc004cc259b01e870f5f0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/datasets.py_make_people_": {"doc_hash": "d87ffc995f44f8f730cc2a9664f299542e270fa1afcc637c81d431524e4707eb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_operator_finalize.return.Delayed_name_graph_": {"doc_hash": "5db98cedb9feb14cdc297d298f7efa08b1bc081cccc49e8e104b396a897621e6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_unpack_collections_unpack_collections.return.expr_": {"doc_hash": "81dcd884e04a25eb8d22c70990d14daebc20671309211470f0b6fb75f9410cdb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_to_task_dask_to_task_dask.return.expr_": {"doc_hash": "24fc868837f447f55ac19c2ff98fac2c4b395ed1169963fa537c1ec98342961d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_tokenize_delayed": {"doc_hash": 
"7e7f57f4cc2576a07f185ed5cdfe5f2d0dfc0b102471d907992de3f1ea080d40"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_delayed._Wraps_a_function_or_ob_delayed._Wraps_a_function_or_ob": {"doc_hash": "0ca79946c9a49d8909a7f77315629275d09e53187f87096cf88449007cff5702"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_delayed.if_isinstance_obj_Delaye_rebuild.return.Delayed_key_dsk_length_": {"doc_hash": "c3468c002c035d094f7fafb2c1e73bc25a050d81c0570c21b8cc17cbee439d80"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_Delayed_Delayed._get_unary_operator._get_binary_operator": {"doc_hash": "3856f5311ca2b17cc83c3216c6150c0cbf4d40b40cb00a0b06537186b692506c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_call_function_call_function.return.Delayed_name_graph_leng": {"doc_hash": "5a3c621a7be087864c91f8d565dfc2d81f4896f3ccc208ad4fd2733740502766"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_DelayedLeaf_DelayedLeaf.__call__.return.call_function_": {"doc_hash": "f280444eaa5b866551742b04c7c4d6d04d2dbb0c76ed6a9f1e7f97279979587f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_DelayedAttr_DelayedAttr.__call__.return.call_function_": {"doc_hash": "834fa6f67a51fd35d1ad92a65da7fab05eb4ee2885a6a3ffd5f0d9a69f1b8702"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_for_op_in__": {"doc_hash": "ede61c9f231a86f7f6ffcba39a3a27d70eafa748d1027985bae93f27cb4d9fd3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/__init__.py__": {"doc_hash": "2ffd5ced0b3acd134e1509dceacd62232013436469d4702d9ae021cfda3d7306"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile.py_from_collections_import_n_Profiler.clear.self._dsk._": {"doc_hash": "1fbe1d4d94430956ac569850972efd852e05449ebdf311f103da6e4b7213c7a8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile.py_ResourceData_ResourceProfiler.visualize.return.visualize_self_kwargs_": {"doc_hash": "c77fb3aeaa2848f84bd85eb4b49b8ebaadf94d6d3d337a42e699e03f936fd22c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile.py__Tracker__Tracker._update_pids.return._self_parent_": {"doc_hash": "894631b6ff4b7601ae18a9f0e708634396ee1e863e17eee62c892e74ed2ff80e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile.py__Tracker.run_CacheData.namedtuple_": {"doc_hash": "d677181f692ea459a2e2a28227ceba312fd1e329970126ef5ad89bd22895f260"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile.py_CacheProfiler_": {"doc_hash": "3b44b8301ef56394fadf186147fbf7bd6d0e116b513b957a413b4ab78080a3f9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_random_unquote.return.expr": {"doc_hash": "b227923d73e2abbb1cc99b90c1fd6b247128c935a8808cf8bf8c69568349bddf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_pprint_task_pprint_task.if_istask_task_.else_.try_.except_TypeError_.return._": {"doc_hash": 
"b3ce0db0d73506d09f33149d86bea6111dc73d1ecb8bf11c9de9d37d41d19a08"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_get_colors_get_colors.return._color_lookup_n_for_n_in": {"doc_hash": "ab0080c8d9eb96dd2d011dbc859422110115d58bb2d20f07f79086aec5e36654"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_visualize__get_figure_keywords.return.o": {"doc_hash": "f9f150b5b2ae34f2bbab1f1f9107e892a95610e3f5b040634060ad069c83f55f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_plot_tasks_plot_tasks.hover.p_select_HoverTool_": {"doc_hash": "a608f25106c95744571d59ceb5431a8d5d8913aac17fbe42f8b0c10ce11124f1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_plot_tasks.hover.tooltips_plot_tasks.return.p": {"doc_hash": "1e4746f07e35bf17b9fa607bae3c8f87d7e47ab2639dbe3eff851da71c5213fe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_plot_resources_fix_bounds.return.start_max_end_start_m": {"doc_hash": "74ceeb4deeda7bac5c6233d676b48f0ce1d15b3ba318f4b00b8c2be4bd448bc8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_plot_cache_": {"doc_hash": "8ba784796cf07d64e0035abd69ed6e54be85bb3b47b15c4e91efa9068660e0d0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/progress.py_sys_format_time.if_h_.else_.return._0_4_1f_s_format_s_": {"doc_hash": "5d121cf59c46309ad38bdaf4d5251733342acd58e5c2ea516331c0a104e8d945"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/progress.py_ProgressBar_": {"doc_hash": "b397f4f2fb8d314dcd83d94f8760d3b8e2d008730de47facdbfa23e7ea30a6d4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_from_operator_import_add__ignore_abc_warning.pytest_mark_filterwarning": {"doc_hash": "67a6a6ec0b4dcadcdd6d0f2c89ba52069e01bd368b4193ca3d0e5de7a57eff75"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_profiler_test_profiler.assert_prof_results_": {"doc_hash": "f2e4b703ab22a55d81a45ee638522e241838b3db4e1244634a67a1fbc78b0f8f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_profiler_works_under_error_test_two_gets.assert_len_prof_results_": {"doc_hash": "79a22a50f4472eef893f9c1987e42f831555d5eafeb760bf814d272a1619a9cb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_resource_profiler_test_resource_profiler.assert_len_rprof_results_": {"doc_hash": "8b69c82cdc5218268075f81ded7cbe271961e423230a80ffa12a3f04ec232f6e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_resource_profiler_multiple_gets_test_resource_profiler_multiple_gets.assert_not_rprof__is_runn": {"doc_hash": "917f016c892d0b33793c16885e203c587ec6764e996eee7908dccf16fe3432d2"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_cache_profiler_test_cache_profiler.assert_CacheProfiler_metr": {"doc_hash": "d65b4c94f95faae035daca1b844fab3bc1f95d3da08c6cabe1b802207cb739dc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_register_test_register.try_.finally_.prof_unregister_": {"doc_hash": "0b7e10348e11d03590dff2518337100020ecd0fe6e8d53cb980f17bcbcad86bf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_unquote_test_unquote.None_2": {"doc_hash": "68fb9e4f3d078d1a35b97b69ada8edaa87788337cee4d7ad7de4c26fbef689e1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_pprint_task_test_pprint_task.None_9": {"doc_hash": "5020fbebf7350f7b2acbd15ec02d7519f93a60196744ceba83edc877c7179311"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_check_title_test_profiler_plot.assert_len_record_0": {"doc_hash": "030bc92ad7a2a33d53ceac132b7c03b44be575cd3d6f00f18bb78f14c230aa7d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_resource_profiler_plot_test_resource_profiler_plot.for_results_in_1_0.None_6": {"doc_hash": "182460db10296542131875363d81b82ffbd243b3bc6e8103501c10031b0a4a27"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_cache_profiler_plot_test_cache_profiler_plot.assert_len_record_0": {"doc_hash": "62a806d54a0f138d6a8c76f211f35cc0685d03a76a5532d84df420f5efa50809"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_plot_multiple_test_plot_multiple.visualize_prof_rprof_": {"doc_hash": "949ebdb3d64468f10fcee00786cbe9e5cdfd55be004b899ecbbbb622fd37402c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_saves_file_": {"doc_hash": "e8169876a6af62640a2bdcdf1deb67f167f0741b347220bf8799e4d031488d32"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_progress.py_from_operator_import_add__test_no_tasks.check_bar_completed_capsy": {"doc_hash": "d440f1fc5cd3892d038829467345a4c607f53cd14de3cb1da140dd31392d6774"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_progress.py_test_with_cache_test_with_cache.None_1": {"doc_hash": "01c579d87a542c9b389a93a1f41273d0d28882316548262a834b5e46e3cdc490"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_progress.py_test_with_alias_": {"doc_hash": "73d2fbaa553378db30a84a25f375b73fcc11fca99eeeda6eaeaf173e4093339d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/distributed.py__flake8_noqa_": {"doc_hash": "5235a405751be8dfd10a7b3b4678ea3f4ac140a8d85171654ff2730602afc9ed"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_re_task_label.if_any_has_sub_tasks_i_f.else_.return.head": {"doc_hash": "2048cbe021ac16c331137ca6422ef030511d5235b16880d714cc770a943f78ca"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_has_sub_tasks__UUIDPAT.re_compile_0_9a_z_8_": {"doc_hash": "5f31363e824da09a889b2f27e4962f03eff0e9cc3e22659c884dc9b546aa5558"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_label_label.return.s": {"doc_hash": "084d406cae98b33946923c57a9d620577e47fb5a78057fc94c5caa4890e41f1d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_box_label_box_label.if_isinstance_key_tuple_.else_.return._": {"doc_hash": "4db15a7f0091a536abf727c33e71eeb5ebca6ba1c78f9cbcc5f97242bcbd3e1a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_to_graphviz_to_graphviz.return.g": {"doc_hash": "2242111e9f267c54791a804561af46b837a2725a80f55ab273921e7bb403598f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_IPYTHON_IMAGE_FORMATS__get_display_cls.if_format_in_IPYTHON_NO_D.else_.raise_ValueError_Unknown": {"doc_hash": "24df063f9ec3016ba9d8d5dca957865ad121eec5fffff62a7145ba1b16b2aad6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_dot_graph_dot_graph.return.graphviz_to_file_g_filen": {"doc_hash": "e7150a3bddd83ec56dec4c680940a1999526b5dfdd41c630222b75949055a919"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_graphviz_to_file_": {"doc_hash": "8ef2318b4282b9c00fa5e26737aef8c929c25e4baa1c9226eb32e48ead5b206c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/hashing.py_binascii_hashers_append__hash_sha1": {"doc_hash": "6127a3780e0b5824f4f13ec21137d294078cfccf62ec4547b836ee5f256541b8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/hashing.py_hash_buffer_": {"doc_hash": "bdf7b9057442fcd5132cc7d390b92ebddeaf586151fe3051a22e737df8773f0e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph._from_collection_HighLevelGraph._from_collection.return.cls_layers_deps_": {"doc_hash": "fd3a0262b17d4f22fd61c00d2f9241283acf0de70b007d021d0c0b8326951769"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.from_collections_HighLevelGraph.from_collections.return.cls_layers_deps_": {"doc_hash": "519aa990f7f5394f062e738d3a0c380f59cd2f2fcbb6ec8c83de7d0d1c624c85"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.validate_HighLevelGraph.validate.for_k_in_dep_key1_.if_self_dependencies_k_.raise_ValueError_": {"doc_hash": "3ff0365e5811892408957cc073efac35781bd2c3255d2c620fa558918263a5e6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_to_graphviz_": {"doc_hash": "e60fcb83a86030c093f1722a172338fd3a6b1ca6e9bc187d10ddbeff9aca04b2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py___os": {"doc_hash": "78da90cb65852aa27f86f405727ba6aad1195356704155b5ab0b106562e94e99"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_from_queue_import_Queue__DEBUG.False": {"doc_hash": "b217a56bb091dc2c2798043b4415c7ad25ad623f08bc5f258036fe0b6f80ae6e"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_start_state_from_dask_start_state_from_dask.return.state": {"doc_hash": "06b47febe5b36c4a36fdcf97b67dafcbebfeaa354eb3792bf56cb63bc2fd7aa6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_None_2_release_data.if_delete_.del_state_cache_key_": {"doc_hash": "aa24c9cf6acf9686790ff6caeffc616bba6cf66376c66515362a197edb7941dd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_finish_task_finish_task.return.state": {"doc_hash": "6a807265d8eb8b7468221d65efc05a6b45e1f2812f39df7c764b848c9580d487"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_nested_get_identity.return.x": {"doc_hash": "615f651c68be8590a090a01364eccc17e2b6991661afd462fe8e206c4ea56be1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_None_3_get_async.dsk.dict_dsk_": {"doc_hash": "333a60b0562ae3c79734ab8d398420cfaae91f29a56a33a1d606cdf25141ef59"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_get_async.with_local_callbacks_call_get_async.return.nested_get_result_state_": {"doc_hash": "94806e55e1e9c1c0cc14c3b069826e5afe92e5256b66d22287231c850c36e576"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py__Synchronous_concrete__get_sync.return.get_async_apply_sync_1_": {"doc_hash": "e966a28284b47e242f04bffa828274deaa9e06d1bd1784369d70e6ee25cd88ac"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_sortkey_": {"doc_hash": "872b984619fc732fcef8c87bc815242121c67c0631b57f9cc68c077d82e8d68e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/multiprocessing.py_copyreg__process_get_id.return.multiprocessing_current_p": {"doc_hash": "a8083f6baf687cadce725a3d883fae1c1457be35437a3ed8505032b0a76c922e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/multiprocessing.py__Remote_Exception_Han_RemoteException.__getattr__.try_.except_AttributeError_.return.getattr_self_exception_k": {"doc_hash": "2eb61d6bf8bcadecefd4415af8b5c500dc9d6696aee8af4ac0ada0227d3b2748"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/multiprocessing.py_exceptions_get_context.if_sys_platform_win32.else_.return.multiprocessing_get_conte": {"doc_hash": "5a5bbfc90944d32e34ff6d97c211866fbe1d3c84f632c47b9ee73839e56cc591"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/multiprocessing.py_get_": {"doc_hash": "4a07cf9884f19f3f675c1a9fc11808bfd6cacd10069cc3ded87bd2097eb35d8e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_math_cull.return.out_dependencies": {"doc_hash": "9880a83268de4d218f2876a3de465b4fbdcdea5862540009d1905335a90fa619"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_default_fused_linear_keys_renamer_default_fused_linear_keys_renamer.if_typ_is_str_.else_.return.None": {"doc_hash": "78b47c1cf58aaffc6de1cbcfce0f9403ea6ec66c196c282dc482ba536dd05461"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_fuse_linear_fuse_linear.dependencies._k_set_v_for_k_v_in_de": {"doc_hash": "115685c43794355002a8ce440095ba6ae12f4774ad92a832449f37811287a46f"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_fuse_linear.if_rename_keys_is_True___flat_set.return.set_x_": {"doc_hash": "4860b738cd3eaf1d45590749816832479311ddd6b1c2ad93852db1fe7b47d32e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_inline_inline.return.dsk2": {"doc_hash": "f128d471804f98909fdcbdd4df41504323d3454c3ea56d3cc50b13659f979c6f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_inline_functions_inline_functions.return.dsk": {"doc_hash": "4bd8600a18b6abdbd9641355b6540c78f6dd26c79e3479b59f791424be23fa06"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_unwrap_partial_functions_of.return.funcs": {"doc_hash": "dd9f443d8b32873136c07079f85bb02ed6307d529f418dbfbffafe53e5815137"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_default_fused_keys_renamer__default.Default_token": {"doc_hash": "c4327ce98611882d192732b55183384e0ac7871f69afa0ede2f803ebbc28944e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_fuse_fuse.if_not_config_get_optimi.return.dsk_dependencies": {"doc_hash": "65989bd341820917dafcb44335641bf5f448a04ac4084737c07b7212829a3fa2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_fuse.if_keys_is_not_None_and_n_fuse.children_stack_pop.children_stack_pop": {"doc_hash": "b84e276bbd3abba9c3374c6116defd75d782abe20596055f56c7f019006a3d62"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_fuse.while_reducible__fuse.return.rv_deps": {"doc_hash": "84d32698506bdbd819153b1b2b8a2d1b97d063ff15c85fa110814bfa1290f537"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py__inplace_fuse_subgraphs__inplace_fuse_subgraphs.for_chain_in_chains_.if_rename_keys_.fused_trees_outkey_cha": {"doc_hash": "979cbcbfb9ef7b50ae8e186df6b1f3d4c7697737db9a568bcbf04fc993718d58"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_SubgraphCallable_": {"doc_hash": "16e60c962afb24066ccd277fe490322401a1cb50ca6e5afd8130855419a6c00f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_r_Static_order_of_node_add": {"doc_hash": "deb3429c25d24d49359f0f1350ec4d25e5754db822dc0cbbf85b265ce6a68fa6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_None_2_order.initial_stack_key.init_stack___getitem__": {"doc_hash": "456015776057bdba8ce1aab74bedd26de68ee9f5fab92850ec2e6a106074cb1c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_order.dependents_key_order.dependencies_key.return._": {"doc_hash": "a6820ae0481077bf463ee86a9988a3bbf5d333be5794b3dcd4c35315ccdbb638"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_order.finish_now_key_order.is_init_sorted.False": {"doc_hash": "eb5c260af0d7c380e1897f4f92fa841e4433e4c9934ddb3c8706a2394f004c22"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_order.while_True__order.while_True_.if_inner_stacks_.continue": {"doc_hash": "fb2e04be658c4aaf92395b478faa1531504944894591bfeb5f872a63477ffbbb"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_order.while_True_.if_later_nodes__order.return.result": {"doc_hash": "c2bc925472479fd71ceb22b93c46b868ccc07d0c264c13d76e6a8196a33c1a68"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_graph_metrics_graph_metrics.for_key_deps_in_dependen.if_not_deps_.for_child_in_dependencies.if_not_num_needed_child_.current_append_child_": {"doc_hash": "7922c0de6298b1b975168e8a15ffa158a675a6ce56a405489a319d1762e87928"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_graph_metrics.while_current__graph_metrics.return.result": {"doc_hash": "841b0f89b848d4c91bcff9a34829a8d5ef93b5a5c2acf470880a38164c2cb5c9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_ndependencies_ndependencies.return.num_dependencies_result": {"doc_hash": "08c6cc063799d121677887688a81f46c003b069ef94c313b8b1678b29a8819f8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_StrComparable_": {"doc_hash": "cbfd0bdc24a8aeb546cec795823bb868208e67f359c8c5614ec6ab438c4ea120"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_from_collections_import_d_Traverser.skip.self.term.self__stack_pop_": {"doc_hash": "9ebbaafa2ae89b5a8a849d85bbe53148ddbf98d79fbd19ff8863e9cec2107b1c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_Token_END.Token_end_": {"doc_hash": "80685bf58ccf905d4421c958b9b0437e2ea90add3540563842ffd37b22e67277"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_Node_Node.patterns.return.self_1_": {"doc_hash": "e24e38c76ac9afa8de7bbb3a64166cf86d7c347a22727b2f5501e3f650e3678d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_RewriteRule_RewriteRule.__repr__.return.str_self_": {"doc_hash": "b4379d7ae731de0d1c100d5b09a3b4ef27ab0e73b78124b1a4e674196b82fa1b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_RuleSet_RuleSet.__init__.for_p_in_rules_.self_add_p_": {"doc_hash": "0b045245a1b98f63b7345fec6d93fd23bc62b6c125e26de12f5cb0e6e7b253bd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_RuleSet.add_RuleSet.add.self_rules_append_rule_": {"doc_hash": "d5f7aad61717860643cc1ef9da26904870b350fd7d0926b5c31c854067a54fca"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_RuleSet.iter_matches_RuleSet._rewrite.return.term": {"doc_hash": "4e8f2e86e16a28a1ceacf94410659b7930b976bca44107ab75e1ec819a24c883"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_RuleSet.rewrite_RuleSet.rewrite.return.strategies_strategy_self": {"doc_hash": "929ed2b76e7f445e729b2ae394be784ff2866631ae6f489211fd755d4a00a813"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py__top_level__match.while_True_.None_1.except_Exception_.return": {"doc_hash": "41114f888aa5f2681e3393c50a68f0397a0532bb9f59c8d46e3294c17189bfed"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py__process_match_": {"doc_hash": "ba5e0bdca77afc2711452d1bd187e8e9cf7b7cd3c16b82cf2cb10f36b1075b7c"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/sizeof.py_random_register_numpy.sizeof_numpy_ndarray.return.int_x_nbytes_": {"doc_hash": "1f3aecf3170b9f509fa5982d4f4e1fad5df9e00ff873a189dad4bbec45787f3a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/sizeof.py_register_pandas_register_pandas.sizeof_pandas_multiindex.return.int_p_1000": {"doc_hash": "7d0c156bd8bd65563de543a57f0b5d34622362edc38ecd48d4a1b97608342098"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/sizeof.py_register_spmatrix_": {"doc_hash": "da3c0c2e677f8db021b010bf76cfbc857faef8b087523f9f45760f6c63a1ef15"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/system.py_math_": {"doc_hash": "17923f072debe7fad63c27945bae9b0fef109c1cda2992ec9696ad87ab8f7248"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_normalize_function_test_normalize_function.None_10": {"doc_hash": "0ec835e22ccb477ed35873e1bf68b936e29d0bbb72cec082e3cd77036bbd2d54"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_test_tokenize_numpy_datetime.tokenize_np_array_2000_": {"doc_hash": "bc894dbe580d1c56138596c92f1bb18814517ba27c2c1b5ccd1bf29652a1c8c5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_numpy_scalar_test_tokenize_numpy_scalar_string_rep.try_.finally_.np_set_string_function_No": {"doc_hash": "46e64c08351b411ef50cf1983693df7cdc5edcbe20bf4579b8b34c82d58ce6fa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_numpy_array_on_object_dtype_test_tokenize_numpy_array_on_object_dtype.assert_tokenize_": {"doc_hash": "8f65da48b6a26edbf07e8a3260594857df7b65e01a28c26d97c4579b4f6d4b23"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_numpy_memmap_offset_test_tokenize_numpy_memmap_offset.with_open_fn_rb_as_f_.assert_tokenize_sub1_": {"doc_hash": "459373dd9a0dfa078120d6987b2b7e2326803554ad4dc08e959d9a95d28f6a65"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_numpy_memmap_test_tokenize_numpy_memmap.None_2.assert_tokenize_mm_1_": {"doc_hash": "4b7f1e3e1767aea02fe1b9f5d9d02b6129bc39d1f8bd7e2fdbe303d4ce1c0259"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_numpy_memmap_no_filename_test_tokenize_numpy_ufunc_consistent.assert_tokenize_inc_t": {"doc_hash": "8680bcda023ace23f5ce959a31b2611be1b1f82308805f700bc8115aad240bf2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_partial_func_args_kwargs_consistent_test_normalize_base.for_i_in_1_1_1_1_sl.assert_normalize_token_i_": {"doc_hash": "d394cf6c711a8c3c40c9b4f937e3ae7978110a9d99bd62c90bdc229591a015ee"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_pandas_test_tokenize_pandas.None_5": {"doc_hash": "1619e7c37e9fcf29b7f5a6f05f2a17a6df52e934cbafd21033eba23731f90b52"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_pandas_invalid_unicode_test_tokenize_pandas_no_pickle.tokenize_df_": {"doc_hash": "77875601076aa202249b7191bc446d420213dda83192e4dfbed3b81a9dc38db2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_pandas_extension_array_test_tokenize_pandas_extension_array.for_arr_in_arrays_.assert_tokenize_arr_t": {"doc_hash": "55850c5f557e13f0ffd640a79eadb5da21e6fdb25f4d1c5e1ed03abb8d815fe5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_pandas_index_test_tokenize_ordered_dict.None_1": {"doc_hash": "25ed951de73d22df64900871ad5f66c768727402b78728986e9e9ed859224d22"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_range_test_tokenize_range._Different_step": {"doc_hash": "048a464b7e48424f5e40fc56d08b8207dfacba16413c02e7c84cea8170532cd5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_object_array_with_nans_test_tokenize_numpy_matrix.None_2": {"doc_hash": "a2d4d064e8495bc1ea774e1befabf94c48bfdba4ed04f2fb8932155c069b831b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_dense_sparse_array_test_tokenize_dense_sparse_array.None_2": {"doc_hash": "4223eb9eb17799024f816eaad0e6462f2026e3af6dc47d43f7b4633f347fc43a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_object_with_recursion_error_returns_uuid_try_.except_ImportError_.dataclasses.None": {"doc_hash": "8c2c5fdb641d6f5c08c5d25553cac986a66e4c6a1f00823a5931605197e7ab73"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_unpack_collections_test_unpack_collections.build.return.t": {"doc_hash": "f36f667a1cc93be80ac507ec91f03046aecd72c9ba9f3fcd57571c591bd15591"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_unpack_collections.args_test_unpack_collections._Smoketest_results_that_": {"doc_hash": "03e67386fda9bec90848c0f103e3cd40647c0a3617bc81ca635b44e05b92e7c5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_Tuple_Tuple.__dask_postpersist__.return.Tuple_self__keys_": {"doc_hash": "3105019c30075f34f3e9f10e24fce2ba33b9ce417c4291524f4e01a0629f48bd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_custom_collection_test_custom_collection.assert_t2__dask_t3__da": {"doc_hash": "7707d9c10495e547a62cfdc8687cde0f3231a4f346c5cfa8f4d1588457bb772c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_no_opt_test_compute_no_opt._See_Renamed": {"doc_hash": "a0c0601a1ddbbc8f4404539b423ccf14aff9f2d48e323e3a5ed554894999ff5d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_array_test_persist_array.assert_len_y_dask_y_n": {"doc_hash": "f42dced34b7d37ed03be8fee90c2d5d7a9aec9debfd51c9349a43244ab5f256e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_dataframe_test_compute_dataframe.None_1": 
{"doc_hash": "7704110d14f92e3531fd2b5b078fc8cf6e5fe0db8b3df834fd68e6f13a3f080a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_array_dataframe_test_compute_array_dataframe.dd__compat_tm_assert_seri": {"doc_hash": "3e0d3769f91e4a86e6292d10fbed18dddae0917ea100490614f8c64e7b60bb35"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_dataframe_valid_unicode_in_bytes_test_compute_with_literal.assert_compute_5_5_": {"doc_hash": "aa468e33944bd25f7b0636d52da714cb959c772c72e52f0eaf0016460e3132e8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_nested_test_compute_nested.assert_res_1_8": {"doc_hash": "6f72afc90809f3f5964bde808bed0b1730642c5b186e6ea56ccb2390e3673d79"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_visualize_test_visualize.with_tmpdir_as_d_.None_4": {"doc_hash": "dbc061a71800d42aa9ecf2ae7b6d3599d27e77473d3f3d7b7f8b7647cb50a317"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_visualize_lists_test_visualize_order.with_tmpfile_extension_d.assert_color_in_text": {"doc_hash": "f979f04ca6e6f5e7ade6a9851c4500470458dfe15d8ce25d79a45030f6a067d6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_use_cloudpickle_to_tokenize_functions_in__main___test_optimizations_keyword.None_1": {"doc_hash": "6ef19f0dc50eb895776e5ac35a7689b1daa650949483d46d273017a6890e76f1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_optimize_test_optimize.for_a_b_in_zip_x3_y3_.assert_dict_a_dask_di": {"doc_hash": "2f75815f1bcf3fd68776d132bdf124a37259a8bc634592e36294f76377d3720f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_optimize_nested_test_optimize_nested.assert_res_1_compute_": {"doc_hash": "bb6edf1fc411903840ea3aaa4693a6b213a9c2e24548821d595c4dbcf19ed1ce"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_default_imports_test_persist_literals.assert_persist_1_2_3_": {"doc_hash": "5df287cabeb865b4c61f43824f5bdd4846924e4b8675f964c3a7807afc86f58c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_nested_test_persist_nested.assert_res_1_compute_": {"doc_hash": "91d61aa4fa809c441b0476b59e334f34b086e5701f620fc880e587bba456e0df"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_delayed_test_persist_array_bag.assert_list_b_list_bb": {"doc_hash": "021c7e25c005a1dae75e83298818bc1f52c0744fc17432e55920236a39cd8a7c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_normalize_function_limited_size_test_optimize_globals.None_1.assert_eq_xx_np_ones_10": {"doc_hash": "9b307a85993c66bc5640d252f3a389fa0352d87da3dc9c506d73acfe000f57cb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_optimize_None_test_optimize_None.with_dask_config_set_arra.y_compute_": {"doc_hash": "0400c5af849693bcd3d8fb063782be9a3ea9ce45e13208e4ba8ce9f7b2567d9d"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_scheduler_keyword_test_scheduler_keyword.try_.finally_.del_named_schedulers_foo": {"doc_hash": "33bd9afd41d169b5a0bbc6b8ed02bd0278c598baafdbe93fbf23d7c2319447f2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_raise_get_keyword_test_callable_scheduler.assert_called_0_": {"doc_hash": "7198d0eb76b44cfe6017ea16cee6b9baa49c422105bd1003932297b00724004d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_num_workers_config_": {"doc_hash": "b3f5aae3af7ee4394870b7e5e56703dd20ffc15976c3f3164e6631fb419b8c2e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_cache.py_from_dask_callbacks_impor_test_cache.assert_not_Callback_activ": {"doc_hash": "5019ff2099d626b8ee927cbab179c9131f74fe9eb4bc61828c9bff064c23a729"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_cache.py_test_cache_with_number_": {"doc_hash": "ae04d3cd4b0ce18d7248301323966f1fa3f8d8fe7a63e0900a597c8f4e1ef5c8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_callbacks.py_from_dask_local_import_ge_test_start_state_callback.assert_flag_0_is_True": {"doc_hash": "6dd28f323184d63009b11072fb74fe94b167fd2b2c62a4f55b2a31da131c4aa4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_callbacks.py_test_finish_always_called_test_finish_always_called.None_4": {"doc_hash": "1c60d77d24b67810fb14bf0c7388cd389027a684bfd839a2abc501a57dcec0be"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_callbacks.py_test_nested_schedulers_": {"doc_hash": "4dcc3a5963fc0c57ffe75417a8d99a07eb1211643ab0aa5b6e40e047d1947a7f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_os_test_canonical_name.None_5": {"doc_hash": "2745d03254e314243b959bd76858939f1bd519b52e382dc5776dfbd61e682bf3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_update_test_update.assert_b_x_2_y_": {"doc_hash": "1f04410735cbde262fe80e6fb6e6401760f0a9849999b4d1f3dca365cbc6788f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_merge_test_collect_yaml_paths.with_tmpfile_extension_y.with_tmpfile_extension_y.assert_config_expected": {"doc_hash": "21b40fa7fc0caee6e09ce211d3c46d3d065d6c1a85edc3ff0a659168dcbc02e3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_collect_yaml_dir_no_read_permissions.try_.finally_.os_chmod_path_perm_orig_": {"doc_hash": "c59a87bd89a9b178ce5d7ca21a936a75453bdf4249d3694cceda27c0910bbaf6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_collect_yaml_permission_errors_test_collect_yaml_permission_errors.with_no_read_permissions_.assert_config_expected": {"doc_hash": "27cfe7a212ec6e6d0601c7aea6a5dea34d4adb1ce87c8e644f4ec4e312bbbf3d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_env_test_env.assert_res_expected": {"doc_hash": "4536571705e25a6064e23a8df3efad15ddbaea25dfdaba0530428fd9eaa05af1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_collect_test_collect.with_tmpfile_extension_y.with_tmpfile_extension_y.assert_config_expected": {"doc_hash": "8e6c6ee7b1d798290d8ad16dc98adfca86b73009c30701e905ffef33f6a10a91"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_collect_env_none_test_get.with_pytest_raises_KeyErr.get_y_b_config_d_": {"doc_hash": "f1b95facb38888c518f91f0654afd0c504ab7d3ae2ec7d090d2581ab13869ff7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_ensure_file_test_ensure_file.assert_not_result": {"doc_hash": "d63f60c516c0fb20d6a429f2c63ce8a5d2c49a2f2c48d4d89d2058339a388f64"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_set_test_set.assert_d_abc_x_1": {"doc_hash": "d8cee9ad775c127fe3219f5da65b722912b52c81b736098ee119b707276758d9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_set_kwargs_test_set_kwargs.None_2": {"doc_hash": "49ff0b8fcc13a53967ffb6bcba1c4d44d7cd4d1d76c37b69a087183688f902c2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_set_nested_test_set_hard_to_copyables.with_set_x_threading_Lock.with_set_y_1_.pass": {"doc_hash": "c812e88928d22efa8ac7e82df77f69db760f388e127f776520e8f7565708471d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_ensure_file_directory_test_ensure_file_directory.assert_os_path_exists_os_": {"doc_hash": "ff5d69a9d8888cb8647217d646630b314f85fb8ac903f86616d8637b74bd3383"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_ensure_file_defaults_to_DASK_CONFIG_directory_test_ensure_file_defaults_to_DASK_CONFIG_directory.assert_os_path_split_fn_": {"doc_hash": "963b8982c0aa98f50c9b2575a3abd5ca19ba7302cb1352dd89e9407166c8973b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_rename_test_refresh.None_2": {"doc_hash": "49e7357208a0fe750bfc9ee625f10366e56b8003d02eccb854396e234ba0006c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_expand_environment_variables_test_env_var_canonical_name.None_1": {"doc_hash": "84c112c8a38655b507ff506d4077bfef32b7c51bbf5ba3da69441963e8a832cf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_get_set_canonical_name_test_get_set_canonical_name.None_2.None_1": {"doc_hash": "21f9f1034af5f175b15ea25d58d8092b536b6a0ef560a08b905c3c23de73f03c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_get_set_roundtrip_test_schema.jsonschema_validate_confi": {"doc_hash": "1cc18b9fcf4e9ef8847a20d284fd53877812408b884c724a48b8d888686f012f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_schema_is_complete_": {"doc_hash": "fa281395fee9e5d3769f6934f3cf4d8cdd0defd027126eb6be1a0a1386b7b0c3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_context.py_from_dask_context_import__test_with_get.None_4": {"doc_hash": 
"57d046b6aeb8b2e9d7c6d20f160b297a89b1d95f3a83985c6dde8746422a876d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_context.py_foo_": {"doc_hash": "0fb9b5187b9520c770f1b19b509ee62797bd22e90b171f5e3af1f0e061d40bc2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_from_collections_import_n_test_istask.assert_not_istask_f_sum_": {"doc_hash": "023aee8eb7d042e3257a59b7ea4a9e677bf2dfeeee2a308872737fe0f2113f66"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_has_tasks_test_has_tasks.None_5": {"doc_hash": "27c4dbffed6d5c2e373fb0d54e6d6cc7fcd2adbf9c8ccc6992b07982714ee320"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_preorder_traversal_test_preorder_traversal.None_2": {"doc_hash": "38b86cb05172c2cbcb00d506fa515e3766e31475572201026f326def238f97ce"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_TestGet_test_get_dependencies_nothing.with_pytest_raises_ValueE.get_dependencies_": {"doc_hash": "b33af7fb28b4e641f703e0635529b04eddccfdcd422e5c69322f264c43caaafd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_get_dependencies_many_test_get_dependencies_task_none.assert_get_dependencies_d": {"doc_hash": "453450ce16e82300e3a939e685dcf70c11b8758d27c6b191d04375ce3b040bf0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_get_deps_test_get_deps.assert_dependents_": {"doc_hash": "c8ccd2fe6da745c910e2b1abc6ecb6c68064796312707bd37a8d2eb9e79aa3b1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_flatten_MutateOnEq.__eq__.return.False": {"doc_hash": "951bdd5513b015a9f4d350de7d5ecf34dea4b8ccd33c8e6e1c96d0d648515527"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_subs_no_key_data_eq_test_subs_no_key_data_eq.None_1": {"doc_hash": "abd22c191606d563518ef9c0195a94eda4dce02b8176df78e18210e3dba11bad"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_subs_with_unfriendly_eq_": {"doc_hash": "ae729a2bae4e1fe0991445ea62aa4f880d81f5284b06b1321a2283c284b33c95"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_datasets.py_dask_": {"doc_hash": "a660d7d70e93a2db0d15024c7a0dec08786f0a78fb5dcf307df6ab60abab28cb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_from_collections_import_n_Tuple.__dask_postcompute__.return.tuple_": {"doc_hash": "9d187dfabb76d3c0a7e0bbe993c6154e7545e050efb329d8df54c77b8f60a46d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_to_task_dask_test_to_task_dask.assert_dask_x__dask": {"doc_hash": "ded3f3eb7ad23dff9613cad9f89edf8a2148f831d4e39396f23ae7797054778e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_delayed_test_delayed.assert_a_key_in_b_dask": {"doc_hash": "0a9be292c3d5e8389e8b552001fcb3fd725395ceb35eb9d0f4087dcac5eb20f1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_delayed_with_dataclass_test_delayed_with_dataclass.assert_final_compute_": {"doc_hash": "023b1f7a06817c764f8c1a176b8e5da25e83e0b6fade6c7e0752b523f27317be"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_operators_test_operators.if_matmul_.assert_eval_c_d_co": {"doc_hash": "dc296e5169cadfe567d104807ac78340f6ae452adfcfb97f4b376b8d4a43ea33"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_methods_test_np_dtype_of_delayed.assert_delayed_np_array_": {"doc_hash": "c4bb7a29e48bec83589dd5c66f9a02f4a027c9d5a58f03890e40b655c6d47d0b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_delayed_visualise_warn_test_delayed_visualise_warn.None_1.z_visualise_": {"doc_hash": "6abbdbaca5bfd05f597c00894ead63ebeda4af3278ceb6d386107a3869bc2219"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_delayed_errors_test_delayed_errors.None_5": {"doc_hash": "97383ab834f3c07d9a0d9b68b95b4de990c5a7fc5ffef58f24cffb2fa81e4c7c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_common_subexpressions_test_lists.assert_c_compute_3": {"doc_hash": "9ebfeb8f7949a4f84dbf3e229c15d0ff9d9f4a0f725b093bdd25a5936f7e013f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_literates_test_literates.assert_delayed_lit_a_": {"doc_hash": "ac90b422b7f74b26399abe72415ecf3ab0bb90c9bf549824936fa6e6d27f0afc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_literates_keys_test_iterators.None_1": {"doc_hash": "4a651de49e41788f490a270695fafe6a1a60e182746067893bb0a12ca9e55bea"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_traverse_false_test_pure.assert_myrand_key_my": {"doc_hash": "4b9a103d42218607d84316bfa5be1d380f906d64882900fcf4dc163c9b0e4ee8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_pure_global_setting_test_pure_global_setting.None_7.assert_element_element": {"doc_hash": "d60c3d22e6f3648e5d74d1bbeb494c7783fe8f90758bce3852575021ba0bd12d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_nout_test_nout.assert_x_compute_tup": {"doc_hash": "a1dfae102043704f92ffa8d8c2f5e9b9671d99fd7371ecf3a7394f0636732be0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_kwargs_test_kwargs.None_5": {"doc_hash": "6b53989ee848acd61ad71195b2c3877ca8bdbf38b4ec53c09f700248ff05594b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_custom_delayed_test_custom_delayed.assert_compute_n_x2_x_": {"doc_hash": "738bad5633d8f9237ac2130c60c9f72282775295324f94d6dfe7f14663e69cd6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_array_delayed_test_array_delayed.assert_delayed_arr_compu": {"doc_hash": "671b4a0880dba4222fcd0ceb61507eda8763ac4b104443a85cb34ba9d49a935b"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_array_bag_delayed_test_array_bag_delayed.assert_out_compute_2": {"doc_hash": "d9e82088b36d2af8caacb79cf51df250738727ee361251baa576d68c6ceaa060"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_delayed_picklable_test_delayed_picklable.None_10": {"doc_hash": "3fae957077df9e9ae2b6ce8187c274a10ffbcb52cabb777839a00878b53be9f7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_delayed_compute_forward_kwargs_identity.return.x": {"doc_hash": "844b3d6104244524a34aeba831de58a3d351174fbcb46c56eefeae6289e90831"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_name_consistent_across_instances_test_name_consistent_across_instances.assert_func_1__key_i": {"doc_hash": "2316d3f38ee2d096edb0ec127548d6de9da8189510c7a5552cff0116b2f5efbb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_sensitive_to_partials_test_keys_from_array._check_dsk_xs_0_dask_": {"doc_hash": "9b16509e04d4ab9ce9183bf071e108970feae09064363499c9cbbd139386fcf0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py__Mostly_copied_from_http_test_delayed_decorator_on_method.assert_isinstance_A_addst": {"doc_hash": "d24639f56c47c38836c0cfdd439a81d78df916ba6ab54928f876cc028df396c5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_attribute_of_attribute_": {"doc_hash": "355b00c05e5f39a5868df6c7868735e93372dcc71e2d2d04a9374fffa4d33438"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_pytest_test_persist.assert_y2_key_in_a_data_o": {"doc_hash": "b8e85600b6e754d051746ae48530aa1a5148b77ddcdc94d96e637218deed5ce1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_persist_nested_test_persist_nested.assert_res_2_4_5_": {"doc_hash": "e53c4b24f2866df84c4d4ebe6a60e490dabfb7a7e2f727812be1ced6ee594f9b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_futures_to_delayed_dataframe_test_futures_to_delayed_dataframe.with_pytest_raises_TypeEr.ddf.dd_from_delayed_1_2_": {"doc_hash": "1cca66fc418728623308717616f213b9d5019abbdf791cb5290d35dcffb5acd0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_futures_to_delayed_bag_test_futures_to_delayed_array.assert_eq_A_compute_np": {"doc_hash": "5cd3262f5c6da21a332fb6a6d3d18f18d8436fb4016f27ed07a41af1f3fa2f78"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_local_get_with_distributed_active_test_to_hdf_distributed.test_to_hdf_": {"doc_hash": "2ab8ff738c131adeeff7850be0c36dd7fe63097b9df25da6d7dc3f90306d83ce"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_to_hdf_scheduler_distributed_test_to_hdf_scheduler_distributed.test_to_hdf_schedulers_No": {"doc_hash": "65670529b6fd90b2c2f2d41c9f88f4ed11113d389eb3df38ad72b075ca668179"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_serializable_groupby_agg_test_serializable_groupby_agg.yield_c_compute_result_": {"doc_hash": "7e9d934011092978df68135d49b222ee1c280963906ea73da89388fefe0fe9bd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_futures_in_graph_test_futures_in_graph.assert_xxyy3_compute_sche": {"doc_hash": "0fbd94208d389cd9b2ab5d0fc425e06b0b9386ae4f76114c47fb641ea370f36a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_zarr_distributed_roundtrip_": {"doc_hash": "77950028d64ec1e590ec1bebd8d540cfbfd49bd24117912296582a3be62dafec"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_os_test_task_label.assert_task_label_add_": {"doc_hash": "e39263d9b1ebfe7ee8499430b09456cbe2a18112a5c6d034591e2864cbc46622"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_label_test_label.None_10": {"doc_hash": "24d01e798113b7f09b31fa5472b34a5e39eb07c394429ea1ef04ae795655cab6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_to_graphviz_test_to_graphviz_custom.assert_set_shapes_set": {"doc_hash": "8f671ec18f6727e078d19325196e53bc71bf0da7b228ee0d334c0d49233f3e35"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_to_graphviz_attributes_test_to_graphviz_collapse_outputs_and_verbose.assert_set_shapes_set": {"doc_hash": "ad916c8ba43d051743f466721c862ffcc105e98ca7142e3d70d1de0318e19c2e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_to_graphviz_with_unconnected_node_test_to_graphviz_with_unconnected_node.None_4": {"doc_hash": "8b07fe228f479a560f25c1311746e6e65a59928505b695688946735bd8cfe3de"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_dot_graph_test_dot_graph.try_.finally_.ensure_not_exists_target_": {"doc_hash": "4053896a0b427bc0e1a13ef8349e86d0e5e9e30c1bdce36687700b4dac81583e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_dot_graph_no_filename_test_dot_graph_defaults.try_.finally_.ensure_not_exists_target_": {"doc_hash": "61c80514e66f00bec5b7f12ce950ea80458ce729ab9e9902263ddf5ffd3d7129"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_filenames_and_formats_test_filenames_and_formats.assert_isinstance_result_": {"doc_hash": "5f5da8458ea8d71ca76d0fb77fa196241d036932f2ded38a9e79fceef64461ca"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_delayed_kwargs_apply_": {"doc_hash": "162f5985a0baa38a40e10463201f5b3d96c4720a727527f2d060ed967dc8d63a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_hashing.py_pytest_": {"doc_hash": "9d64c9b7c5b6d1444c5915e9c19e2b63c95beab327f82954dd80b96004d268eb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_dask_test_start_state.assert_result_expected": {"doc_hash": "22cc1cd3ecff619c2590a9cbd896d742b2b24677416ed2bf3411b73b6d4f7300"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_test_start_state_looks_at_cache_test_start_state_with_independent_but_runnable_tasks.assert_start_state_from_d": {"doc_hash": "66252bfbdfce49046b74059dd2c38de78eadfbcb481fe47e904a58015f5dd260"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_test_start_state_with_tasks_no_deps_test_start_state_with_tasks_no_deps.assert_state_dependents_": {"doc_hash": "c26079019b46eb81af2ab8a6913273919d49230f1982ab8ecc776104b219df19"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_test_finish_task_test_finish_task.assert_state_": {"doc_hash": "cccf94668eed97425c5850fe99369e9f7268017c24642ca7d9291b284d2e7776"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_TestGetAsync_test_sort_key.assert_sorted_L_key_sort": {"doc_hash": "8c9c051146dec3698ce3d738dd85a32455b62fd658e52b6f76be9310de7ed516"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_test_callback_test_callback.get_dsk_a_start_callb": {"doc_hash": "f6c80a34ed4302010c13f52192bb1d81f8b07d5f906aa08c728e77548c47676b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_test_exceptions_propagate_": {"doc_hash": "a1c542e3665a9134b3659bc095a836dd4f44f9c58bdeabb3613de39eabc9d685"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_from_distutils_version_im_test_pickle_locals.assert_b_unrelated_functi": {"doc_hash": "d29185197bbe3b81f45834410eaa1f373e784da7699a1fc3d3436ea25da5990a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_test_pickle_kwargs_test_pickle_kwargs.assert_my_small_function_": {"doc_hash": "50d36ec08bb83f0b632731759e8709f6d979f1dd1bf078f3aeb954cc138b0bf3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_test_out_of_band_pickling_test_out_of_band_pickling.assert_np_all_a_a2_": {"doc_hash": "7a2ead6f87dec5bc6fd94352306ecae7df7c9f3e8125c672e36fd0af3b2ef132"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_bad_test_optimize_graph_false.assert_len_keys_2": {"doc_hash": "562709e830cd8ccb44abfcc3fd60671c53c10d10574a59c555c6046003ab7e0c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_test_random_seeds_check_for_pytest.return._FAKE_MODULE_FOR_TEST_in": {"doc_hash": "13a6dc4b37706c5bf2b69de80e03144dbfa0eefa45f00e81acdcab72e7d89eb2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_test_custom_context_used_python3_posix_test_custom_context_used_python3_posix.try_.finally_.del_sys_modules_FAKE_MOD": {"doc_hash": "e94f05033408ee3c0ed0132519db2e41c70135a5aed0bf976d94639c1dcafd47"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_test_get_context_using_python3_posix_test_get_context_using_python3_posix.None_1.assert_get_context_is_m": {"doc_hash": "936fba02bde24bab2fa022b63170a3d8149418caae5768206cc3f6669d0c5259"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_test_custom_context_ignored_elsewhere_": {"doc_hash": "18837812d77388d8bd2ccb41caf65c098516c198068d24cf2e0b33aaa8f01222"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_itertools_with_deps.return.dsk_k_get_dependencies": {"doc_hash": "451cb3e091a1f6a51285480ce0cdae0ae8946507fc27230ca8d11906d5d2ef57"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_test_fuse.None_5": {"doc_hash": "e6b7ac602cd25e98d721c5bc9f3acf3dd93e6c2037130d8a2a9a9aa0ce560065"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse.d_4_test_fuse.None_9": {"doc_hash": "dcc64d4cf2eaf4261081543417285cfbc61a1a16710655b87f03a7c68da37ff8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_keys_test_fuse_keys.None_3": {"doc_hash": "7586c3001052d99543dedf8a3f6fa5c1088a632a6d3674b9ad9c5f29faee6b79"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_inline_test_inline.assert_inline_d_a_inl": {"doc_hash": "0a65cb1790408a103ce5ed8d14d63269d0aa949e58a9e635a1d97c9271743c02"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_inline_functions_test_inline_ignores_curries_and_partials.assert_a_not_in_result": {"doc_hash": "a0368c9f556ac63f38e20e035e3bef900210fdd6868f2d19e333ff483f4a94bf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_inline_functions_non_hashable_test_inline_functions_non_hashable.assert_b_not_in_result": {"doc_hash": "a6384785b8f91495315713c868e2e75aa31bcfa2943778f7600a9cafd073b572"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_inline_doesnt_shrink_fast_functions_at_top_test_inline_traverses_lists.assert_result_expected": {"doc_hash": "8c403ebe01cf1eb6ed45f6f6fe119b4555964027311cff145b06aa4ce3035a88"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_inline_functions_protects_output_keys_test_inline_cull_dependencies.inline_d2_b_depende": {"doc_hash": "2889be12cd932c8c6ed061b763d55438848fb1d2b3400602d6b4451f584fbcb5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input_test_fuse_reductions_single_input.d_3._": {"doc_hash": "d22a85a590b8399c4ff7573c612d918da7163761a0edf0b99f9dded118135392"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.None_12_test_fuse_reductions_single_input.None_22": {"doc_hash": "9231fac71f13aabcd3881d4b6ab6daa36d788cd9b4f5cc1ec8c5dcbad38facd4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.None_23_test_fuse_reductions_single_input.None_27": {"doc_hash": "857ac7797281830dc0381365f80a2c9d954f564b15cd714be59389121e7e3ad8"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.expected_9_test_fuse_reductions_single_input.None_34": {"doc_hash": "17810043500540132fe16aa29971333c019eccd0c3d2541c5841237a6c362874"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.None_35_test_fuse_reductions_single_input.None_37": {"doc_hash": "efd9aa0bce7e0b026e232faf5af070c9be168d2e4369465848ab753f1170c1b9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.expected_13_test_fuse_reductions_single_input.None_39": {"doc_hash": "43a07e337f5ad657572f1654706c026c3ae76d4bfe5d3994c2a118857ca70256"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.expected_14_test_fuse_reductions_single_input.None_43": {"doc_hash": "bdda57ad66f4f72ae71fc1f8c3c4003a65fe32bd394d1ab17ad6a05b628268cc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.expected_16_test_fuse_reductions_single_input.None_47": {"doc_hash": "70a70bb986dbc357237ac02daf6246da94c13920d8e8a3750524ab8b5be59a00"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.expected_18_test_fuse_reductions_single_input.None_50": {"doc_hash": "7b363e65101c988ee5261776a6896528a839c08f2c395cf04e2b321775fa03eb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.None_51_test_fuse_reductions_single_input.None_57": {"doc_hash": "89be61853cfb41b1278fcfde8388a34eb4e809097832c57a1580bd09bdd20cb6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.d_22_test_fuse_reductions_single_input.expected_27.with_deps_": {"doc_hash": "ca6cd36000bcc9030079d2a5d0553677de7185e37b3d14e5d1aab0353cf67535"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.None_66_test_fuse_reductions_single_input.d_29._": {"doc_hash": "38bf72271a9e6cb8e09158cb2a5d1876a2556aed80139f6e4df0bd4117ed52cc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.None_72_test_fuse_reductions_single_input.None_73": {"doc_hash": "e46a7921d70cb52fc729beb6aa066563d958be143698a39ad04576f404c6b385"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_stressed_test_fuse_stressed.d._": {"doc_hash": "a1d397e9ab1b94dd0ab83ac9b1eefbdd43d40dd8d62e3496cfecbfe8927b922f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_stressed.keys_test_fuse_stressed.assert_rv_with_deps_rv": {"doc_hash": "30e4606c0d472073715654c60134400d529f92879edc25cc486fb46a2940e5a5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_multiple_input_test_fuse_reductions_multiple_input.None_11": {"doc_hash": 
"e03d2cd4670e0ee632196c8e9001c76371b4965ec087aca6c5fb17c0b5667563"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_multiple_input.None_12_test_fuse_reductions_multiple_input.None_17": {"doc_hash": "cd5014ebf5c1a4448c5e7b3fe9ab28b97e4914daaa55c6be82f3256716285b7c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_func_with_kwargs_test_SubgraphCallable.assert_f2_1_2_f_1_2": {"doc_hash": "bb9e10ea4799c756d4a9a619a41e2bffc2d4b5384ba944f234cf4aca086096a5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_subgraphs_test_fuse_subgraphs.sols._": {"doc_hash": "05d3e698058633d1bf6c95b3e9fc39355d0642a4f18549167d9b105dea342128"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_subgraphs.for_inkeys_in_itertools_p_test_fuse_subgraphs.None_4": {"doc_hash": "5106bd53a1fa3df9379227f6843e1666fc97af838878eade5b253b8a7e390c2c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_subgraphs_linear_chains_of_duplicate_deps_test_fuse_subgraphs_linear_chains_of_duplicate_deps.assert_res_sol": {"doc_hash": "d5376539817e77a48d29526f14c723e6ed85380c0ed5d4a00bb1149ec5e374cb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_dont_fuse_numpy_arrays_test_fuse_config.with_dask_config_set_op.assert_fuse_d_b_depen": {"doc_hash": "787cfac79e92ba86f2a57a7d3d7dab553cf5154c8b5209b0ab22f0a3e58a50ee"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fused_keys_max_length_": {"doc_hash": "b8c6e91999e7650c1627db571b0f7a378776ab36609b417264d64a21cb345ee5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_pytest_test_ordering_keeps_groups_together.assert_abs_o_a_1_o_": {"doc_hash": "74d80f3486ea3e0320ae2f19468e16ba21ea6f2886e9a124f2f81d0ead1ae732"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_avoid_broker_nodes_test_avoid_broker_nodes.None_3": {"doc_hash": "aa0ed2b9ded9667d53bc4a2fbfda2d97f26a90a2032f56655154b0b670a4b0dc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_base_of_reduce_preferred_test_base_of_reduce_preferred.assert_o_b_1_6": {"doc_hash": "33c13914f972b513bd778e394b9943febd513ef97dc822c9f1b29d5121739a94"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_avoid_upwards_branching_test_avoid_upwards_branching.assert_o_b_1_o_c_": {"doc_hash": "3d0a91bf8375de60fd6ea8fabd992a40976f3ad50472d1ccfb3a8bf4ac70f4a2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_avoid_upwards_branching_complex_test_avoid_upwards_branching_complex.assert_abs_o_d_2_o_": {"doc_hash": "bd549ced7944723534bc73ba7a89b7a28440070517611995b15f7be1aafaa5a3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_deep_bases_win_over_dependents_test_deep_bases_win_over_dependents.assert_o_b_o_c_": {"doc_hash": "357ed23880c3713eda113fe37ae2b5dd12d03038513ed03782a127ae2cef1eff"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_prefer_deep_test_prefer_deep.assert_o_b_o_d_": {"doc_hash": "c7ad0109d75fa6f509c072ef0d8b67f58ab415192600b67874a5098ec897db36"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_stacklimit_test_order_doesnt_fail_on_mixed_type_keys.order_x_inc_1_y": {"doc_hash": "0f8f4756895c2cbbdbe463657898654c7c8b58cbcf18866d39845451b24279ea"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_gh_3055_test_gh_3055._operate_in_order": {"doc_hash": "3545d5e61051bb05980d55de6c547ee74856d44d47f4bdfff8aedd9346272670"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_type_comparisions_ok_test_prefer_short_dependents.assert_o_e_o_b_": {"doc_hash": "cb8c62a22955fffad9075f1f769129b37cf1a8b767af612f17846f969e413373"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_run_smaller_sections_test_run_smaller_sections.assert_log_expected": {"doc_hash": "c92bc343046f8a3f52f5dca03198d56604f15d96be41643ad3b737132dfa1568"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_local_parents_of_reduction_test_local_parents_of_reduction.assert_log_expected": {"doc_hash": "1ff07b1fcd1845236351889c62fb43e6931aae4c45151cf3dae470b2c7acbe5f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_nearest_neighbor_test_nearest_neighbor.assert_o_min_b1_b2_b3_": {"doc_hash": "f28e831777eaf81d9dc546c5dba72407e436d4718b82dc62e88723996e86f0f8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_string_ordering_test_string_ordering_dependents.assert_o_b_0_a_": {"doc_hash": "14428d54e5b0b8871e6c159561540a2dd5f7535c882023aafb81268fbdf0a68a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_prefer_short_narrow_test_prefer_short_narrow.assert_o_c_1_o_c_": {"doc_hash": "1f4585908698272f5a60dcabe38e67d43c4e43d31fadc657d3dd5fb954a38853"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_prefer_short_ancestor_test_prefer_short_ancestor.assert_o_c_1_o_a_": {"doc_hash": "ddc9414387ac7e1a9251187b61865d1c179060b046fa88e468a96e88f5829599"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_map_overlap_test_map_overlap.assert_o_b_1_o_e_": {"doc_hash": "328cce5d782f1aece71f069c4ab306e3a703ad10c593e9c63295131a59547eec"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_use_structure_not_keys_test_use_structure_not_keys.if_Bs_0_3_.else_.assert_Bs_1_3_5_7_": {"doc_hash": "bbe9e1c19779e8cfbecd8ca4dd22d828e04b04b51bb03b5bdfa3510924d51f9e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_dont_run_all_dependents_too_early_test_dont_run_all_dependents_too_early.assert_expected_actual": {"doc_hash": "38cbcaaa1b57fe78c12817a745ee77197b3c0c30317b9e97553c4ee3ae494668"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_many_branches_use_ndependencies_test_many_branches_use_ndependencies.assert_o_c_1_o_a_": {"doc_hash": "0217ed553afd3aa961694ec847a81aace7ca27e3157534275622e07fc9baca37"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_order_cycle_test_order_empty.assert_order_": {"doc_hash": "a1cbc8bd129468e380e89cacd583f70903b738cfcc1255c9c1f90f33261fedbf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_switching_dependents_test_switching_dependents.assert_o_a_5_o_e_": {"doc_hash": "48cd0f2e5827210334c2957a7b0f5f68f178a3a2ece2b9d2d2e2cd440f78b0f6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_order_with_equal_dependents_": {"doc_hash": "6b441632eb28b5ff7c5f13e621c25fc555758d0402b101a537019b5bf3892852"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_from_dask_rewrite_import__test_args.assert_args_1_2_3_": {"doc_hash": "793f057642c4988fdce5726a7a0be073784fe51f742b54bfc47be76eab16ae5b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_test_traverser_test_traverser.assert_list_t2_add_": {"doc_hash": "9a16791aec3529c77b68e7c0ef1bf392576e71892fc3b74027fdf8627e1baa8d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_vars_rule6.RewriteRule_list_x_": {"doc_hash": "ec5290f4e0ad43bcf8207b77d9e80872d539668777c2deb79a8fb5429e08e187"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_test_RewriteRule_test_RewriteRule.assert_rule5__varlist_": {"doc_hash": "019245b1f0ce4ce32e4931de5b492bbd39aef3b23fb6540367483bd8bf7496fe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_test_RewriteRuleSubs_test_RuleSet.assert_rs_rules_rules": {"doc_hash": "96a959fc9181f2a966f7ac2e298584c40ef509b0891218f5d42849d9b6305941"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_test_matches_test_matches.assert_len_matches_0": {"doc_hash": "0431cd8a176d77cb942e79d27990c36544d7b54448563806b099b73a93642d31"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_test_rewrite_": {"doc_hash": "9bcd6b237394e1128f2f42621bc9f034851bae3b24495718da7fc8ec34bbf678"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_sys_test_numpy_0_strided.assert_sizeof_x_8": {"doc_hash": "fbe7b8f114e7b58ac3f767e2d53ed4b386194c8b7da70d130e019a278a1559ad"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_test_pandas_test_pandas.None_6": {"doc_hash": "18c5b26bd4a2ffb28903edb86d0d92166980b4d25fb280d4e3ff23f95bc3d7d1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_test_pandas_multiindex_test_pandas_repeated_column.assert_sizeof_df_x_x": {"doc_hash": "64ad1e4680fea9842fb8ffa8c6f49952400dfbdff83ec311561376411554f8bc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_test_sparse_matrix_test_sparse_matrix.assert_sizeof_sp_tolil_": 
{"doc_hash": "4d900f2face5b92dd585b34389ea7561b4b73ba96b79d1df3815559b338c3844"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_test_serires_object_dtype_test_dataframe_object_dtype.assert_sizeof_s_100000": {"doc_hash": "cb462f15d7804fae66440e384790f464549727c670edc61480e4dc1565449580"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_test_empty_test_empty.assert_sizeof_empty_index": {"doc_hash": "031586606cbeac6806d39b943872ded2573d62d91c7ce9fb561571f190ca4d1a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_test_pyarrow_table_": {"doc_hash": "3da3ecaa572048726db1908f985ee44825803d2d5e17f4f32247cf79ceabcfb4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_system.py_builtins_": {"doc_hash": "4a35e98eff2430bb4986d84652c10caf2c9e3f4b2bcf759b338e1dc9353de94b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_threaded.py_os_test_pool_kwarg.with_ThreadPool_3_as_poo.assert_get_dsk_x_pool": {"doc_hash": "221ae6bad805abb13b36cc9a9600c69c5681cf0c1f78f764a04ec9176368dd59"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_threaded.py_test_threaded_within_thread_test_threaded_within_thread.while_threading_active_co.assert_time_start_5": {"doc_hash": "89c5984a535edab4f5b62d5806b814dada74fd53998578faf58a320b50c9989f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_threaded.py_test_dont_spawn_too_many_threads_test_thread_safety.assert_L_1_20": {"doc_hash": "819b79504d3b6cad4985c25159c1fdd88cc40b34cc0b2f00e24a67651016de9b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_threaded.py_test_interrupt_": {"doc_hash": "fc8f8ede73f187afe7d9be9f2a66c4954ece1a525d09d5d7e5655110347b907c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_datetime_from_dask_highlevelgraph_": {"doc_hash": "4b01d360e756c79aa8af0daf16a2d0c9f4a7e3072f8845b3f0eb24f1b07a51bb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_getargspec_test_getargspec.assert_getargspec_MyType_": {"doc_hash": "0b13e05345ea6a1f4b5dbb3b846459c76f9ef8ac6c81cbddc2ba7124fd05606f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_takes_multiple_arguments_test_takes_multiple_arguments.None_7": {"doc_hash": "96ecf9940332c4114b0823785141de2773719d8f47c91c8f2339aefd30a503de"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_dispatch_test_dispatch.assert_foo___doc___f__": {"doc_hash": "b79f8b9dbb78399c96a5e14b4d8d8d4906d73e6e27886135a787cf247f935de6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_dispatch_kwargs_test_dispatch_variadic_on_first_argument.assert_foo_1_0_2_0_": {"doc_hash": "372df096fb5d9a0cf281fe65322a9c8bb875bfe4f454d2c2e292d51383f1393b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_dispatch_lazy_test_dispatch_lazy.assert_foo_1_1": {"doc_hash": "d13f5cee190c52e60cb16a945f7653cef960571f06688812af9fb805a85be44d"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_random_state_data_test_random_state_data.None_1.assert_s1_s2_all_": {"doc_hash": "3896dcfdac50b0e28e0259569b156f3ec52e949e6b5e9b69f8f4029bc6973e75"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_memory_repr_test_method_caller.assert_count_in_repr_me": {"doc_hash": "15ee5150247788971c29eefd3eea1d1d29b7f1da3cdc4c5987f8422ecce5b8f4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_skip_doctest_test_skip_doctest.assert_res_expected": {"doc_hash": "cdea1416d156e484176a66ace470b16df0c1847c5a0f3dd73d8d439680492f9b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_extra_titles_test_asciitable.assert_res_": {"doc_hash": "357060ca9378cd58270706e7084cbec93570a06f56010140c6947953d25f4203"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_SerializableLock_test_SerializableLock.None_4.for_y_in_b_b2_b3_.with_y_.with_x_.pass": {"doc_hash": "058416eabc107817d87b00ea18b72020eddb178ee612577289d8a9e03af22fbc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_SerializableLock_name_collision_test_funcname_numpy_vectorize.None_1": {"doc_hash": "3fa9477cc13d03928dcae575f0b64433e2c30cc391d58e27ded3c0d9fea63b07"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_ndeepmap_test_ndeepmap.assert_ndeepmap_3_inc_L": {"doc_hash": "89e502df51e93986205c785f4c8099b503fa04799365d8f77725689c6b55bd9e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_ensure_dict_test_has_keyword.None_4": {"doc_hash": "2588a0c9b0153efec3f86350ed6dde7e3850b5b3e0666166ebc419e2eced4db3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_derived_from_test_derived_from.assert_extra_docstring": {"doc_hash": "7e7adc5dcef1a0684aa47dab5a4ca7e7d7dc154080d75305db24d1baaf51e46e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_derived_from_func_test_derived_from_dask_dataframe.assert_dask_in_axis_arg": {"doc_hash": "301a667886d05b071692404b7fbaea4b157aa3a422dd0a58efa7d0ac29edb332"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_parse_bytes_test_parse_bytes.assert_parse_bytes_5GB_": {"doc_hash": "1d95c67e70d6e5d3d3b5307ab04fc1ad80dfc35c2a99a9d1f5ab9350c480bcf5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_parse_timedelta_test_parse_timedelta.assert_parse_timedelta_1_": {"doc_hash": "28cf7c3e2842ab62a28e515d7f868e1c16c2bacea833a088ff95d843f8f6114f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_is_arraylike_": {"doc_hash": "ffa5ef06e1cc4a2398f3c83d6e04a1c94781bd000d18137c83318af730964a8c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/threaded.py___pack_exception.return.e_sys_exc_info_2_": {"doc_hash": "b19165dc49de18592e1e175ceaf3f8968a379a0fe8d05513b113930fdaefc978"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/threaded.py_get_": {"doc_hash": "d566dd59a33a35f5eeccc4bd2f548e187069011d668660d1e06d9b10ac4fcfa0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_from_datetime_import_time_apply.if_kwargs_.else_.return.func_args_": {"doc_hash": "7ccb748594d893fcac0f7c3cafa5762ffae02b67f9d6b2ab047542df09bcf588"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_deepmap_deepmap.if_isinstance_seqs_0_l.else_.return.func_seqs_": {"doc_hash": "b40aade22bd829b2f41d4c78956aacf7b6e702e899f264ac8760c4cdf0330d70"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_homogeneous_deepmap_ndeepmap.if_n_1_.else_.return.func_seq_": {"doc_hash": "ed94fd9ad74f659145043536ef7e3351ba17015a69d1166e8d71ca17df13f5cb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_ignoring_IndexCallable.__getitem__.return.self_fn_key_": {"doc_hash": "db82dabb422c44ce90c8d21919ab3e4baa1880f664097f78db833eaf26f342f2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_filetexts_concrete.return.seq": {"doc_hash": "89e81e098de46fb2d81aba7aedcdaeb2b61afb576e26dedf1fac7338ae05216e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_pseudorandom_pseudorandom.return.out": {"doc_hash": "cf58f910afa1198efe0d3b0471a8470e3058510743e546b38ba8cae40735f746"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_random_state_data_random_state_data.return.l": {"doc_hash": "694d05aaab692e72175f51aa4074caabb6c17792d3f16b61cb0289aabe296159"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_is_integer_getargspec.if_isinstance_func_type_.else_.return.inspect_getfullargspec_fu": {"doc_hash": "c4c204703a9cbf802bfa88e0def4240135bd9b19490674734bb369b7ce1ce519"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_takes_multiple_arguments_takes_multiple_arguments.return.len_spec_args_ndefault": {"doc_hash": "228771ddddb5ec810d0214f0cd98bd72741195ef74a23a265ccf808986261b4e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_get_named_args_Dispatch.register_lazy.return.wrapper_func_if_func_is_": {"doc_hash": "d63b4582829ba322677bcd9849e546cc78113b93cb928c53c95463d7abd90aa7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_Dispatch.dispatch_Dispatch.__doc__.try_.except_TypeError_.return._Single_Dispatch_for_s_": {"doc_hash": "972cfa4cc097459280d237c15c5bcba7ed753c531c66c77c5074bf520d872404"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_ensure_not_exists_skip_doctest.return._n_join__skip_doctest_": {"doc_hash": "909143ffbc94d024b44789ee576e9bfe43d4c82037d996b740ee5a5a4d53b8ba"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_extra_titles_extra_titles.return._n_join_lines_": {"doc_hash": "972ef85d5459c395ac7c2b180d23d3e4f6aa59ea41f1f1be0fb5d8fcdf6b16a9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_ignore_warning_ignore_warning.return.doc": {"doc_hash": "aedd9079ec8717ecec74e6cef9a5235e617420e7de837e26be4cceb4570a111d"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_unsupported_arguments_unsupported_arguments.return._n_join_lines_": {"doc_hash": "a713ab6052573ba7444f3cf4ef050c58de687435bb2e2008824be8d117e6b32c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py__derived_from__derived_from.return.doc": {"doc_hash": "c1a3fb03e0b126ef8e883f7a41090295cc19f81ef7ebc6421c69916f38ccd6db"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_derived_from_derived_from._Decorator_to_attach_or": {"doc_hash": "ef70b839300a0b3a60a7daff1588c70441dc5bc335d24fa49a3dd4f6eab1befe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_derived_from.wrapper_derived_from.return.wrapper": {"doc_hash": "2cdafd2cb269bc71080f6d01e9e7a7a85c8b7a0056451be8e7aca33808a5fdfb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_funcname_funcname.try_.except_AttributeError_.return.str_func_50_": {"doc_hash": "5b4dadc7ec906ece6a05575a0fed3646430cb83a023de3ab2e8e74d99201348c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_typename_memory_repr.for_x_in_bytes_KB_.num_1024_0": {"doc_hash": "3250d55f2aea9c7e92f543de5e26682595c3dc0cad36fd6abc22caf6078612aa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_asciitable_asciitable.return._n_join_bar_header_b": {"doc_hash": "b41737cfcd53ca1ee9d34fcde249575665374a69ea7c04689ca15f9cafedb63c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_put_lines_methodcaller.__repr__.__str__": {"doc_hash": "d758c2cd8f5c43bec44aa0a09820f347636d038670d2a4f1e82e3d4ad1bb7821"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_itemgetter_M.MethodCache_": {"doc_hash": "b1a631c77d27a83819926735f65eef7061f8582ef01b71f7d26a25bd9833de1d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_SerializableLock_SerializableLock.__repr__.__str__": {"doc_hash": "340af8ec3bf73347d85fcb1fb902034190729bbf05971dbd533499a7290cad05"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_get_scheduler_lock_ensure_dict.return.dict_d_": {"doc_hash": "da2c96eb0913974760cd9e47b68a3311779f63b375fa671a50b9a1de441d466f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_OperatorMethodMixin_OperatorMethodMixin._get_binary_operator.raise_NotImplementedError": {"doc_hash": "4d8d170dc440f5eeea3bf71e52430913fa21fb670af4822ca17905c8b8e05ae4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_partial_by_order_is_arraylike.return.bool_": {"doc_hash": "48127eff22a2edd2ba94a98591fab04fd21e3aed05289eccaebf9d58f4a74200"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_is_dataframe_like_is_index_like.return._": {"doc_hash": "f541513acd728fe6903bcf6e6ed2c28f70e453fcc134d0ae8a17c38fbdc648ea"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_natural_sort_key_factors.return.set_functools_reduce_list": {"doc_hash": "143f3d82630ca6abc4e41e024f9ab4d69c5bef00ffef9c0f04a375da57f4f5bc"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_parse_bytes_parse_bytes.return.int_result_": {"doc_hash": "ad70d6af701e80473e67e90638dc8f0873a33731eec392334c6b728695533836"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_byte_sizes_byte_sizes_update_k_1_": {"doc_hash": "70f56a9a3fbba4e02e54e6c4bd7de366571ecba1bb793d9c17427f6783d1c0af"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_format_time_format_time.return._2f_us_n_1e6_": {"doc_hash": "22e701d7ab5b5dfc36831dabfbb7ba6cc2da89e94c6ae7eba74279cb93595730"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_format_bytes_format_bytes.return._d_B_n": {"doc_hash": "0b4cff4e335b08f23de79c782d47c45f0e6c6badb9fcac6cde168651598d37f0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_timedelta_sizes_timedelta_sizes_update_k": {"doc_hash": "9ccd9fb8113a247987c7a7f15451f56b2f88519af1d720e3ea0bb353a46e3026"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_parse_timedelta_parse_timedelta.return.result": {"doc_hash": "e1cb18be40dddeff80d1ecbc81c6a8003dd0e1814ce9cfec0f863611dfa7c1bb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_has_keyword_hex_pattern.re_compile_a_f_": {"doc_hash": "7e7832b24b3fa8e8d43e70f5599e93f6c59400feeb8eacf439a13885708bbf9c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_key_split_": {"doc_hash": "c3fa85d026ea8b8ac114d70f27efefe8e0a09f6007c130a1a0bf9b369e4424a1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils_test.py_inc_GetFunctionTestMixin.test_get_with_list.assert_self_get_d_z_": {"doc_hash": "d70b59cc6df5400ad016f882c4a84c4cf4151193784dd71f8f50980cace859e6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils_test.py_GetFunctionTestMixin.test_get_with_list_top_level_GetFunctionTestMixin.test_get_with_list_top_level.assert_self_get_d_f_": {"doc_hash": "4b8267e06205acf9fa19d3ad66764f4edbf0957bd997d15d3198ec11c8815daf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils_test.py_GetFunctionTestMixin.test_get_with_nested_list_": {"doc_hash": "46850f6363d6b7c2a844636a382a28baa4f6618b2708c10785e5091f0f5eae6e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/conf.py__coding_utf_8___html_show_sphinx_True": {"doc_hash": "62c032239ff105724ea69de1fd23eb5fdc6e9d067a7beb9272f8865f5ff7e9ce"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/conf.py_htmlhelp_basename__Options_for_sphinx_e": {"doc_hash": "8d2e01462620177b2ec642a0150e0ac67a9aba23034e041a4b2647ea8fec22e5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/conf.py_intersphinx_mapping_": {"doc_hash": "6a69d8359b41dc5e3d4b8581f4f77d01184be1966db6fef75ed2b3107a3a24ce"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/ext/dask_config_sphinx_ext.py_requests_setup.return._": {"doc_hash": "eb97949ddf9a050a8d1bcb0defc6351a0ff1f44e0953c7845b49e65a54abe608"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/ext/dask_config_sphinx_ext.py_dask_config_to_html_": 
{"doc_hash": "509e958cf13c4e0d1583b525a6a9b2f6cfd6fa7df243693ee65cb497e0de8dae"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/scripts/scheduling.py_from_time_import_time_trivial.return.d_x_height_1_i_": {"doc_hash": "b18d1b37dea7a08f9cd700b3660fc20a076b8561c029ac8550b2879bc3a95a68"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/scripts/scheduling.py_crosstalk_crosstalk.return.d_x_height_1_i_": {"doc_hash": "177444b38c641dacc00aec2f243ea32fb68359398004f50daa4fb4ce84651582"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/setup.py__usr_bin_env_python_": {"doc_hash": "369edf40169ec3d8ef54838a4dfe257121d7b1a0907856fb05e3cc833eed5561"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py__Version_0_16_get_root.return.root": {"doc_hash": "6037919622d97781b7d59e2bfc6c232020081792e1bc8d43b3547fcb6339e15b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_config_from_root_get_config_from_root.return.cfg": {"doc_hash": "f94a68557f2fa0e531bce6e838f5874c06260f952089627ebf62e00848d1793f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_NotThisMethod_register_vcs_handler.return.decorate": {"doc_hash": "73daca7e59adb07412c31abb19c5afdd81cda94159e7b1d969a036c64e8e4be5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_run_command_run_command.return.stdout": {"doc_hash": "2bc574b43d1a6ce36eba13eb9658c3774bc0bcc2a8ae8e6d55a7a8dd557d0802"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_git_get_keywords_git_get_keywords.return.keywords": {"doc_hash": "2d5fa57e285364d55ac036c1052c8631ef01643581f769c18cb1ad2b1ac05f52"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_git_versions_from_keywords_git_versions_from_keywords.return._version_0_unknown_": {"doc_hash": "a463566e2a9177fa93b410343901fab1ca2bd40e8f2b9fcd85457e0cfefed307"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_git_pieces_from_vcs_git_pieces_from_vcs.return.pieces": {"doc_hash": "f1e6ef95c42f54a2b585f9ba794b72144071f5992ec8a611aebe089457b2ee48"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_do_vcs_install_do_vcs_install.run_command_GITS_add_": {"doc_hash": "83a3c9611a29a0a87630fe19289e9ccc134b95409d55a48807b517015a093642"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_versions_from_parentdir_versions_from_parentdir.return._version_dirname_len_p": {"doc_hash": "019abc54e0c5d0037fdc73017eb449f4f36b174fcd53f2ddec66612636e3488b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_SHORT_VERSION_PY_versions_from_file.return.json_loads_mo_group_1_": {"doc_hash": "160cb5f3677da881f0431dd1430b370bd03338314effaa5a2d1a3a962b6b88bb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_write_to_version_file_plus_or_dot.return._": {"doc_hash": "2606d956a2836928821f54af5066ae269853eb928d94e338d6e665ef5fc27c5f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_render_pep440_render_pep440_pre.return.rendered": {"doc_hash": 
"62f8dd7acee0867457d7b07d3e6edcf3260a2a189611ef33381ac724528d4433"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_render_pep440_post_render_pep440_post.return.rendered": {"doc_hash": "118807a5c2a97d74d8fe5f796e9018ac18aadcc9b3182b6181ecd25e8dcbd0d7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_render_pep440_old_render_pep440_old.return.rendered": {"doc_hash": "6c6b4eb402371d591eeb0abb50ada6698ebe88b10543f867cefca40fd086f508"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_render_git_describe_render_git_describe.return.rendered": {"doc_hash": "a3d9b39392875e0334af8c64b46fe88c38f8d51c8b168839e4ae228db30b0daf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_render_git_describe_long_render_git_describe_long.return.rendered": {"doc_hash": "cf56b90f4c7b7ac99479cdeafeebaf306a0ab42713bb536dffdfe85f0a8b1c00"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_render_VersioneerBadRootError._The_project_root_direc": {"doc_hash": "2b30b66ceb0b36870dcb6210c247df913c36eacc6b4774a674a3d9084ef0138f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_versions_get_versions.return._version_0_unknown_": {"doc_hash": "93f7404067172b3e768cee6e5436263a375ba8d50909b3d9109e889374995ce8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_version_get_cmdclass.from_distutils_core_impor": {"doc_hash": "b46b8ebb3921cc678793c9ac073ce2ccfa012b4f230b5a81c900c42f0a97cf9a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_cmdclass.cmd_version_get_cmdclass.cmd_version.run.if_vers_error_.print_error_s_vers": {"doc_hash": "690b93737766ad4fb1256933abef4a8b7507e8aa3ff75a426d910969a64d4b3f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_cmdclass.cmds_version_cmd_ver_get_cmdclass.if_setuptools_in_sys_mo.else_.from_distutils_command_bu": {"doc_hash": "d2515232f0cacf396d1c894d38fa2cf264319056056cc5e8fd23bfc1b8d8028d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_cmdclass.cmd_build_py_get_cmdclass.cmd_build_py.run.if_cfg_versionfile_build_.write_to_version_file_tar": {"doc_hash": "a3e61fb1c123a544a0866cae1590acbd50ccfa6687b2bde1c15463c1eb266a0c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_cmdclass.cmds_build_py_cmd_bu_get_cmdclass.None_3.else_.from_distutils_command_sd": {"doc_hash": "b1065450584f405e4c58ab87b443417917886b3a0bb0edbf1ef2bdf1b52de9f5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_cmdclass.cmd_sdist_get_cmdclass.return.cmds": {"doc_hash": "8282207f276c680678f0dbfda07ca1696ba9602de8b75e07d62c92859164fa4a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_CONFIG_ERROR_INIT_PY_SNIPPET._": {"doc_hash": "aadd871f92dd45b60bf6a6dfe7fc15fa89a744ae887e426fcf312b1aa2557082"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_do_setup_do_setup.return.0": {"doc_hash": "0a326019dfc6edd9496f7fd3ba1b5d5ce90eb86e158880be5cc484b65f2e547b"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_scan_setup_py_": {"doc_hash": "03a81199bd59df0696753ac46016a408d4cad2f45a8a321da47cfe1b9f0f87c3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk_types.py_np_register_chunk_type._HANDLED_CHUNK_TYPES_appe": {"doc_hash": "0da3bb245dcab1557a0e95dca933a33f56a2aeefdc191e66cc58ebef6ab8cb6a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__should_delegate_check_if_handled_given_other.return.wrapper": {"doc_hash": "2497344ac9555bcf6332a58690a415c6cdee2e9ba6e7dc3a7c404f69cec21929"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_normalize_arg_normalize_arg.if_is_dask_collection_x_.else_.return.x": {"doc_hash": "43155acde636f11f7af9b1388b85e83bf8a9cfe1db9dd69c02ba08a483495c63"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__pass_extra_kwargs_map_blocks": {"doc_hash": "871f8cd8e87139fdb563a0e94c178aed237d7b2f034986a28959dc16c72b6019"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_map_blocks._Map_a_function_across__map_blocks._Map_a_function_across_": {"doc_hash": "2182b897202a197a344c65c967c6e2bf146e8aae10af94b8d421a513db96dbde"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_map_blocks.if_not_callable_func__map_blocks._prepare_to_inject_it_": {"doc_hash": "dc644cbfdf1ac85578135991cf4d4634c3f8ecec1ffef30330a8de07cd5ac10c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_map_blocks.if_has_keyword_func_blo_map_blocks._objects_and_prepare_to_": {"doc_hash": "4e37820965032efb5b60d93f722e60408eb06eadec75e44ea9c49d3955c62a13"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_map_blocks.None_11_map_blocks.None_11.extra_names_append_block": {"doc_hash": "bda300357eb1ce95e39f1205f4b4aa994647761c1d1bea8be4297cb7f60340a0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.cumsum_Array.cumsum.return.cumsum_self_axis_dtype_": {"doc_hash": "4935593e08ee2e45515920af95a94061edff285e41314a035e2d5386eb08260a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.cumprod_Array.cumprod.return.cumprod_self_axis_dtype": {"doc_hash": "bc9ecd5d76374ded4d1f51b79ea4f6c001a09e74e4b52c2e93442413b544f86a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.squeeze_Array.clip.return.clip_self_min_max_": {"doc_hash": "68a5e7b705b513a3802648325fa85353918850762a3fb15ac8675cfdc366720e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from_npy_stack_from_npy_stack.return.Array_dsk_name_chunks_": {"doc_hash": "a041e8d6c24ceac26b32abc3f5da85357013d399889b7c0e60fe785d6863a20d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_new_da_object_": {"doc_hash": "6c34eebb36562d35b5a6f28dd7d61dc37a8af2ca869b95e4d3212e58347708e0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_empty_like_empty_like.return.empty_": {"doc_hash": "ef7b9a57a53e7bb64ace4ce4a237f3b3fd7d8a04bc90311accd4dd342f9bc91d"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_ones_like_ones_like.return.ones_": {"doc_hash": "2eaf8f35bb5cc34430fba44166f8c3f5bd92748bbd4b8f47e51bf04fed563526"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_zeros_like_zeros_like.return.zeros_": {"doc_hash": "124f18e83e1dc1fba32efca6c78cd63adbdb31d7c1bfec09d7d51db760386b44"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_optimize_optimize.return.optimize_slices_dsk_": {"doc_hash": "57567f62b4e954925fcaac519cd4ab636c700cee9ce25c44bdfdbc2f33719ce3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_warnings_concrete": {"doc_hash": "f34b71e6b103b6dc547b168b681158bc404f69d3606186d4061d9176268416ed"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_fractional_slice_fractional_slice.if_all_ind_slice_None_.else_.return._getitem_rounded_index_": {"doc_hash": "eb84a7cf00879d33fadbdfa9c2f499e6722c56ee3149878df3be4aba56c0d19c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_add_dummy_padding_map_overlap": {"doc_hash": "612b8abe941f06e60865c50502124065bde45ce8def336185d9c061fcfca7159"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_map_overlap._Map_a_function_over_bl_map_overlap._Map_a_function_over_bl": {"doc_hash": "53eac38ab3ee4fef8dcfd308e85151407c60cca73e385017b687739cd47bd8f9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_format_chunks__get_chunks.return.tuple_chunks_": {"doc_hash": "372e26eb008e854b5a53648e44fd5f16b4776ba97d5dffb45f3445d2d298e1bc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__balance_chunksizes_": {"doc_hash": "a5e7b5345078bb6245094c2e2f50e985461945a5086c42a09a6a96a60ecd80ab"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__prefixscan_combine__prefixscan_first.return.func_x_axis_axis_dtype_": {"doc_hash": "5642efaf6ac9cc027cb75412af57fcdb1736928864aa55fc0a7b4a1774cd3996"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_prefixscan_blelloch_prefixscan_blelloch.level.0": {"doc_hash": "5fb8a5452f9729babaded752e1d2c08b1c4894175fd7ccc4422ba4644106aca3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_prefixscan_blelloch.if_n_vals_2__prefixscan_blelloch.return.handle_out_out_result_": {"doc_hash": "c924e405627651bf6ae738d58ec5d6ede7eadd3c8407422c0e4887d1657fd0e7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_cumreduction_cumreduction.for_ind_in_indices_.dsk_name_ind_m_n": {"doc_hash": "1b1446726f1f180eb3d0de656f28f05c756a4348fde1a30d103d1c3ba4cbbe00"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_cumreduction.for_i_in_range_1_n__cumreduction.return.handle_out_out_result_": {"doc_hash": "c045ff583cd01e3f37a9bf0b38ff6db92e9b79ca4266a7646d08111e2103cdb1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__cumsum_merge__cumprod_merge.return.a_b": {"doc_hash": 
"52a74e4ab75aa7dd92a2e8aefeb798b653f24006d7371565323c798d3cdb65f5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_cumsum_cumsum.return.cumreduction_": {"doc_hash": "351f11747e255c52ac880ebadd4b81705ca02c225b0484c05e15394601b4c691"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_cumprod_cumprod.return.cumreduction_": {"doc_hash": "17b771f61f09e3189e3f46d0f19cbe281f089db64cf500147cb1a1b075e584ca"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_take_take.warned.split_is_not_None": {"doc_hash": "09310ed5b8b9a2033a8cdaa64b4c983c280e98c32b86401ed8ff24ad47d3f578"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_take.for___index_list_in_plan_take.return.tuple_chunks2_dsk": {"doc_hash": "3a6c5c866fedf64e3feeb1f21ae7eff7a0f2c6f8388508864ca2a271ed10face"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_dtype_inference_test_map_blocks_dtype_inference.assert_dtype_in_msg": {"doc_hash": "8cedd074acefd95efa9237bbb51fa928362f28f03f765a069891afdb2dfe6cb7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_utils.py_test_meta_from_array_literal_": {"doc_hash": "11569dfbbaee39ff9bd1aa30e611f1902f4064f3ca709ab1fb67162de6ae1d30"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_np_functions": {"doc_hash": "f2ab9bdeb08b49138f6c7f6704793d58cde166b35e58f7d5c2c2d32818ea2a63"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_test_is_valid_chunk_type_test_direct_deferral_wrapping_override.assert_eq_res_2_np_ara": {"doc_hash": "9dcfb129566610e8bee872b37f6734d6b1a8191825b4c2ed932189d72107032e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_UnknownScalarThatUnderstandsArrayOps_UnknownScalarThatUnderstandsArrayOps.__array_ufunc__.return.UnknownScalarThatUndersta": {"doc_hash": "59ce974e17c9bd879227f783fed799a50e07950b9e3d20f01ed799aaf8ca1d38"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_test_delegation_unknown_scalar_that_understands_arr_ops_test_delegation_unknown_scalar_that_understands_arr_ops.assert_type_np_multiply_a": {"doc_hash": "3b51896941b3ff6c34e771c4f8e5b26aa6b966a7d15dc0b5d225f288d5f4747a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_UnknownScalar_": {"doc_hash": "9ed5704e001f0ea35d97176552bdfe08f0e3aee778f39ef406dd66c5c9679d84"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_dtype_preservation_test_svd_compressed_deterministic.assert_all_da_compute_u_": {"doc_hash": "493e1313792ed29d8791e057d63475b8afbfee8e66a419f4bc9f0d6b6529ef50"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_lstsq_test_lstsq.None_6": {"doc_hash": "5c03b4034ffbb0e250f88dc5a49f314d70129f72cda2b701f7168e8837368965"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_flip_correction_test_svd_flip_correction.None_1": {"doc_hash": "67710b70cdda87f5ca574a1f965b7522990900fea789d9edb335de5e1ce43d27"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_flip_sign_test_svd_flip_sign.assert_eq_v_y_T_": {"doc_hash": "16baae31b26f9fa34f61cd66d19445fc1c253a3d9de48a3a4662b9e0b5d073cd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_disable_lowlevel_fusion_test_disable_lowlevel_fusion.with_dask_config_set_op.assert_eq_y_1_3_": {"doc_hash": "a94dcd1efa8ee92fd4a6b80abbdd373220d3cc38f273062f59c69cca50199e8d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_assumes_shape_matches_first_array_if_trim_is_false_test_map_overlap_assumes_shape_matches_first_array_if_trim_is_false.assert_z2_shape_10_": {"doc_hash": "232566f86510c32bfd4e8e57f2d4002e4c5fc2307087a4ae818a229ffb76dc20"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_deprecated_signature_test_map_overlap_deprecated_signature.None_2.assert_y_shape_3_": {"doc_hash": "b565d2c41a37348fa4583fe305861171978bce5f07e880898860fea71e95aac1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_plan_rechunk_heterogeneous_test_plan_rechunk_heterogeneous.None_4": {"doc_hash": "8591fd2eac255483334076f50b1bb0cd31943501bed59bad8768f27973e8d11a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_zero_test_rechunk_bad_keys.assert_100_in_str_info": {"doc_hash": "6324a4711e1907a87fdaeff11fcb46b241133df043077753555a3a3b9fd6b9e0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_balance_basics_test_balance_chunks_unchanged.assert_balanced_chunks_0_": {"doc_hash": "d71bb9e395d91e66180b38c02b1184c1bce35baaaee7f3a8c7385a6937d8c926"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_balance_small_test_balance_small.None_3": {"doc_hash": "42b5e2038625331085bbe7c92d744499fb99e22c98b1aae49f894d062f0d171e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_balance_n_chunks_size_test_balance_raises.x_rechunk_chunks_arr_len_": {"doc_hash": "bcc5eca8c9e72e772ce4a330a5d63c3ab258586175bd3c205d9a3113ff3db640"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_balance_basics_2d_test_balance_different_inputs.assert_balanced_chunks_1_": {"doc_hash": "b2e8828ff34e6631cd87b1c277730719a1ccef55337901eae0914a3870796e10"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_balance_split_into_n_chunks_": {"doc_hash": "fc290e6d08ce26c07351d235c20144d488f251d798aba2e36af4ccb090618386"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_median_test_median.assert_eq_": {"doc_hash": "cc105ed6f5db2cca5b2d80c8dfc77c661f3c7bc6e6a2ae21115249502798ad91"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_object_reduction_": {"doc_hash": "a7be902231e466526b136518001e62c5fc2b4e15abbd407a17b268982270dfe7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_test_reshape_unknown_sizes_test_reshape_unknown_sizes.None_1.A_reshape_60_1_1_": {"doc_hash": "2cc45ef107777dd432f297009f61f4d1db2025443c3b4ae7af12c222398d3298"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_test_reshape_all_chunked_no_merge_test_reshape_all_chunked_no_merge.assert_eq_result_base_re": {"doc_hash": "fdba50699c4c2de7f4bf528483baa763bf27325b75cd73759b5119f334d28780"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_test_reshape_all_not_chunked_merge_": {"doc_hash": "d31fbb6ea72155ccb33c84abeaab0f68d42cd0a0117729de3e8a2aa09263e64c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_getitem_avoids_large_chunks_test_getitem_avoids_large_chunks.with_dask_config_set_ar.None_2.assert_result_chunks_": {"doc_hash": "adf02bfaf7608e13d9f849fc63eaf1f8ccd49b6b26a560d5c83d4d4b93e53127"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_getitem_avoids_large_chunks_missing_test_getitem_avoids_large_chunks_missing.with_dask_config_set_ar.assert_eq_result_expecte": {"doc_hash": "0f798bc0fbe41b0927fc585345c8c4785957d947d0e19389a55f742a4f2a459e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_take_avoids_large_chunks_test_take_avoids_large_chunks.with_dask_config_set_ar.None_11": {"doc_hash": "40e36d560d51c1176b212c7dc0589bbab0936c4f6cb09a7882c780a8a3a7a8ba"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_take_uses_config_test_take_uses_config.with_dask_config_set_ar.assert_len_dsk_4": {"doc_hash": "69fd456a10d72b38a7bb4ddfea39a4e06d0ac59da6809e4f1a37b68a67e68127"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_svg.py_test_draw_sizes_test_draw_sizes.assert_b_c_5": {"doc_hash": "65295bbc5fb2f8bd2a0988d3885de43ced97f71a04ef9a11bfb856717bc223f4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_svg.py_test_too_many_lines_fills_sides_darker_": {"doc_hash": "a4ee4f854a87e61871ef292f566d16c4ee524189d0cd9c9e5a5ad00208117798"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_gc_test_bag_groupby_normal_hash.assert_even_0_2_4_": {"doc_hash": "9d7714b0013d60a220d007e73067202215e6107c28bef96bb231f6fa17e9a246"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_itertools_PackedFunctionCall.__call__.if_isinstance_args_tupl.else_.return.self_func_args_": {"doc_hash": "473f9e0fe6bf6acac776a5f1b2023683f2c6015598b575e5a0f8d03337c3a53e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_subs_blockwise_token.return.prefix_d_i": {"doc_hash": "8bb9584d929e7f5c939c14d2430069e9bb5b475997e59c7b6f7c607d2883a236"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise_Blockwise._Tensor_Operation": {"doc_hash": "eefbb3a1838a2e64150603be710c7cf2adf5e2c9787a689174a650177a3cfc46"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise.__init___Blockwise.__repr__.return._Blockwise_for": {"doc_hash": "ca593c919d12673bfa39786f22e131f19502a967927546930a3e3d8541b1de3b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise._dict_Blockwise._dict.return.self__cached_dict_dsk_": {"doc_hash": "b569822042050e060c82d9f6004096798164733c0a41418108a83c376d4839e3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise.get_output_keys_Blockwise.get_output_keys.return._self_output_p_for_p_i": {"doc_hash": "b2de19fe9b43a5681b8b8d37b384e44bb9f6a38c15fb7e0ff16bd68218432bc0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_find_all_possible_keys_find_all_possible_keys.return.ret": {"doc_hash": "1338ed02e37aabcf8765dbd0571eeaf948824ea96be208a8a38959d3b751c2d9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.index__Frame.index_24.self._meta.result__meta": {"doc_hash": "34d5b30c8a8b8b97ae26c18f607cd19405297763813dcd8876e11b6c3f56d943"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.nlargest_Series.isin.return.super_isin_values_": {"doc_hash": "a15d320ae2d1bb2afb33edc7ec114605f0bf5891cb18b186385f9fb38aaa65fe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args_list__build_agg_args_list.return.dict_": {"doc_hash": "0a2d635ccc4ca59d9b19028725441fcef6352c1d7db46780c2f267b268cd0864"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_SeriesGroupBy_SeriesGroupBy.__init__.super___init___df_by_b": {"doc_hash": "f1f71869d8327d451a98a24832bc1b52f211bab79280f9a655724ca5f7bc161d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/hyperloglog.py__Implementation_of_Hype_compute_first_bit.return.33_bits_sum_axis_1_": {"doc_hash": "c1173253d7e27c51fa855b37a69d39d3e62803ba46aa8a49f5a4a7a6f660a526"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_BlockwiseReadCSV_BlockwiseReadCSV.__repr__.return._BlockwiseReadCSV_name_": {"doc_hash": "f321a7a675d45f4c5ab8477b0912e595922d5fbecb34e9ef9bbfeb954b82bd8e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_math_import_ceil_lock.Lock_": {"doc_hash": "d6dacd73c9e69553f7006de18e475e9580cf17e38340cfe02b2370cfb9430ebc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py__meta_from_array__meta_from_array.return.meta__constructor_data_c": {"doc_hash": "18f648e118461a5a60746131b0421ae1ec2b5cbc1b02a5d00b878df1266585e6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_pandas_from_pandas.nrows.len_data_": {"doc_hash": "a711b85a171b31575648441fc2751174eb49976de0eb219e69453bafdd6d8315"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_pandas.if_chunksize_is_None__from_pandas.return.new_dd_object_dsk_name_": {"doc_hash": "dd9e44631342f1a0f575bce54d4a5a150d584c41cb7aa9cd558661bd051a4f3d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowEngine._pandas_to_arrow_table_ArrowEngine.write_partition.if_return_metadata_.else_.return._": {"doc_hash": "67455fed80752332de83fc41e81a248e05d304adeefbf602e2bdba85eb46408c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_ParquetSubgraph.__getitem___ParquetSubgraph.__getitem__.return._": {"doc_hash": "b316b7b69d8c7c0d44caf7f4be03c40851b195d900cc42a8388c8927bc6f43a3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_ParquetSubgraph.__len___ParquetSubgraph.map_tasks.return.self": {"doc_hash": "5a56ab0ab1205d889bfe01f6243f8e2afbea45a41bc08eb9c9c57df6c198883e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_BlockwiseParquet_BlockwiseParquet.__repr__.return._BlockwiseParquet_name_": {"doc_hash": "98bcb5664274c8361d8866237fefb38aa35a70f81e2c205e81aa5a64c809c367"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py_read_sql_table.sa_read_sql_table.if_head_rows_0_.else_.if_divisions_is_None_and_.raise_ValueError_": {"doc_hash": "6f3d3898aee3be24d3157c719db4d22fb140f0d0ea634ae7f30d00566e01cd59"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py_to_sql.if_not_isinstance_uri_st_": {"doc_hash": "1df8f8164a7e6f3acb4e5ccf3ab5066f37e4a88297eca262685f6188fd860348"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_optimize_blockwise_parquet_test_optimize_blockwise_parquet.None_2": {"doc_hash": "e30e8d3d37f10cd48ead4393bd8a04d495307e995e410eb03bec5a14213b3ec6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_divisions_with_null_partition_test_divisions_with_null_partition.assert_ddf_read_divisions": {"doc_hash": "99bb17dd5569a102ce88c80357e3971a73e5868efa24a6035198830aff95b8ae"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parquet_pyarrow_write_empty_metadata_test_parquet_pyarrow_write_empty_metadata.try_.except_AttributeError_.pytest_fail_Unexpected_A": {"doc_hash": "c60382103bd63b2b23456be54bdf195a62a6c6e59ea035b3ad59bd43ed335a12"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parquet_pyarrow_write_empty_metadata_append_": {"doc_hash": "feaa01b4957d3ac11c0cef848e4ae4b38c7e12a1dbf0c12c51665248010d8484"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_passing_engine_as_uri_raises_helpful_error_test_passing_engine_as_uri_raises_helpful_error.with_tmpfile_as_f_.with_pytest_raises_ValueE.ddf_to_sql_test_engine": {"doc_hash": "eadbb84bdd1fefed74deed21f49cb924f9bda130abd8b2d236e002043a4eef3d"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_meta_test_meta_no_head_rows.None_1": {"doc_hash": "f584e1542ba8914decb77c8f9036d4e7fbc9e1b2ea8e9920082f714aaa1152de"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_no_meta_no_head_rows_test_datetimes.with_tmpfile_as_f_.assert_eq_data_map_partit": {"doc_hash": "4c40467f66c2e6d56be833bcc8769699fe48a9413805a95d5618b206163adbfb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_key_stringify_key_stringify.return.task": {"doc_hash": "b0c39fea59be274cdfc93366a0703ca9083f9c38edef2c893db534de89afb5f9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_SimpleShuffleLayer_SimpleShuffleLayer.__dask_distributed_pack__.return._": {"doc_hash": "3be129c2cf217a5e67b14217b2ac1ef2e6fc22e3fdc37c243181cfd2fd30e8ac"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_SimpleShuffleLayer.__dask_distributed_unpack___SimpleShuffleLayer.__dask_distributed_unpack__.dependencies_update_": {"doc_hash": "f3b3874ea43436305aba5619ca4b3d3becfb144da429dacc6d62faa8308ba152"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_SimpleShuffleLayer._keys_to_parts_SimpleShuffleLayer._cull_dependencies.return.deps": {"doc_hash": "eda35795edeed2e57062c026277d56d170a970439f67468bd8ca4becfbfc7094"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_SimpleShuffleLayer._cull_SimpleShuffleLayer.cull.if_parts_out_self_part.else_.return.self_culled_deps": {"doc_hash": "ac7cc426eaa770b0b28d93e96cb2dcbe0bd45605ae9fb786f9bce774e2fbfd0b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_SimpleShuffleLayer._construct_graph_SimpleShuffleLayer._construct_graph.return.dsk": {"doc_hash": "b04b86a4c0ad288bfca508afeb1351d446f2e39d56b84b8ad5e36e452ea7bb27"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_ShuffleLayer_ShuffleLayer.__dask_distributed_pack__.return.ret": {"doc_hash": "8d3305b39b5ddcba860ce91a21fb29db581824d6da88f0e14ac691aafae4a15d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_ShuffleLayer._cull_dependencies_ShuffleLayer._cull.return.ShuffleLayer_": {"doc_hash": "0ad0524c1da5d9bf63637372000e5fca804a2dd62520c874ee6d7c184c47c281"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_ShuffleLayer._construct_graph_ShuffleLayer._construct_graph.return.dsk": {"doc_hash": "e9d543929731b8efedfb219016d7e64e8de2d5419eb55c16a11bc0551b3718ff"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_rearrange_by_column_disk__noop.return.x": {"doc_hash": "6ff3511ada9de3b3032dc740e19c3e94207390e2cd9fbbb27039562325807949"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_rearrange_by_column_tasks_rearrange_by_column_tasks.max_branch.max_branch_or_32": {"doc_hash": "c6644431e368ac6a145dfd3f811253acf2609438c392450a6ccb498b1c23ab41"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_rearrange_by_column_tasks.if_npartitions_or_df_npa_rearrange_by_column_tasks.return.df2": {"doc_hash": "b022dc5ed6ab04218e212b298087a346f3432dbea0e8faab70094fb16dac19c6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_fuse_roots_test_fuse_roots.hlg_validate_": {"doc_hash": "eff715eccde0079c8407c11c5c287257d18b34581fc1d51ec39a6e947caaac4a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_attrs_dataframe_test_attrs_series.assert_s_fillna_1_attrs_": {"doc_hash": "129d8ffed9dc01d360cf1594839bd9ba5af24c82f9989419f405de07414b9779"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_attrs_series_in_dataframes_": {"doc_hash": "2bcb83cb90b4d22ab3b6bbfe8c84904e82d91396a4b6a9d97887718488bf9d2b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_groupby_concat_cudf_test_groupby_concat_cudf.assert_eq_res_dd_compute_": {"doc_hash": "c2d059a12548a2d0428b71f8c21d907fdf6be6bf1dd9b0684693d462acef0e63"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_categorical_join_test_categorical_join.if_dd__compat_PANDAS_GT_1.else_.assert_expected_values_": {"doc_hash": "b5cd84249a1af33503a36df354c8f5908674c3746f73b3104947a6d4e397b79e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_categorical_merge_with_columns_missing_from_left_test_categorical_merge_with_columns_missing_from_left.assert_assert_eq_expected": {"doc_hash": "4bae9d35b02a06fb413528baaa50becdc13cc5145f461a7f2f1fe54269d22b26"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_categorical_merge_with_merge_column_cat_in_one_and_not_other_upcasts_test_categorical_merge_with_merge_column_cat_in_one_and_not_other_upcasts.assert_assert_eq_expected": {"doc_hash": "b38005f64771c1cb6c6c5b1c4e7acf98087db11d3a36e7898c7fe9c9cdacfd88"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_categorical_merge_retains_category_dtype_": {"doc_hash": "932aa446691462331c0ff9343e965e6dc928a2b671699d54b05ca31723fd0ba3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_dataframe_shuffle_on_tasks_api_test_set_index_overlap.assert_eq_a_b_": {"doc_hash": "0ebfdf0990a552302beec3ab19458e5bcec5d0008218a9e439a993dd2c8b7c85"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_shuffle_hlg_layer_test_shuffle_hlg_layer.assert_dsk_dict_culled_": {"doc_hash": "28411a0d8f7ef6f28b9fad6890d5f727988f1b154570b825dbf23401af64b143"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_shuffle_hlg_layer_serialize_": {"doc_hash": "af430332886db9175d597b814d69a79bb4ad405de845bb60da974eafdde7a7d0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_abc_compute_layer_dependencies.return.ret": {"doc_hash": 
"c1960c511bc5eda8186d03caf63f939cde67d9797866eb6a057588f70d2d6581"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer_Layer.get_output_keys.return.self_keys_": {"doc_hash": "59c88cd557274554dfb4ad2016d0e69fb09b10f48aadb0e8d76192b6492d34d4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.cull_Layer.cull.return.BasicLayer_out_ret_deps": {"doc_hash": "09cd382d74a6b12229e2a80c36dcef23bf4ec1375b464da473a7b7280f07afba"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.get_dependencies_Layer.map_tasks.return.BasicLayer_k_func_v_fo": {"doc_hash": "e895766cc638a893010e884c0d7ce642d63052edb949c7dffdd35033420220ff"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.__dask_distributed_pack___Layer.__dask_distributed_pack__.return.None": {"doc_hash": "b84255ee06aa3b13639f2a5911b6482f9412349c91f937b07dd4ff54499d80cd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.__dask_distributed_unpack___Layer.__dask_distributed_unpack__.raise_NotImplementedError": {"doc_hash": "57bf81537f473028f30608a9e0a0a766d8aef4396ac47911372b821262826da4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.__reduce___Layer.__copy__.return.obj": {"doc_hash": "edd8669fd34832da0456a206334385b81cd0731a9ce97a22d3021cd974f7a61f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_BasicLayer_BasicLayer.get_dependencies.return.self_dependencies_key_": {"doc_hash": "f0f5f69f58f530a4af674ab9955c1e4466073f631af57d25b6e3d7bfb836d793"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph_HighLevelGraph._Task_graph_composed_of": {"doc_hash": "edafdebb014e73a0ac53c42253f1af5a1abe57e0c6b2b06cf3e249fa162ae060"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.__init___HighLevelGraph.__init__.self.layers_5._": {"doc_hash": "df80a1a2a9f5bc87ab6921f7146f1cad50b26762eec94872ae2886cda9ae2868"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.__getitem___HighLevelGraph.keyset.return.self__keys": {"doc_hash": "6da3099479817c33960c51919c3f649eb6b20a8dfc5d12c694607198f23ae8d6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.get_all_external_keys_HighLevelGraph.get_all_external_keys.return.self__all_external_keys": {"doc_hash": "23c04c4c142b135b0a1a4aa69b3e16402aaa735eac3fca0278f2a88fdfa05780"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.get_all_dependencies_HighLevelGraph.get_all_dependencies.return.self_key_dependencies": {"doc_hash": "ed68d406c0804c774e6254aca18080a8962f19e2c6a6e44ba239be596b6835f3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.dependents_HighLevelGraph.visualize.return.graphviz_to_file_g_filen": {"doc_hash": "9362e4933fcf93de1280ed204da453fec784b0169963d2912f98761637f39b2b"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph._toposort_layers_HighLevelGraph._toposort_layers.return.ret": {"doc_hash": "a587f2d92192137016891372de2ce8f0fd2d037107d262d1721363314d58b4ce"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.cull_HighLevelGraph.cull.return.HighLevelGraph_ret_layers": {"doc_hash": "020e7394edac1d8e444e05487207adc4c19b5a00b5437c0fb297c95c17d8c630"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.map_basic_layers_HighLevelGraph.map_basic_layers.return.HighLevelGraph_layers_se": {"doc_hash": "6ec9f73af25d79ab75d54c12661ad576e6908ffa729d0eb171f74e0f11c5ac37"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.map_tasks_HighLevelGraph.map_tasks.return.HighLevelGraph_": {"doc_hash": "a9df28448d4549f5cd59ab3a564290a3f8df616624323e9c81707acf80bb7583"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_from_distutils_version_im_f3.pass": {"doc_hash": "e64af7bae7c97c92adab2be2de56995c1ffbc7d5b169366cd4c29d2af4c21955"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_from_functools_import_par_test_basic.assert_all_isinstance_lay": {"doc_hash": "79706eef100759e73b3fc3345da55b6816278b071d62733554364333d984a482"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_keys_values_items_methods_test_keys_values_items_methods.assert_items_k_v_f": {"doc_hash": "fd4d5b0f01bf544ee16e233c2e5e5aba13147fb7870a5227dee820195c90b431"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_cull_test_cull.assert_dict_culled_by_y_": {"doc_hash": "aecfdac9808d5d37d485d110915a682897803a058c30a06f9fb3a1cb6d08c73b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_map_basic_layers_test_map_basic_layers.assert_eq_y_42_3_": {"doc_hash": "855c3ce0721dd1946dcc48333b71341aef77192dc4c0e6789c64caf34536605a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_map_tasks_": {"doc_hash": "6858a2a221571a43fea746ec8441691a2d80ff81447d75641e3b726eef9f1640"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_SubgraphCallable_with_numpy_test_SubgraphCallable_with_numpy.assert_f1_f4": {"doc_hash": "7f9eb9e1aa534bb0ad2a32cf553c6f981d8d11659ab93f8d11f1638d570e2443"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/scripts/scheduling.py_dense_dense.return.d_x_height_1_i_": {"doc_hash": "9f34075ab05f58646c81e74b1d1da3d8d6a7648c1037d17460e2d09b45d4880f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/scripts/scheduling.py_np_": {"doc_hash": "44e9e129ba4e27047139d813465f415ed2f46cc056c2f9a21669c1e704fbba91"}}, "docstore/data": {"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/conftest.py_pytest_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/conftest.py_pytest_", "embedding": null, "metadata": {"file_path": "conftest.py", 
"file_name": "conftest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 26, "span_ids": ["imports", "pytest_addoption", "pytest_runtest_setup"], "tokens": 207}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\n# The doctests in these files fail due to either:\n# - Non-required dependencies not being installed\n# - Imported doctests due to pulling the docstrings from other packages\n# (e.g. `numpy`). No need to run these doctests.\ncollect_ignore = [\n \"dask/bytes/hdfs3.py\",\n \"dask/bytes/pyarrow.py\",\n \"dask/bytes/s3.py\",\n \"dask/array/ghost.py\",\n \"dask/array/fft.py\",\n \"dask/dataframe/io/io.py\",\n \"dask/dataframe/io/parquet/arrow.py\",\n \"dask/dot.py\",\n]\n\n\ndef pytest_addoption(parser):\n parser.addoption(\"--runslow\", action=\"store_true\", help=\"run slow tests\")\n\n\ndef pytest_runtest_setup(item):\n if \"slow\" in item.keywords and not item.config.getoption(\"--runslow\"):\n pytest.skip(\"need --runslow option to run\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/__init__.py__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/__init__.py__", "embedding": null, "metadata": {"file_path": "dask/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 20, "span_ids": ["imports"], "tokens": 102}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from . 
import config, datasets\nfrom .core import istask\nfrom .local import get_sync as get\n\ntry:\n from .delayed import delayed\nexcept ImportError:\n pass\ntry:\n from .base import visualize, compute, persist, optimize, is_dask_collection\nexcept ImportError:\n pass\n\nfrom ._version import get_versions\n\nversions = get_versions()\n__version__ = versions[\"version\"]\n__git_revision__ = versions[\"full-revisionid\"]\ndel get_versions, versions", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py__This_file_helps_to_comp_register_vcs_handler.return.decorate": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py__This_file_helps_to_comp_register_vcs_handler.return.decorate", "embedding": null, "metadata": {"file_path": "dask/_version.py", "file_name": "_version.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 67, "span_ids": ["VersioneerConfig", "impl", "NotThisMethod", "get_keywords", "register_vcs_handler", "docstring", "get_config"], "tokens": 482}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# This file helps to compute a version number in source trees obtained from\n# git-archive tarball (such as those provided by github's download-from-tag\n# feature). Distribution tarballs (built by setup.py sdist) and build\n# directories (produced by setup.py build) will contain a much shorter file\n# that just contains the computed version number.\n\n# This file is released into the public domain. Generated by\n# versioneer-0.16 (https://github.com/warner/python-versioneer)\n\n\"\"\"Git implementation of _version.py.\"\"\"\n\nimport errno\nimport os\nimport re\nimport subprocess\nimport sys\n\n\ndef get_keywords():\n \"\"\"Get the keywords needed to look up the version information.\"\"\"\n # these strings will be replaced by git during git-archive.\n # setup.py/versioneer.py will grep for the variable names, so they must\n # each be defined on a line of their own. 
_version.py will just call\n # get_keywords().\n git_refnames = \"$Format:%d$\"\n git_full = \"$Format:%H$\"\n keywords = {\"refnames\": git_refnames, \"full\": git_full}\n return keywords\n\n\nclass VersioneerConfig:\n \"\"\"Container for Versioneer configuration parameters.\"\"\"\n\n\ndef get_config():\n \"\"\"Create, populate and return the VersioneerConfig() object.\"\"\"\n # these strings are filled in when 'setup.py versioneer' creates\n # _version.py\n cfg = VersioneerConfig()\n cfg.VCS = \"git\"\n cfg.style = \"pep440\"\n cfg.tag_prefix = \"\"\n cfg.parentdir_prefix = \"dask-\"\n cfg.versionfile_source = \"dask/_version.py\"\n cfg.verbose = False\n return cfg\n\n\nclass NotThisMethod(Exception):\n \"\"\"Exception raised if a method is not valid for the current scenario.\"\"\"\n\n\nLONG_VERSION_PY = {}\nHANDLERS = {}\n\n\ndef register_vcs_handler(vcs, method): # decorator\n \"\"\"Decorator to mark a method as the handler for a particular VCS.\"\"\"\n\n def decorate(f):\n \"\"\"Store f in HANDLERS[vcs][method].\"\"\"\n if vcs not in HANDLERS:\n HANDLERS[vcs] = {}\n HANDLERS[vcs][method] = f\n return f\n\n return decorate", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_run_command_run_command.return.stdout": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_run_command_run_command.return.stdout", "embedding": null, "metadata": {"file_path": "dask/_version.py", "file_name": "_version.py", "file_type": "text/x-python", "category": "implementation", "start_line": 70, "end_line": 102, "span_ids": ["run_command"], "tokens": 246}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):\n \"\"\"Call the given command(s).\"\"\"\n assert isinstance(commands, list)\n p = None\n for c in commands:\n try:\n dispcmd = str([c] + args)\n # remember shell=False, so use git.cmd on windows, not just git\n p = subprocess.Popen(\n [c] + args,\n cwd=cwd,\n stdout=subprocess.PIPE,\n stderr=(subprocess.PIPE if hide_stderr else None),\n )\n break\n except EnvironmentError:\n e = sys.exc_info()[1]\n if e.errno == errno.ENOENT:\n continue\n if verbose:\n print(\"unable to run %s\" % dispcmd)\n print(e)\n return None\n else:\n if verbose:\n print(\"unable to find command, tried %s\" % (commands,))\n return None\n stdout = p.communicate()[0].strip().decode()\n if p.returncode != 0:\n if verbose:\n print(\"unable to run %s (error)\" % dispcmd)\n return None\n return stdout", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_versions_from_parentdir_versions_from_parentdir.return._": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_versions_from_parentdir_versions_from_parentdir.return._", "embedding": null, "metadata": {"file_path": "dask/_version.py", "file_name": "_version.py", "file_type": "text/x-python", "category": "implementation", "start_line": 105, "end_line": 124, "span_ids": ["versions_from_parentdir"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def versions_from_parentdir(parentdir_prefix, root, verbose):\n \"\"\"Try to determine the version from the parent directory name.\n\n Source tarballs conventionally unpack into a directory that includes\n both the project name and a version string.\n \"\"\"\n dirname = os.path.basename(root)\n if not dirname.startswith(parentdir_prefix):\n if verbose:\n print(\n \"guessing rootdir is '%s', but '%s' doesn't start with \"\n \"prefix '%s'\" % (root, dirname, parentdir_prefix)\n )\n raise NotThisMethod(\"rootdir doesn't start with parentdir_prefix\")\n return {\n \"version\": dirname[len(parentdir_prefix) :],\n \"full-revisionid\": None,\n \"dirty\": False,\n \"error\": None,\n }", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_git_get_keywords_git_get_keywords.return.keywords": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_git_get_keywords_git_get_keywords.return.keywords", "embedding": null, "metadata": {"file_path": "dask/_version.py", "file_name": "_version.py", "file_type": "text/x-python", "category": "implementation", "start_line": 127, "end_line": 149, "span_ids": ["git_get_keywords"], "tokens": 214}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@register_vcs_handler(\"git\", \"get_keywords\")\ndef git_get_keywords(versionfile_abs):\n \"\"\"Extract version information from the given file.\"\"\"\n # the code embedded in _version.py can just fetch the value of these\n # keywords. When used from setup.py, we don't want to import _version.py,\n # so we do it with a regexp instead. 
This function is not used from\n # _version.py.\n keywords = {}\n try:\n f = open(versionfile_abs, \"r\")\n for line in f.readlines():\n if line.strip().startswith(\"git_refnames =\"):\n mo = re.search(r'=\\s*\"(.*)\"', line)\n if mo:\n keywords[\"refnames\"] = mo.group(1)\n if line.strip().startswith(\"git_full =\"):\n mo = re.search(r'=\\s*\"(.*)\"', line)\n if mo:\n keywords[\"full\"] = mo.group(1)\n f.close()\n except EnvironmentError:\n pass\n return keywords", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_git_versions_from_keywords_git_versions_from_keywords.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_git_versions_from_keywords_git_versions_from_keywords.return._", "embedding": null, "metadata": {"file_path": "dask/_version.py", "file_name": "_version.py", "file_type": "text/x-python", "category": "implementation", "start_line": 152, "end_line": 200, "span_ids": ["git_versions_from_keywords"], "tokens": 567}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@register_vcs_handler(\"git\", \"keywords\")\ndef git_versions_from_keywords(keywords, tag_prefix, verbose):\n \"\"\"Get version information from git keywords.\"\"\"\n if not keywords:\n raise NotThisMethod(\"no keywords at all, weird\")\n refnames = keywords[\"refnames\"].strip()\n if refnames.startswith(\"$Format\"):\n if verbose:\n print(\"keywords are unexpanded, not using\")\n raise NotThisMethod(\"unexpanded keywords, not a git-archive tarball\")\n refs = set([r.strip() for r in refnames.strip(\"()\").split(\",\")])\n # starting in git-1.8.3, tags are listed as \"tag: foo-1.0\" instead of\n # just \"foo-1.0\". If we see a \"tag: \" prefix, prefer those.\n TAG = \"tag: \"\n tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)])\n if not tags:\n # Either we're using git < 1.8.3, or there really are no tags. We use\n # a heuristic: assume all version tags have a digit. The old git %d\n # expansion behaves like git log --decorate=short and strips out the\n # refs/heads/ and refs/tags/ prefixes that would let us distinguish\n # between branches and tags. By ignoring refnames without digits, we\n # filter out many common branch names like \"release\" and\n # \"stabilization\", as well as \"HEAD\" and \"master\".\n tags = set([r for r in refs if re.search(r\"\\d\", r)])\n if verbose:\n print(\"discarding '%s', no digits\" % \",\".join(refs - tags))\n if verbose:\n print(\"likely tags: %s\" % \",\".join(sorted(tags)))\n for ref in sorted(tags):\n # sorting will prefer e.g. 
\"2.0\" over \"2.0rc1\"\n if ref.startswith(tag_prefix):\n r = ref[len(tag_prefix) :]\n if verbose:\n print(\"picking %s\" % r)\n return {\n \"version\": r,\n \"full-revisionid\": keywords[\"full\"].strip(),\n \"dirty\": False,\n \"error\": None,\n }\n # no suitable tags, so version is \"0+unknown\", but full hex is still there\n if verbose:\n print(\"no suitable tags, using unknown + full revision id\")\n return {\n \"version\": \"0+unknown\",\n \"full-revisionid\": keywords[\"full\"].strip(),\n \"dirty\": False,\n \"error\": \"no suitable tags\",\n }", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_git_pieces_from_vcs_git_pieces_from_vcs.return.pieces": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_git_pieces_from_vcs_git_pieces_from_vcs.return.pieces", "embedding": null, "metadata": {"file_path": "dask/_version.py", "file_name": "_version.py", "file_type": "text/x-python", "category": "implementation", "start_line": 203, "end_line": 293, "span_ids": ["git_pieces_from_vcs"], "tokens": 787}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@register_vcs_handler(\"git\", \"pieces_from_vcs\")\ndef git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):\n \"\"\"Get version from 'git describe' in the root of the source tree.\n\n This only gets called if the git-archive 'subst' keywords were *not*\n expanded, and _version.py hasn't already been rewritten with a short\n version string, meaning we're inside a checked out source tree.\n \"\"\"\n if not os.path.exists(os.path.join(root, \".git\")):\n if verbose:\n print(\"no .git in %s\" % root)\n raise NotThisMethod(\"no .git directory\")\n\n GITS = [\"git\"]\n if sys.platform == \"win32\":\n GITS = [\"git.cmd\", \"git.exe\"]\n # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]\n # if there isn't one, this yields HEX[-dirty] (no NUM)\n describe_out = run_command(\n GITS,\n [\n \"describe\",\n \"--tags\",\n \"--dirty\",\n \"--always\",\n \"--long\",\n \"--match\",\n \"%s*\" % tag_prefix,\n ],\n cwd=root,\n )\n # --long was added in git-1.5.5\n if describe_out is None:\n raise NotThisMethod(\"'git describe' failed\")\n describe_out = describe_out.strip()\n full_out = run_command(GITS, [\"rev-parse\", \"HEAD\"], cwd=root)\n if full_out is None:\n raise NotThisMethod(\"'git rev-parse' failed\")\n full_out = full_out.strip()\n\n pieces = {}\n pieces[\"long\"] = full_out\n pieces[\"short\"] = full_out[:7] # maybe improved later\n pieces[\"error\"] = None\n\n # parse describe_out. 
It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]\n # TAG might have hyphens.\n git_describe = describe_out\n\n # look for -dirty suffix\n dirty = git_describe.endswith(\"-dirty\")\n pieces[\"dirty\"] = dirty\n if dirty:\n git_describe = git_describe[: git_describe.rindex(\"-dirty\")]\n\n # now we have TAG-NUM-gHEX or HEX\n\n if \"-\" in git_describe:\n # TAG-NUM-gHEX\n mo = re.search(r\"^(.+)-(\\d+)-g([0-9a-f]+)$\", git_describe)\n if not mo:\n # unparseable. Maybe git-describe is misbehaving?\n pieces[\"error\"] = \"unable to parse git-describe output: '%s'\" % describe_out\n return pieces\n\n # tag\n full_tag = mo.group(1)\n if not full_tag.startswith(tag_prefix):\n if verbose:\n fmt = \"tag '%s' doesn't start with prefix '%s'\"\n print(fmt % (full_tag, tag_prefix))\n pieces[\"error\"] = \"tag '%s' doesn't start with prefix '%s'\" % (\n full_tag,\n tag_prefix,\n )\n return pieces\n pieces[\"closest-tag\"] = full_tag[len(tag_prefix) :]\n\n # distance: number of commits since tag\n pieces[\"distance\"] = int(mo.group(2))\n\n # commit: short hex revision ID\n pieces[\"short\"] = mo.group(3)\n\n else:\n # HEX: no tags\n pieces[\"closest-tag\"] = None\n count_out = run_command(GITS, [\"rev-list\", \"HEAD\", \"--count\"], cwd=root)\n pieces[\"distance\"] = int(count_out) # total number of commits\n\n return pieces", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_plus_or_dot_render_pep440.return.rendered": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_plus_or_dot_render_pep440.return.rendered", "embedding": null, "metadata": {"file_path": "dask/_version.py", "file_name": "_version.py", "file_type": "text/x-python", "category": "implementation", "start_line": 296, "end_line": 324, "span_ids": ["plus_or_dot", "render_pep440"], "tokens": 257}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def plus_or_dot(pieces):\n \"\"\"Return a + if we don't already have one, else return a .\"\"\"\n if \"+\" in pieces.get(\"closest-tag\", \"\"):\n return \".\"\n return \"+\"\n\n\ndef render_pep440(pieces):\n \"\"\"Build up version string, with post-release \"local version identifier\".\n\n Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you\n get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty\n\n Exceptions:\n 1: no tags. git_describe was just HEX. 
0+untagged.DISTANCE.gHEX[.dirty]\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"] or pieces[\"dirty\"]:\n rendered += plus_or_dot(pieces)\n rendered += \"%d.g%s\" % (pieces[\"distance\"], pieces[\"short\"])\n if pieces[\"dirty\"]:\n rendered += \".dirty\"\n else:\n # exception #1\n rendered = \"0+untagged.%d.g%s\" % (pieces[\"distance\"], pieces[\"short\"])\n if pieces[\"dirty\"]:\n rendered += \".dirty\"\n return rendered", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_render_pep440_pre_render_pep440_post.return.rendered": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_render_pep440_pre_render_pep440_post.return.rendered", "embedding": null, "metadata": {"file_path": "dask/_version.py", "file_name": "_version.py", "file_type": "text/x-python", "category": "implementation", "start_line": 327, "end_line": 367, "span_ids": ["render_pep440_post", "render_pep440_pre"], "tokens": 321}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def render_pep440_pre(pieces):\n \"\"\"TAG[.post.devDISTANCE] -- No -dirty.\n\n Exceptions:\n 1: no tags. 0.post.devDISTANCE\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"]:\n rendered += \".post.dev%d\" % pieces[\"distance\"]\n else:\n # exception #1\n rendered = \"0.post.dev%d\" % pieces[\"distance\"]\n return rendered\n\n\ndef render_pep440_post(pieces):\n \"\"\"TAG[.postDISTANCE[.dev0]+gHEX] .\n\n The \".dev0\" means dirty. Note that .dev0 sorts backwards\n (a dirty tree will appear \"older\" than the corresponding clean one),\n but you shouldn't be releasing software with -dirty anyways.\n\n Exceptions:\n 1: no tags. 
0.postDISTANCE[.dev0]\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"] or pieces[\"dirty\"]:\n rendered += \".post%d\" % pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n rendered += plus_or_dot(pieces)\n rendered += \"g%s\" % pieces[\"short\"]\n else:\n # exception #1\n rendered = \"0.post%d\" % pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n rendered += \"+g%s\" % pieces[\"short\"]\n return rendered", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_render_pep440_old_render_pep440_old.return.rendered": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_render_pep440_old_render_pep440_old.return.rendered", "embedding": null, "metadata": {"file_path": "dask/_version.py", "file_name": "_version.py", "file_type": "text/x-python", "category": "implementation", "start_line": 370, "end_line": 389, "span_ids": ["render_pep440_old"], "tokens": 143}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def render_pep440_old(pieces):\n \"\"\"TAG[.postDISTANCE[.dev0]] .\n\n The \".dev0\" means dirty.\n\n Exceptions:\n 1: no tags. 0.postDISTANCE[.dev0]\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"] or pieces[\"dirty\"]:\n rendered += \".post%d\" % pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n else:\n # exception #1\n rendered = \"0.post%d\" % pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n return rendered", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_render_git_describe_render_git_describe.return.rendered": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_render_git_describe_render_git_describe.return.rendered", "embedding": null, "metadata": {"file_path": "dask/_version.py", "file_name": "_version.py", "file_type": "text/x-python", "category": "implementation", "start_line": 392, "end_line": 409, "span_ids": ["render_git_describe"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def render_git_describe(pieces):\n \"\"\"TAG[-DISTANCE-gHEX][-dirty].\n\n Like 'git describe --tags --dirty --always'.\n\n Exceptions:\n 1: no tags. 
HEX[-dirty] (note: no 'g' prefix)\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"]:\n rendered += \"-%d-g%s\" % (pieces[\"distance\"], pieces[\"short\"])\n else:\n # exception #1\n rendered = pieces[\"short\"]\n if pieces[\"dirty\"]:\n rendered += \"-dirty\"\n return rendered", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_render_git_describe_long_render_git_describe_long.return.rendered": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_render_git_describe_long_render_git_describe_long.return.rendered", "embedding": null, "metadata": {"file_path": "dask/_version.py", "file_name": "_version.py", "file_type": "text/x-python", "category": "implementation", "start_line": 412, "end_line": 429, "span_ids": ["render_git_describe_long"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def render_git_describe_long(pieces):\n \"\"\"TAG-DISTANCE-gHEX[-dirty].\n\n Like 'git describe --tags --dirty --always --long'.\n The distance/hash is unconditional.\n\n Exceptions:\n 1: no tags. HEX[-dirty] (note: no 'g' prefix)\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n rendered += \"-%d-g%s\" % (pieces[\"distance\"], pieces[\"short\"])\n else:\n # exception #1\n rendered = pieces[\"short\"]\n if pieces[\"dirty\"]:\n rendered += \"-dirty\"\n return rendered", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_render_render.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_render_render.return._", "embedding": null, "metadata": {"file_path": "dask/_version.py", "file_name": "_version.py", "file_type": "text/x-python", "category": "implementation", "start_line": 432, "end_line": 465, "span_ids": ["render"], "tokens": 259}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def render(pieces, style):\n \"\"\"Render the given version pieces into the requested style.\"\"\"\n if pieces[\"error\"]:\n return {\n \"version\": \"unknown\",\n \"full-revisionid\": pieces.get(\"long\"),\n \"dirty\": None,\n \"error\": pieces[\"error\"],\n }\n\n if not style or style == \"default\":\n style = \"pep440\" # the default\n\n if style == \"pep440\":\n rendered = render_pep440(pieces)\n elif style == \"pep440-pre\":\n rendered = render_pep440_pre(pieces)\n elif style == \"pep440-post\":\n rendered = render_pep440_post(pieces)\n 
elif style == \"pep440-old\":\n rendered = render_pep440_old(pieces)\n elif style == \"git-describe\":\n rendered = render_git_describe(pieces)\n elif style == \"git-describe-long\":\n rendered = render_git_describe_long(pieces)\n else:\n raise ValueError(\"unknown style '%s'\" % style)\n\n return {\n \"version\": rendered,\n \"full-revisionid\": pieces[\"long\"],\n \"dirty\": pieces[\"dirty\"],\n \"error\": None,\n }", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_get_versions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_get_versions_", "embedding": null, "metadata": {"file_path": "dask/_version.py", "file_name": "_version.py", "file_type": "text/x-python", "category": "implementation", "start_line": 468, "end_line": 516, "span_ids": ["get_versions"], "tokens": 363}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_versions():\n \"\"\"Get version information or return default if unable to do so.\"\"\"\n # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have\n # __file__, we can work backwards from there to the root. Some\n # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which\n # case we can only use expanded keywords.\n\n cfg = get_config()\n verbose = cfg.verbose\n\n try:\n return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)\n except NotThisMethod:\n pass\n\n try:\n root = os.path.realpath(__file__)\n # versionfile_source is the relative path from the top of the source\n # tree (where the .git directory might live) to this file. 
Invert\n # this to find the root from __file__.\n for i in cfg.versionfile_source.split(\"/\"):\n root = os.path.dirname(root)\n except NameError:\n return {\n \"version\": \"0+unknown\",\n \"full-revisionid\": None,\n \"dirty\": None,\n \"error\": \"unable to find root of source tree\",\n }\n\n try:\n pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)\n return render(pieces, cfg.style)\n except NotThisMethod:\n pass\n\n try:\n if cfg.parentdir_prefix:\n return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)\n except NotThisMethod:\n pass\n\n return {\n \"version\": \"0+unknown\",\n \"full-revisionid\": None,\n \"dirty\": None,\n \"error\": \"unable to compute version\",\n }", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/__init__.py_try__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/__init__.py_try__", "embedding": null, "metadata": {"file_path": "dask/array/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 252, "span_ids": ["impl"], "tokens": 1143}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "try:\n from .blockwise import blockwise, atop\n from .core import (\n Array,\n block,\n concatenate,\n stack,\n from_array,\n store,\n map_blocks,\n to_hdf5,\n to_npy_stack,\n from_npy_stack,\n from_delayed,\n asarray,\n asanyarray,\n PerformanceWarning,\n broadcast_arrays,\n broadcast_to,\n from_zarr,\n to_zarr,\n unify_chunks,\n )\n from .tiledb_io import from_tiledb, to_tiledb\n from .numpy_compat import rollaxis, moveaxis\n from .chunk_types import register_chunk_type\n from .routines import (\n take,\n choose,\n argwhere,\n where,\n coarsen,\n insert,\n shape,\n union1d,\n ravel,\n roll,\n unique,\n squeeze,\n ptp,\n diff,\n ediff1d,\n gradient,\n bincount,\n digitize,\n histogram,\n cov,\n array,\n dstack,\n vstack,\n hstack,\n compress,\n extract,\n round,\n count_nonzero,\n flatnonzero,\n nonzero,\n unravel_index,\n around,\n isin,\n isnull,\n notnull,\n isclose,\n allclose,\n corrcoef,\n swapaxes,\n tensordot,\n transpose,\n dot,\n vdot,\n matmul,\n outer,\n apply_along_axis,\n apply_over_axes,\n result_type,\n atleast_1d,\n atleast_2d,\n atleast_3d,\n piecewise,\n flip,\n flipud,\n fliplr,\n einsum,\n average,\n )\n from .reshape import reshape\n from .ufunc import (\n add,\n subtract,\n multiply,\n divide,\n logaddexp,\n logaddexp2,\n true_divide,\n floor_divide,\n negative,\n power,\n remainder,\n mod,\n conj,\n exp,\n exp2,\n log,\n log2,\n log10,\n log1p,\n expm1,\n sqrt,\n square,\n cbrt,\n reciprocal,\n sin,\n cos,\n tan,\n arcsin,\n arccos,\n arctan,\n arctan2,\n hypot,\n sinh,\n cosh,\n tanh,\n arcsinh,\n arccosh,\n arctanh,\n deg2rad,\n rad2deg,\n greater,\n greater_equal,\n less,\n less_equal,\n not_equal,\n equal,\n maximum,\n bitwise_and,\n bitwise_or,\n bitwise_xor,\n bitwise_not,\n invert,\n minimum,\n logical_and,\n logical_or,\n logical_xor,\n logical_not,\n fmax,\n fmin,\n 
isreal,\n iscomplex,\n isfinite,\n isinf,\n isneginf,\n isposinf,\n isnan,\n signbit,\n copysign,\n nextafter,\n spacing,\n ldexp,\n fmod,\n floor,\n ceil,\n trunc,\n degrees,\n radians,\n rint,\n fix,\n angle,\n real,\n imag,\n clip,\n fabs,\n sign,\n absolute,\n i0,\n sinc,\n nan_to_num,\n frexp,\n modf,\n divide,\n frompyfunc,\n float_power,\n divmod,\n )\n from .reductions import (\n sum,\n prod,\n mean,\n std,\n var,\n any,\n all,\n min,\n max,\n median,\n moment,\n trace,\n argmin,\n argmax,\n nansum,\n nanmean,\n nanmedian,\n nanstd,\n nanvar,\n nanmin,\n nanmax,\n nanargmin,\n nanargmax,\n cumsum,\n cumprod,\n topk,\n argtopk,\n nanprod,\n nancumprod,\n nancumsum,\n reduction,\n )\n from .percentile import percentile\n from . import ma\n from . import random, linalg, overlap, fft, backends\n from .overlap import map_overlap\n from .wrap import ones, zeros, empty, full\n from .creation import ones_like, zeros_like, empty_like, full_like\n from .rechunk import rechunk\n from ..base import compute\n from .optimization import optimize\n from .creation import (\n arange,\n linspace,\n meshgrid,\n indices,\n diag,\n eye,\n triu,\n tril,\n fromfunction,\n tile,\n repeat,\n pad,\n diagonal,\n )\n from .gufunc import apply_gufunc, gufunc, as_gufunc\n from .utils import assert_eq\n\nexcept ImportError as e:\n msg = (\n \"Dask array requirements are not installed.\\n\\n\"\n \"Please either conda or pip install as follows:\\n\\n\"\n \" conda install dask # either conda install\\n\"\n ' python -m pip install \"dask[array]\" --upgrade # or python -m pip install'\n )\n raise ImportError(str(e) + \"\\n\\n\" + msg) from e", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/backends.py_tensordot_lookup_register_cupy._cupy_einsum.return.cupy_einsum_args_kwar": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/backends.py_tensordot_lookup_register_cupy._cupy_einsum.return.cupy_einsum_args_kwar", "embedding": null, "metadata": {"file_path": "dask/array/backends.py", "file_name": "backends.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 17, "span_ids": ["imports", "register_cupy"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from .core import tensordot_lookup, concatenate_lookup, einsum_lookup\n\n\n@tensordot_lookup.register_lazy(\"cupy\")\n@concatenate_lookup.register_lazy(\"cupy\")\ndef register_cupy():\n import cupy\n\n concatenate_lookup.register(cupy.ndarray, cupy.concatenate)\n tensordot_lookup.register(cupy.ndarray, cupy.tensordot)\n\n @einsum_lookup.register(cupy.ndarray)\n def _cupy_einsum(*args, **kwargs):\n # NB: cupy does not accept `order` or `casting` kwargs - ignore\n kwargs.pop(\"casting\", None)\n kwargs.pop(\"order\", None)\n return cupy.einsum(*args, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": 
"TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/backends.py_register_cupyx_register_cupyx.concatenate_lookup_regist": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/backends.py_register_cupyx_register_cupyx.concatenate_lookup_regist", "embedding": null, "metadata": {"file_path": "dask/array/backends.py", "file_name": "backends.py", "file_type": "text/x-python", "category": "implementation", "start_line": 20, "end_line": 45, "span_ids": ["register_cupyx"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@concatenate_lookup.register_lazy(\"cupyx\")\ndef register_cupyx():\n\n from cupyx.scipy.sparse import spmatrix\n\n try:\n from cupyx.scipy.sparse import hstack\n from cupyx.scipy.sparse import vstack\n except ImportError as e:\n raise ImportError(\n \"Stacking of sparse arrays requires at least CuPy version 8.0.0\"\n ) from e\n\n def _concat_cupy_sparse(L, axis=0):\n if axis == 0:\n return vstack(L)\n elif axis == 1:\n return hstack(L)\n else:\n msg = (\n \"Can only concatenate cupy sparse matrices for axis in \"\n \"{0, 1}. Got %s\" % axis\n )\n raise ValueError(msg)\n\n concatenate_lookup.register(spmatrix, _concat_cupy_sparse)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/backends.py_register_sparse_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/backends.py_register_sparse_", "embedding": null, "metadata": {"file_path": "dask/array/backends.py", "file_name": "backends.py", "file_type": "text/x-python", "category": "implementation", "start_line": 48, "end_line": 74, "span_ids": ["register_scipy_sparse", "register_sparse"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@tensordot_lookup.register_lazy(\"sparse\")\n@concatenate_lookup.register_lazy(\"sparse\")\ndef register_sparse():\n import sparse\n\n concatenate_lookup.register(sparse.COO, sparse.concatenate)\n tensordot_lookup.register(sparse.COO, sparse.tensordot)\n\n\n@concatenate_lookup.register_lazy(\"scipy\")\ndef register_scipy_sparse():\n import scipy.sparse\n\n def _concatenate(L, axis=0):\n if axis == 0:\n return scipy.sparse.vstack(L)\n elif axis == 1:\n return scipy.sparse.hstack(L)\n else:\n msg = (\n \"Can only concatenate scipy sparse matrices for axis in \"\n \"{0, 1}. 
Got %s\" % axis\n )\n raise ValueError(msg)\n\n concatenate_lookup.register(scipy.sparse.spmatrix, _concatenate)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/blockwise.py_numbers_blockwise._Tensor_operation_Gene": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/blockwise.py_numbers_blockwise._Tensor_operation_Gene", "embedding": null, "metadata": {"file_path": "dask/array/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 124, "span_ids": ["imports", "blockwise"], "tokens": 1155}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numbers\nimport warnings\n\nimport tlz as toolz\n\nfrom .. import base, utils\nfrom ..delayed import unpack_collections\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..blockwise import blockwise as core_blockwise\n\n\ndef blockwise(\n func,\n out_ind,\n *args,\n name=None,\n token=None,\n dtype=None,\n adjust_chunks=None,\n new_axes=None,\n align_arrays=True,\n concatenate=None,\n meta=None,\n **kwargs\n):\n \"\"\"Tensor operation: Generalized inner and outer products\n\n A broad class of blocked algorithms and patterns can be specified with a\n concise multi-index notation. 
The ``blockwise`` function applies an in-memory\n function across multiple blocks of multiple inputs in a variety of ways.\n Many dask.array operations are special cases of blockwise including\n elementwise, broadcasting, reductions, tensordot, and transpose.\n\n Parameters\n ----------\n func : callable\n Function to apply to individual tuples of blocks\n out_ind : iterable\n Block pattern of the output, something like 'ijk' or (1, 2, 3)\n *args : sequence of Array, index pairs\n Sequence like (x, 'ij', y, 'jk', z, 'i')\n **kwargs : dict\n Extra keyword arguments to pass to function\n dtype : np.dtype\n Datatype of resulting array.\n concatenate : bool, keyword only\n If true concatenate arrays along dummy indices, else provide lists\n adjust_chunks : dict\n Dictionary mapping index to function to be applied to chunk sizes\n new_axes : dict, keyword only\n New indexes and their dimension lengths\n\n Examples\n --------\n 2D embarrassingly parallel operation from two arrays, x, and y.\n\n >>> z = blockwise(operator.add, 'ij', x, 'ij', y, 'ij', dtype='f8') # z = x + y # doctest: +SKIP\n\n Outer product multiplying x by y, two 1-d vectors\n\n >>> z = blockwise(operator.mul, 'ij', x, 'i', y, 'j', dtype='f8') # doctest: +SKIP\n\n z = x.T\n\n >>> z = blockwise(np.transpose, 'ji', x, 'ij', dtype=x.dtype) # doctest: +SKIP\n\n The transpose case above is illustrative because it does same transposition\n both on each in-memory block by calling ``np.transpose`` and on the order\n of the blocks themselves, by switching the order of the index ``ij -> ji``.\n\n We can compose these same patterns with more variables and more complex\n in-memory functions\n\n z = X + Y.T\n\n >>> z = blockwise(lambda x, y: x + y.T, 'ij', x, 'ij', y, 'ji', dtype='f8') # doctest: +SKIP\n\n Any index, like ``i`` missing from the output index is interpreted as a\n contraction (note that this differs from Einstein convention; repeated\n indices do not imply contraction.) In the case of a contraction the passed\n function should expect an iterable of blocks on any array that holds that\n index. To receive arrays concatenated along contracted dimensions instead\n pass ``concatenate=True``.\n\n Inner product multiplying x by y, two 1-d vectors\n\n >>> def sequence_dot(x_blocks, y_blocks):\n ... result = 0\n ... for x, y in zip(x_blocks, y_blocks):\n ... result += x.dot(y)\n ... return result\n\n >>> z = blockwise(sequence_dot, '', x, 'i', y, 'i', dtype='f8') # doctest: +SKIP\n\n Add new single-chunk dimensions with the ``new_axes=`` keyword, including\n the length of the new dimension. New dimensions will always be in a single\n chunk.\n\n >>> def f(x):\n ... return x[:, None] * np.ones((1, 5))\n\n >>> z = blockwise(f, 'az', x, 'a', new_axes={'z': 5}, dtype=x.dtype) # doctest: +SKIP\n\n New dimensions can also be multi-chunk by specifying a tuple of chunk\n sizes. This has limited utility as is (because the chunks are all the\n same), but the resulting graph can be modified to achieve more useful\n results (see ``da.map_blocks``).\n\n >>> z = blockwise(f, 'az', x, 'a', new_axes={'z': (5, 5)}, dtype=x.dtype) # doctest: +SKIP\n\n If the applied function changes the size of each chunk you can specify this\n with a ``adjust_chunks={...}`` dictionary holding a function for each index\n that modifies the dimension size in that index.\n\n >>> def double(x):\n ... return np.concatenate([x, x])\n\n >>> y = blockwise(double, 'ij', x, 'ij',\n ... 
adjust_chunks={'i': lambda n: 2 * n}, dtype=x.dtype) # doctest: +SKIP\n\n Include literals by indexing with None\n\n >>> y = blockwise(add, 'ij', x, 'ij', 1234, None, dtype=x.dtype) # doctest: +SKIP\n \"\"\"\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/blockwise.py_blockwise.out_blockwise.chunks._chunkss_i_for_i_in_out_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/blockwise.py_blockwise.out_blockwise.chunks._chunkss_i_for_i_in_out_", "embedding": null, "metadata": {"file_path": "dask/array/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 125, "end_line": 223, "span_ids": ["blockwise"], "tokens": 745}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def blockwise(\n func,\n out_ind,\n *args,\n name=None,\n token=None,\n dtype=None,\n adjust_chunks=None,\n new_axes=None,\n align_arrays=True,\n concatenate=None,\n meta=None,\n **kwargs\n):\n out = name\n new_axes = new_axes or {}\n\n # Input Validation\n if len(set(out_ind)) != len(out_ind):\n raise ValueError(\n \"Repeated elements not allowed in output index\",\n [k for k, v in toolz.frequencies(out_ind).items() if v > 1],\n )\n new = (\n set(out_ind)\n - {a for arg in args[1::2] if arg is not None for a in arg}\n - set(new_axes or ())\n )\n if new:\n raise ValueError(\"Unknown dimension\", new)\n\n from .core import unify_chunks, normalize_arg\n\n if align_arrays:\n chunkss, arrays = unify_chunks(*args)\n else:\n arginds = [(a, i) for (a, i) in toolz.partition(2, args) if i is not None]\n chunkss = {}\n # For each dimension, use the input chunking that has the most blocks;\n # this will ensure that broadcasting works as expected, and in\n # particular the number of blocks should be correct if the inputs are\n # consistent.\n for arg, ind in arginds:\n for c, i in zip(arg.chunks, ind):\n if i not in chunkss or len(c) > len(chunkss[i]):\n chunkss[i] = c\n arrays = args[::2]\n\n for k, v in new_axes.items():\n if not isinstance(v, tuple):\n v = (v,)\n chunkss[k] = v\n\n arginds = zip(arrays, args[1::2])\n numblocks = {}\n\n dependencies = []\n arrays = []\n\n # Normalize arguments\n argindsstr = []\n\n for arg, ind in arginds:\n if ind is None:\n arg = normalize_arg(arg)\n arg, collections = unpack_collections(arg)\n dependencies.extend(collections)\n else:\n if (\n hasattr(arg, \"ndim\")\n and hasattr(ind, \"__len__\")\n and arg.ndim != len(ind)\n ):\n raise ValueError(\n \"Index string %s does not match array dimension %d\"\n % (ind, arg.ndim)\n )\n numblocks[arg.name] = arg.numblocks\n arrays.append(arg)\n arg = arg.name\n argindsstr.extend((arg, ind))\n\n # Normalize keyword arguments\n kwargs2 = {}\n for k, v in kwargs.items():\n v = normalize_arg(v)\n v, collections = unpack_collections(v)\n dependencies.extend(collections)\n kwargs2[k] = v\n\n # Finish up the name\n if not out:\n out = \"%s-%s\" % (\n token or utils.funcname(func).strip(\"_\"),\n 
base.tokenize(func, out_ind, argindsstr, dtype, **kwargs),\n )\n\n graph = core_blockwise(\n func,\n out,\n out_ind,\n *argindsstr,\n numblocks=numblocks,\n dependencies=dependencies,\n new_axes=new_axes,\n concatenate=concatenate,\n **kwargs2\n )\n graph = HighLevelGraph.from_collections(\n out, graph, dependencies=arrays + dependencies\n )\n\n chunks = [chunkss[i] for i in out_ind]\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/blockwise.py_blockwise.if_adjust_chunks__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/blockwise.py_blockwise.if_adjust_chunks__", "embedding": null, "metadata": {"file_path": "dask/array/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 224, "end_line": 260, "span_ids": ["impl:2", "blockwise", "atop"], "tokens": 345}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def blockwise(\n func,\n out_ind,\n *args,\n name=None,\n token=None,\n dtype=None,\n adjust_chunks=None,\n new_axes=None,\n align_arrays=True,\n concatenate=None,\n meta=None,\n **kwargs\n):\n # ... other code\n if adjust_chunks:\n for i, ind in enumerate(out_ind):\n if ind in adjust_chunks:\n if callable(adjust_chunks[ind]):\n chunks[i] = tuple(map(adjust_chunks[ind], chunks[i]))\n elif isinstance(adjust_chunks[ind], numbers.Integral):\n chunks[i] = tuple(adjust_chunks[ind] for _ in chunks[i])\n elif isinstance(adjust_chunks[ind], (tuple, list)):\n if len(adjust_chunks[ind]) != len(chunks[i]):\n raise ValueError(\n \"Dimension {0} has {1} blocks, \"\n \"adjust_chunks specified with \"\n \"{2} blocks\".format(\n i, len(chunks[i]), len(adjust_chunks[ind])\n )\n )\n chunks[i] = tuple(adjust_chunks[ind])\n else:\n raise NotImplementedError(\n \"adjust_chunks values must be callable, int, or tuple\"\n )\n chunks = tuple(chunks)\n\n if meta is None:\n from .utils import compute_meta\n\n meta = compute_meta(func, dtype, *args[::2], **kwargs)\n return new_da_object(graph, out, chunks, meta=meta, dtype=dtype)\n\n\ndef atop(*args, **kwargs):\n warnings.warn(\"The da.atop function has moved to da.blockwise\")\n return blockwise(*args, **kwargs)\n\n\nfrom .core import new_da_object", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py__A_set_of_NumPy_functi_keepdims_wrapper.return.keepdims_wrapped_callable": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py__A_set_of_NumPy_functi_keepdims_wrapper.return.keepdims_wrapped_callable", "embedding": null, "metadata": {"file_path": "dask/array/chunk.py", "file_name": "chunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 
51, "span_ids": ["keepdims_wrapper", "docstring"], "tokens": 280}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\" A set of NumPy functions to apply per chunk \"\"\"\nfrom collections.abc import Container, Iterable, Sequence\nfrom functools import wraps\n\nfrom tlz import concat\nimport numpy as np\nfrom . import numpy_compat as npcompat\n\nfrom ..core import flatten\nfrom ..utils import ignoring\n\nfrom numbers import Integral\n\ntry:\n from numpy import take_along_axis\nexcept ImportError: # pragma: no cover\n take_along_axis = npcompat.take_along_axis\n\n\ndef keepdims_wrapper(a_callable):\n \"\"\"\n A wrapper for functions that don't provide keepdims to ensure that they do.\n \"\"\"\n\n @wraps(a_callable)\n def keepdims_wrapped_callable(x, axis=None, keepdims=None, *args, **kwargs):\n r = a_callable(x, axis=axis, *args, **kwargs)\n\n if not keepdims:\n return r\n\n axes = axis\n\n if axes is None:\n axes = range(x.ndim)\n\n if not isinstance(axes, (Container, Iterable, Sequence)):\n axes = [axes]\n\n r_slice = tuple()\n for each_axis in range(x.ndim):\n if each_axis in axes:\n r_slice += (None,)\n else:\n r_slice += (slice(None),)\n\n r = r[r_slice]\n\n return r\n\n return keepdims_wrapped_callable", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py__Wrap_NumPy_functions_to_None_2.nanstd.np_nanstd": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py__Wrap_NumPy_functions_to_None_2.nanstd.np_nanstd", "embedding": null, "metadata": {"file_path": "dask/array/chunk.py", "file_name": "chunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 54, "end_line": 86, "span_ids": ["impl:6", "keepdims_wrapper"], "tokens": 190}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# Wrap NumPy functions to ensure they provide keepdims.\nsum = np.sum\nprod = np.prod\nmin = np.min\nmax = np.max\nargmin = keepdims_wrapper(np.argmin)\nnanargmin = keepdims_wrapper(np.nanargmin)\nargmax = keepdims_wrapper(np.argmax)\nnanargmax = keepdims_wrapper(np.nanargmax)\nany = np.any\nall = np.all\nnansum = np.nansum\nnanprod = np.nanprod\n\nnancumprod = np.nancumprod\nnancumsum = np.nancumsum\n\nnanmin = np.nanmin\nnanmax = np.nanmax\nmean = np.mean\n\nwith ignoring(AttributeError):\n nanmean = np.nanmean\n\nvar = np.var\n\nwith ignoring(AttributeError):\n nanvar = np.nanvar\n\nstd = np.std\n\nwith ignoring(AttributeError):\n nanstd = np.nanstd", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_coarsen_coarsen.return.reduction_x_reshape_newsh": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_coarsen_coarsen.return.reduction_x_reshape_newsh", "embedding": null, "metadata": {"file_path": "dask/array/chunk.py", "file_name": "chunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 89, "end_line": 143, "span_ids": ["coarsen"], "tokens": 581}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def coarsen(reduction, x, axes, trim_excess=False, **kwargs):\n \"\"\"Coarsen array by applying reduction to fixed size neighborhoods\n\n Parameters\n ----------\n reduction: function\n Function like np.sum, np.mean, etc...\n x: np.ndarray\n Array to be coarsened\n axes: dict\n Mapping of axis to coarsening factor\n\n Examples\n --------\n >>> x = np.array([1, 2, 3, 4, 5, 6])\n >>> coarsen(np.sum, x, {0: 2}) #doctest: +SKIP\n array([ 3, 7, 11])\n >>> coarsen(np.max, x, {0: 3}) #doctest: +SKIP\n array([3, 6])\n\n Provide dictionary of scale per dimension\n\n >>> x = np.arange(24).reshape((4, 6))\n >>> x\n array([[ 0, 1, 2, 3, 4, 5],\n [ 6, 7, 8, 9, 10, 11],\n [12, 13, 14, 15, 16, 17],\n [18, 19, 20, 21, 22, 23]])\n\n >>> coarsen(np.min, x, {0: 2, 1: 3}) #doctest: +SKIP\n array([[ 0, 3],\n [12, 15]])\n\n You must avoid excess elements explicitly\n\n >>> x = np.array([1, 2, 3, 4, 5, 6, 7, 8])\n >>> coarsen(np.min, x, {0: 3}, trim_excess=True) #doctest: +SKIP\n array([1, 4])\n \"\"\"\n # Insert singleton dimensions if they don't exist already\n for i in range(x.ndim):\n if i not in axes:\n axes[i] = 1\n\n if trim_excess:\n ind = tuple(\n slice(0, -(d % axes[i])) if d % axes[i] else slice(None, None)\n for i, d in enumerate(x.shape)\n )\n x = x[ind]\n\n # (10, 10) -> (5, 2, 5, 2)\n newshape = tuple(concat([(x.shape[i] // axes[i], axes[i]) for i in range(x.ndim)]))\n\n return reduction(x.reshape(newshape), axis=tuple(range(1, x.ndim * 2, 2)), **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_trim_trim.return.x_tuple_slice_ax_ax_if_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_trim_trim.return.x_tuple_slice_ax_ax_if_", "embedding": null, "metadata": {"file_path": "dask/array/chunk.py", "file_name": "chunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 146, "end_line": 165, "span_ids": ["trim"], "tokens": 223}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def trim(x, axes=None):\n \"\"\"Trim boundaries off of array\n\n >>> x = np.arange(24).reshape((4, 6))\n >>> trim(x, 
axes={0: 0, 1: 1})\n array([[ 1, 2, 3, 4],\n [ 7, 8, 9, 10],\n [13, 14, 15, 16],\n [19, 20, 21, 22]])\n\n >>> trim(x, axes={0: 1, 1: 1})\n array([[ 7, 8, 9, 10],\n [13, 14, 15, 16]])\n \"\"\"\n if isinstance(axes, Integral):\n axes = [axes] * x.ndim\n if isinstance(axes, dict):\n axes = [axes.get(i, 0) for i in range(x.ndim)]\n\n return x[tuple(slice(ax, -ax if ax else None) for ax in axes)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_topk_topk.return.a_tuple_k_slice_if_i_a": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_topk_topk.return.a_tuple_k_slice_if_i_a", "embedding": null, "metadata": {"file_path": "dask/array/chunk.py", "file_name": "chunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 168, "end_line": 183, "span_ids": ["topk"], "tokens": 155}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def topk(a, k, axis, keepdims):\n \"\"\"Chunk and combine function of topk\n\n Extract the k largest elements from a on the given axis.\n If k is negative, extract the -k smallest elements instead.\n Note that, unlike in the parent function, the returned elements\n are not sorted internally.\n \"\"\"\n assert keepdims is True\n axis = axis[0]\n if abs(k) >= a.shape[axis]:\n return a\n\n a = np.partition(a, -k, axis=axis)\n k_slice = slice(-k, None) if k > 0 else slice(-k)\n return a[tuple(k_slice if i == axis else slice(None) for i in range(a.ndim))]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_topk_aggregate_argtopk_preprocess.return.a_idx": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_topk_aggregate_argtopk_preprocess.return.a_idx", "embedding": null, "metadata": {"file_path": "dask/array/chunk.py", "file_name": "chunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 186, "end_line": 209, "span_ids": ["argtopk_preprocess", "topk_aggregate"], "tokens": 164}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def topk_aggregate(a, k, axis, keepdims):\n \"\"\"Final aggregation function of topk\n\n Invoke topk one final time and then sort the results internally.\n \"\"\"\n assert keepdims is True\n a = topk(a, k, axis, keepdims)\n axis = axis[0]\n a = np.sort(a, axis=axis)\n if k < 0:\n return a\n return a[\n tuple(\n slice(None, None, -1) if i == axis else slice(None) for i in range(a.ndim)\n )\n ]\n\n\ndef 
argtopk_preprocess(a, idx):\n \"\"\"Preparatory step for argtopk\n\n Put data together with its original indices in a tuple.\n \"\"\"\n return a, idx", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_argtopk_argtopk.return.take_along_axis_a_idx2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_argtopk_argtopk.return.take_along_axis_a_idx2_", "embedding": null, "metadata": {"file_path": "dask/array/chunk.py", "file_name": "chunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 212, "end_line": 238, "span_ids": ["argtopk"], "tokens": 278}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def argtopk(a_plus_idx, k, axis, keepdims):\n \"\"\"Chunk and combine function of argtopk\n\n Extract the indices of the k largest elements from a on the given axis.\n If k is negative, extract the indices of the -k smallest elements instead.\n Note that, unlike in the parent function, the returned elements\n are not sorted internally.\n \"\"\"\n assert keepdims is True\n axis = axis[0]\n\n if isinstance(a_plus_idx, list):\n a_plus_idx = list(flatten(a_plus_idx))\n a = np.concatenate([ai for ai, _ in a_plus_idx], axis)\n idx = np.concatenate(\n [np.broadcast_to(idxi, ai.shape) for ai, idxi in a_plus_idx], axis\n )\n else:\n a, idx = a_plus_idx\n\n if abs(k) >= a.shape[axis]:\n return a_plus_idx\n\n idx2 = np.argpartition(a, -k, axis=axis)\n k_slice = slice(-k, None) if k > 0 else slice(-k)\n idx2 = idx2[tuple(k_slice if i == axis else slice(None) for i in range(a.ndim))]\n return take_along_axis(a, idx2, axis), take_along_axis(idx, idx2, axis)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_argtopk_aggregate_argtopk_aggregate.return.idx_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_argtopk_aggregate_argtopk_aggregate.return.idx_", "embedding": null, "metadata": {"file_path": "dask/array/chunk.py", "file_name": "chunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 241, "end_line": 259, "span_ids": ["argtopk_aggregate"], "tokens": 159}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def argtopk_aggregate(a_plus_idx, k, axis, keepdims):\n \"\"\"Final aggregation function of argtopk\n\n Invoke argtopk one final time, sort the results internally, drop the data\n and return the index only.\n \"\"\"\n assert keepdims is True\n a, idx = 
argtopk(a_plus_idx, k, axis, keepdims)\n axis = axis[0]\n\n idx2 = np.argsort(a, axis=axis)\n idx = take_along_axis(idx, idx2, axis)\n if k < 0:\n return idx\n return idx[\n tuple(\n slice(None, None, -1) if i == axis else slice(None) for i in range(idx.ndim)\n )\n ]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_arange_view.if_order_C_.else_.return.x_T_view_dtype_T": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_arange_view.if_order_C_.else_.return.x_T_view_dtype_T", "embedding": null, "metadata": {"file_path": "dask/array/chunk.py", "file_name": "chunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 262, "end_line": 277, "span_ids": ["view", "astype", "arange"], "tokens": 119}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def arange(start, stop, step, length, dtype):\n res = np.arange(start, stop, step, dtype)\n return res[:-1] if len(res) > length else res\n\n\ndef astype(x, astype_dtype=None, **kwargs):\n return x.astype(astype_dtype, **kwargs)\n\n\ndef view(x, dtype, order=\"C\"):\n if order == \"C\":\n x = np.ascontiguousarray(x)\n return x.view(dtype)\n else:\n x = np.asfortranarray(x)\n return x.T.view(dtype).T", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_slice_with_int_dask_array_slice_with_int_dask_array.return.x_tuple_idx_if_i_axis_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_slice_with_int_dask_array_slice_with_int_dask_array.return.x_tuple_idx_if_i_axis_", "embedding": null, "metadata": {"file_path": "dask/array/chunk.py", "file_name": "chunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 280, "end_line": 319, "span_ids": ["slice_with_int_dask_array"], "tokens": 366}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def slice_with_int_dask_array(x, idx, offset, x_size, axis):\n \"\"\"Chunk function of `slice_with_int_dask_array_on_axis`.\n Slice one chunk of x by one chunk of idx.\n\n Parameters\n ----------\n x: ndarray, any dtype, any shape\n i-th chunk of x\n idx: ndarray, ndim=1, dtype=any integer\n j-th chunk of idx (cartesian product with the chunks of x)\n offset: ndarray, shape=(1, ), dtype=int64\n Index of the first element along axis of the current chunk of x\n x_size: int\n Total size of the x da.Array along axis\n axis: int\n normalized axis to take elements from (0 <= axis < 
x.ndim)\n\n Returns\n -------\n x sliced along axis, using only the elements of idx that fall inside the\n current chunk.\n \"\"\"\n # Needed when idx is unsigned\n idx = idx.astype(np.int64)\n\n # Normalize negative indices\n idx = np.where(idx < 0, idx + x_size, idx)\n\n # A chunk of the offset dask Array is a numpy array with shape (1, ).\n # It indicates the index of the first element along axis of the current\n # chunk of x.\n idx = idx - offset\n\n # Drop elements of idx that do not fall inside the current chunk of x\n idx_filter = (idx >= 0) & (idx < x.shape[axis])\n idx = idx[idx_filter]\n\n # np.take does not support slice indices\n # return np.take(x, idx, axis)\n return x[tuple(idx if i == axis else slice(None) for i in range(x.ndim))]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_slice_with_int_dask_array_aggregate_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_slice_with_int_dask_array_aggregate_", "embedding": null, "metadata": {"file_path": "dask/array/chunk.py", "file_name": "chunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 322, "end_line": 376, "span_ids": ["slice_with_int_dask_array_aggregate"], "tokens": 492}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def slice_with_int_dask_array_aggregate(idx, chunk_outputs, x_chunks, axis):\n \"\"\"Final aggregation function of `slice_with_int_dask_array_on_axis`.\n Aggregate all chunks of x by one chunk of idx, reordering the output of\n `slice_with_int_dask_array`.\n\n Note that there is no combine function, as a recursive aggregation (e.g.\n with split_every) would not give any benefit.\n\n Parameters\n ----------\n idx: ndarray, ndim=1, dtype=any integer\n j-th chunk of idx\n chunk_outputs: ndarray\n concatenation along axis of the outputs of `slice_with_int_dask_array`\n for all chunks of x and the j-th chunk of idx\n x_chunks: tuple\n dask chunks of the x da.Array along axis, e.g. 
``(3, 3, 2)``\n axis: int\n normalized axis to take elements from (0 <= axis < x.ndim)\n\n Returns\n -------\n Selection from all chunks of x for the j-th chunk of idx, in the correct\n order\n \"\"\"\n # Needed when idx is unsigned\n idx = idx.astype(np.int64)\n\n # Normalize negative indices\n idx = np.where(idx < 0, idx + sum(x_chunks), idx)\n\n x_chunk_offset = 0\n chunk_output_offset = 0\n\n # Assemble the final index that picks from the output of the previous\n # kernel by adding together one layer per chunk of x\n # FIXME: this could probably be reimplemented with a faster search-based\n # algorithm\n idx_final = np.zeros_like(idx)\n for x_chunk in x_chunks:\n idx_filter = (idx >= x_chunk_offset) & (idx < x_chunk_offset + x_chunk)\n idx_cum = np.cumsum(idx_filter)\n idx_final += np.where(idx_filter, idx_cum - 1 + chunk_output_offset, 0)\n x_chunk_offset += x_chunk\n if idx_cum.size > 0:\n chunk_output_offset += idx_cum[-1]\n\n # np.take does not support slice indices\n # return np.take(chunk_outputs, idx_final, axis)\n return chunk_outputs[\n tuple(\n idx_final if i == axis else slice(None) for i in range(chunk_outputs.ndim)\n )\n ]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk_types.py_try__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk_types.py_try__", "embedding": null, "metadata": {"file_path": "dask/array/chunk_types.py", "file_name": "chunk_types.py", "file_type": "text/x-python", "category": "implementation", "start_line": 108, "end_line": 150, "span_ids": ["is_valid_array_chunk", "impl:3", "is_valid_chunk_type"], "tokens": 191}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "try:\n import cupy\n\n register_chunk_type(cupy.ndarray)\nexcept ImportError:\n pass\n\ntry:\n from cupyx.scipy.sparse import spmatrix\n\n register_chunk_type(spmatrix)\nexcept ImportError:\n pass\n\ntry:\n import sparse\n\n register_chunk_type(sparse.SparseArray)\nexcept ImportError:\n pass\n\ntry:\n import scipy.sparse\n\n register_chunk_type(scipy.sparse.spmatrix)\nexcept ImportError:\n pass\n\n\ndef is_valid_chunk_type(type):\n \"\"\" Check if given type is a valid chunk and downcast array type\"\"\"\n try:\n return type in _HANDLED_CHUNK_TYPES or issubclass(\n type, tuple(_HANDLED_CHUNK_TYPES)\n )\n except TypeError:\n return False\n\n\ndef is_valid_array_chunk(array):\n \"\"\" Check if given array is of a valid type to operate with\"\"\"\n return array is None or isinstance(array, tuple(_HANDLED_CHUNK_TYPES))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_math_PerformanceWarning._A_warning_given_when_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_math_PerformanceWarning._A_warning_given_when_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 87, "span_ids": ["imports", "PerformanceWarning"], "tokens": 569}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import math\nimport operator\nimport os\nimport pickle\nimport re\nimport sys\nimport traceback\nimport uuid\nimport warnings\nfrom bisect import bisect\nfrom collections.abc import Iterable, Iterator, Mapping\nfrom functools import partial, wraps, reduce\nfrom itertools import product, zip_longest\nfrom numbers import Number, Integral\nfrom operator import add, getitem, mul\nfrom threading import Lock\n\nfrom tlz import partition, concat, first, groupby, accumulate, frequencies\nfrom tlz.curried import pluck\nimport numpy as np\n\nfrom . import chunk\nfrom .. import config, compute\nfrom ..base import (\n DaskMethodsMixin,\n tokenize,\n dont_optimize,\n compute_as_if_collection,\n persist,\n is_dask_collection,\n)\nfrom ..blockwise import broadcast_dimensions\nfrom ..context import globalmethod\nfrom ..utils import (\n ndeepmap,\n ignoring,\n concrete,\n derived_from,\n is_integer,\n IndexCallable,\n funcname,\n SerializableLock,\n Dispatch,\n factors,\n parse_bytes,\n has_keyword,\n M,\n ndimlist,\n format_bytes,\n typename,\n is_dataframe_like,\n is_series_like,\n is_index_like,\n)\nfrom ..core import quote\nfrom ..delayed import delayed, Delayed\nfrom .. 
import threaded, core\nfrom ..sizeof import sizeof\nfrom ..highlevelgraph import HighLevelGraph\nfrom .numpy_compat import _Recurser, _make_sliced_dtype\nfrom .slicing import slice_array, replace_ellipsis, cached_cumsum\nfrom .blockwise import blockwise\nfrom .chunk_types import is_valid_array_chunk, is_valid_chunk_type\n\n\nconfig.update_defaults({\"array\": {\"chunk-size\": \"128MiB\", \"rechunk-threshold\": 4}})\n\n\nconcatenate_lookup = Dispatch(\"concatenate\")\ntensordot_lookup = Dispatch(\"tensordot\")\neinsum_lookup = Dispatch(\"einsum\")\nconcatenate_lookup.register((object, np.ndarray), np.concatenate)\ntensordot_lookup.register((object, np.ndarray), np.tensordot)\neinsum_lookup.register((object, np.ndarray), np.einsum)\n\nunknown_chunk_message = (\n \"\\n\\n\"\n \"A possible solution: \"\n \"https://docs.dask.org/en/latest/array-chunks.html#unknown-chunks\\n\"\n \"Summary: to compute chunks sizes, use\\n\\n\"\n \" x.compute_chunk_sizes() # for Dask Array `x`\\n\"\n \" ddf.to_dask_array(lengths=True) # for Dask DataFrame `ddf`\"\n)\n\n\nclass PerformanceWarning(Warning):\n \"\"\" A warning given when bad chunking may cause poor performance \"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_getter_getter_nofancy.return.getter_a_b_asarray_asar": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_getter_getter_nofancy.return.getter_a_b_asarray_asar", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 87, "end_line": 115, "span_ids": ["getter", "getter_nofancy"], "tokens": 209}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def getter(a, b, asarray=True, lock=None):\n if isinstance(b, tuple) and any(x is None for x in b):\n b2 = tuple(x for x in b if x is not None)\n b3 = tuple(\n None if x is None else slice(None, None)\n for x in b\n if not isinstance(x, Integral)\n )\n return getter(a, b2, asarray=asarray, lock=lock)[b3]\n\n if lock:\n lock.acquire()\n try:\n c = a[b]\n if asarray:\n c = np.asarray(c)\n finally:\n if lock:\n lock.release()\n return c\n\n\ndef getter_nofancy(a, b, asarray=True, lock=None):\n \"\"\"A simple wrapper around ``getter``.\n\n Used to indicate to the optimization passes that the backend doesn't\n support fancy indexing.\n \"\"\"\n return getter(a, b, asarray=asarray, lock=lock)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_getter_inline_getter_inline.return.getter_a_b_asarray_asar": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_getter_inline_getter_inline.return.getter_a_b_asarray_asar", "embedding": 
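A numpy-only sketch of the two-step indexing getter performs when the key contains None (np.newaxis): drop the Nones, index once, then reinsert them. The array and key below are illustrative:

import numpy as np

a = np.arange(12).reshape(3, 4)
key = (1, None, slice(0, 2))                 # integer, newaxis, slice

b2 = tuple(k for k in key if k is not None)  # (1, slice(0, 2))
b3 = tuple(                                  # (None, slice(None))
    None if k is None else slice(None)
    for k in key
    if not isinstance(k, int)                # the real code checks numbers.Integral
)
assert (a[b2][b3] == a[1, None, 0:2]).all()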
null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 118, "end_line": 137, "span_ids": ["getter_inline"], "tokens": 178}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def getter_inline(a, b, asarray=True, lock=None):\n \"\"\"A getter function that optimizations feel comfortable inlining\n\n Slicing operations with this function may be inlined into a graph, such as\n in the following rewrite\n\n **Before**\n\n >>> a = x[:10] # doctest: +SKIP\n >>> b = a + 1 # doctest: +SKIP\n >>> c = a * 2 # doctest: +SKIP\n\n **After**\n\n >>> b = x[:10] + 1 # doctest: +SKIP\n >>> c = x[:10] * 2 # doctest: +SKIP\n\n This inlining can be relevant to operations when running off of disk.\n \"\"\"\n return getter(a, b, asarray=asarray, lock=lock)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_optimize_implements.return.decorator": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_optimize_implements.return.decorator", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 140, "end_line": 167, "span_ids": ["implements", "impl:13"], "tokens": 178}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from .optimization import optimize, fuse_slice\n\n\n# __array_function__ dict for mapping aliases and mismatching names\n_HANDLED_FUNCTIONS = {}\n\n\ndef implements(*numpy_functions):\n \"\"\"Register an __array_function__ implementation for dask.array.Array\n\n Register that a function implements the API of a NumPy function (or several\n NumPy functions in case of aliases) which is handled with\n ``__array_function__``.\n\n Parameters\n ----------\n \\\\*numpy_functions : callables\n One or more NumPy functions that are handled by ``__array_function__``\n and will be mapped by `implements` to a `dask.array` function.\n \"\"\"\n\n def decorator(dask_func):\n for numpy_function in numpy_functions:\n _HANDLED_FUNCTIONS[numpy_function] = dask_func\n\n return dask_func\n\n return decorator", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_slices_from_chunks_slices_from_chunks.return.list_product_slices_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_slices_from_chunks_slices_from_chunks.return.list_product_slices_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 191, "end_line": 207, "span_ids": ["slices_from_chunks"], "tokens": 243}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def slices_from_chunks(chunks):\n \"\"\"Translate chunks tuple to a set of slices in product order\n\n >>> slices_from_chunks(((2, 2), (3, 3, 3))) # doctest: +NORMALIZE_WHITESPACE\n [(slice(0, 2, None), slice(0, 3, None)),\n (slice(0, 2, None), slice(3, 6, None)),\n (slice(0, 2, None), slice(6, 9, None)),\n (slice(2, 4, None), slice(0, 3, None)),\n (slice(2, 4, None), slice(3, 6, None)),\n (slice(2, 4, None), slice(6, 9, None))]\n \"\"\"\n cumdims = [cached_cumsum(bds, initial_zero=True) for bds in chunks]\n slices = [\n [slice(s, s + dim) for s, dim in zip(starts, shapes)]\n for starts, shapes in zip(cumdims, chunks)\n ]\n return list(product(*slices))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_getem_getem.return.dict_zip_keys_values_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_getem_getem.return.dict_zip_keys_values_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 210, "end_line": 249, "span_ids": ["getem"], "tokens": 503}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def getem(\n arr,\n chunks,\n getitem=getter,\n shape=None,\n out_name=None,\n lock=False,\n asarray=True,\n dtype=None,\n):\n \"\"\"Dask getting various chunks from an array-like\n\n >>> getem('X', chunks=(2, 3), shape=(4, 6)) # doctest: +SKIP\n {('X', 0, 0): (getter, 'X', (slice(0, 2), slice(0, 3))),\n ('X', 1, 0): (getter, 'X', (slice(2, 4), slice(0, 3))),\n ('X', 1, 1): (getter, 'X', (slice(2, 4), slice(3, 6))),\n ('X', 0, 1): (getter, 'X', (slice(0, 2), slice(3, 6)))}\n\n >>> getem('X', chunks=((2, 2), (3, 3))) # doctest: +SKIP\n {('X', 0, 0): (getter, 'X', (slice(0, 2), slice(0, 3))),\n ('X', 1, 0): (getter, 'X', (slice(2, 4), slice(0, 3))),\n ('X', 1, 1): (getter, 'X', (slice(2, 4), slice(3, 6))),\n ('X', 0, 1): (getter, 'X', (slice(0, 2), slice(3, 6)))}\n \"\"\"\n out_name = out_name or arr\n chunks = normalize_chunks(chunks, shape, dtype=dtype)\n keys = product([out_name], *(range(len(bds)) for bds in chunks))\n slices = slices_from_chunks(chunks)\n\n if (\n has_keyword(getitem, \"asarray\")\n and has_keyword(getitem, \"lock\")\n and (not asarray or 
lock)\n ):\n values = [(getitem, arr, x, asarray, lock) for x in slices]\n else:\n # Common case, drop extra parameters\n values = [(getitem, arr, x) for x in slices]\n\n return dict(zip(keys, values))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_dotmany_dotmany.return.sum_map_partial_np_dot_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_dotmany_dotmany.return.sum_map_partial_np_dot_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 252, "end_line": 271, "span_ids": ["dotmany"], "tokens": 196}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def dotmany(A, B, leftfunc=None, rightfunc=None, **kwargs):\n \"\"\"Dot product of many aligned chunks\n\n >>> x = np.array([[1, 2], [1, 2]])\n >>> y = np.array([[10, 20], [10, 20]])\n >>> dotmany([x, x, x], [y, y, y])\n array([[ 90, 180],\n [ 90, 180]])\n\n Optionally pass in functions to apply to the left and right chunks\n\n >>> dotmany([x, x, x], [y, y, y], rightfunc=np.transpose)\n array([[150, 150],\n [150, 150]])\n \"\"\"\n if leftfunc:\n A = map(leftfunc, A)\n if rightfunc:\n B = map(rightfunc, B)\n return sum(map(partial(np.dot, **kwargs), A, B))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__concatenate2__concatenate2.return.concatenate_arrays_axis_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__concatenate2__concatenate2.return.concatenate_arrays_axis_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 274, "end_line": 324, "span_ids": ["_concatenate2"], "tokens": 474}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _concatenate2(arrays, axes=[]):\n \"\"\"Recursively Concatenate nested lists of arrays along axes\n\n Each entry in axes corresponds to each level of the nested list. 
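Spelled out by hand, the graph getem produces for a (4, 6) array in (2, 3) blocks is just a dict of getter tasks; materializing one key with the threaded scheduler shows this (the name 'X' is illustrative):

import numpy as np
from dask.array.core import getter
from dask.threaded import get

X = np.arange(24).reshape(4, 6)
dsk = {
    ("X", 0, 0): (getter, X, (slice(0, 2), slice(0, 3))),
    ("X", 0, 1): (getter, X, (slice(0, 2), slice(3, 6))),
    ("X", 1, 0): (getter, X, (slice(2, 4), slice(0, 3))),
    ("X", 1, 1): (getter, X, (slice(2, 4), slice(3, 6))),
}
block = get(dsk, ("X", 1, 1))   # run a single task
assert block.shape == (2, 3)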
The\n length of axes should correspond to the level of nesting of arrays.\n If axes is an empty list or tuple, return arrays, or arrays[0] if\n arrays is a list.\n\n >>> x = np.array([[1, 2], [3, 4]])\n >>> _concatenate2([x, x], axes=[0])\n array([[1, 2],\n [3, 4],\n [1, 2],\n [3, 4]])\n\n >>> _concatenate2([x, x], axes=[1])\n array([[1, 2, 1, 2],\n [3, 4, 3, 4]])\n\n >>> _concatenate2([[x, x], [x, x]], axes=[0, 1])\n array([[1, 2, 1, 2],\n [3, 4, 3, 4],\n [1, 2, 1, 2],\n [3, 4, 3, 4]])\n\n Supports Iterators\n >>> _concatenate2(iter([x, x]), axes=[1])\n array([[1, 2, 1, 2],\n [3, 4, 3, 4]])\n\n Special Case\n >>> _concatenate2([x, x], axes=())\n array([[1, 2],\n [3, 4]])\n \"\"\"\n if axes == ():\n if isinstance(arrays, list):\n return arrays[0]\n else:\n return arrays\n\n if isinstance(arrays, Iterator):\n arrays = list(arrays)\n if not isinstance(arrays, (list, tuple)):\n return arrays\n if len(axes) > 1:\n arrays = [_concatenate2(a, axes=axes[1:]) for a in arrays]\n concatenate = concatenate_lookup.dispatch(\n type(max(arrays, key=lambda x: getattr(x, \"__array_priority__\", 0)))\n )\n return concatenate(arrays, axis=axes[0])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_apply_infer_dtype_apply_infer_dtype.return.o_dtype_if_nout_is_None_e": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_apply_infer_dtype_apply_infer_dtype.return.o_dtype_if_nout_is_None_e", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 327, "end_line": 391, "span_ids": ["apply_infer_dtype"], "tokens": 485}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def apply_infer_dtype(func, args, kwargs, funcname, suggest_dtype=\"dtype\", nout=None):\n \"\"\"\n Tries to infer output dtype of ``func`` for a small set of input arguments.\n\n Parameters\n ----------\n func: Callable\n Function for which output dtype is to be determined\n\n args: List of array like\n Arguments to the function, which would usually be used. Only attributes\n ``ndim`` and ``dtype`` are used.\n\n kwargs: dict\n Additional ``kwargs`` to the ``func``\n\n funcname: String\n Name of calling function to improve potential error messages\n\n suggest_dtype: None/False or String\n If not ``None`` adds suggestion to potential error message to specify a dtype\n via the specified kwarg. 
Defaults to ``'dtype'``.\n\n nout: None or Int\n ``None`` if function returns single output, integer if many.\n Defaults to ``None``.\n\n Returns\n -------\n : dtype or List of dtype\n One or many dtypes (depending on ``nout``)\n \"\"\"\n args = [\n np.ones((1,) * x.ndim, dtype=x.dtype) if isinstance(x, Array) else x\n for x in args\n ]\n try:\n with np.errstate(all=\"ignore\"):\n o = func(*args, **kwargs)\n except Exception as e:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n tb = \"\".join(traceback.format_tb(exc_traceback))\n suggest = (\n (\n \"Please specify the dtype explicitly using the \"\n \"`{dtype}` kwarg.\\n\\n\".format(dtype=suggest_dtype)\n )\n if suggest_dtype\n else \"\"\n )\n msg = (\n \"`dtype` inference failed in `{0}`.\\n\\n\"\n \"{1}\"\n \"Original error is below:\\n\"\n \"------------------------\\n\"\n \"{2}\\n\\n\"\n \"Traceback:\\n\"\n \"---------\\n\"\n \"{3}\"\n ).format(funcname, suggest, repr(e), tb)\n else:\n msg = None\n if msg is not None:\n raise ValueError(msg)\n return o.dtype if nout is None else tuple(e.dtype for e in o)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_map_blocks.if_extra_argpairs__map_blocks.return.out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_map_blocks.if_extra_argpairs__map_blocks.return.out", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 759, "end_line": 782, "span_ids": ["map_blocks"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def map_blocks(\n func,\n *args,\n name=None,\n token=None,\n dtype=None,\n chunks=None,\n drop_axis=[],\n new_axis=None,\n meta=None,\n **kwargs,\n):\n # ... other code\n\n if extra_argpairs:\n # Rewrite the Blockwise layer. It would be nice to find a way to\n # avoid doing it twice, but it's currently needed to determine\n # out.chunks from the first pass. 
Since it constructs a Blockwise\n # rather than an expanded graph, it shouldn't be too expensive.\n out = blockwise(\n _pass_extra_kwargs,\n out_ind,\n func,\n None,\n tuple(extra_names),\n None,\n *concat(extra_argpairs),\n *concat(argpairs),\n name=out.name,\n dtype=out.dtype,\n concatenate=True,\n align_arrays=False,\n adjust_chunks=dict(zip(out_ind, out.chunks)),\n meta=meta,\n **kwargs,\n )\n\n return out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_broadcast_chunks_broadcast_chunks.return.tuple_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_broadcast_chunks_broadcast_chunks.return.tuple_result_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 785, "end_line": 826, "span_ids": ["broadcast_chunks"], "tokens": 422}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def broadcast_chunks(*chunkss):\n \"\"\"Construct a chunks tuple that broadcasts many chunks tuples\n\n >>> a = ((5, 5),)\n >>> b = ((5, 5),)\n >>> broadcast_chunks(a, b)\n ((5, 5),)\n\n >>> a = ((10, 10, 10), (5, 5),)\n >>> b = ((5, 5),)\n >>> broadcast_chunks(a, b)\n ((10, 10, 10), (5, 5))\n\n >>> a = ((10, 10, 10), (5, 5),)\n >>> b = ((1,), (5, 5),)\n >>> broadcast_chunks(a, b)\n ((10, 10, 10), (5, 5))\n\n >>> a = ((10, 10, 10), (5, 5),)\n >>> b = ((3, 3,), (5, 5),)\n >>> broadcast_chunks(a, b)\n Traceback (most recent call last):\n ...\n ValueError: Chunks do not align: [(10, 10, 10), (3, 3)]\n \"\"\"\n if not chunkss:\n return ()\n elif len(chunkss) == 1:\n return chunkss[0]\n n = max(map(len, chunkss))\n chunkss2 = [((1,),) * (n - len(c)) + c for c in chunkss]\n result = []\n for i in range(n):\n step1 = [c[i] for c in chunkss2]\n if all(c == (1,) for c in step1):\n step2 = step1\n else:\n step2 = [c for c in step1 if c != (1,)]\n if len(set(step2)) != 1:\n raise ValueError(\"Chunks do not align: %s\" % str(step2))\n result.append(step2[0])\n return tuple(result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_store_store.sources_dsk_1.Array___dask_optimize___": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_store_store.sources_dsk_1.Array___dask_optimize___", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 829, "end_line": 915, "span_ids": ["store"], "tokens": 768}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
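The extra_argpairs rewrite above is what lets dask arrays ride along as keyword arguments to map_blocks. A hedged usage sketch (array sizes and the lambda are illustrative):

import dask.array as da

x = da.arange(8, chunks=4)
y = da.ones(8, chunks=4)

# `b` is a dask array passed by keyword: it travels through the Blockwise
# layer as an extra argument pair rather than positionally.
z = da.map_blocks(lambda a, b: a + b, x, b=y, dtype="f8")
assert (z.compute() == (x + y).compute()).all()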
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def store(\n sources,\n targets,\n lock=True,\n regions=None,\n compute=True,\n return_stored=False,\n **kwargs,\n):\n \"\"\"Store dask arrays in array-like objects, overwrite data in target\n\n This stores dask arrays into object that supports numpy-style setitem\n indexing. It stores values chunk by chunk so that it does not have to\n fill up memory. For best performance you can align the block size of\n the storage target with the block size of your array.\n\n If your data fits in memory then you may prefer calling\n ``np.array(myarray)`` instead.\n\n Parameters\n ----------\n\n sources: Array or iterable of Arrays\n targets: array-like or Delayed or iterable of array-likes and/or Delayeds\n These should support setitem syntax ``target[10:20] = ...``\n lock: boolean or threading.Lock, optional\n Whether or not to lock the data stores while storing.\n Pass True (lock each file individually), False (don't lock) or a\n particular ``threading.Lock`` object to be shared among all writes.\n regions: tuple of slices or list of tuples of slices\n Each ``region`` tuple in ``regions`` should be such that\n ``target[region].shape = source.shape``\n for the corresponding source and target in sources and targets,\n respectively. If this is a tuple, the contents will be assumed to be\n slices, so do not provide a tuple of tuples.\n compute: boolean, optional\n If true compute immediately, return ``dask.delayed.Delayed`` otherwise\n return_stored: boolean, optional\n Optionally return the stored result (default False).\n\n Examples\n --------\n >>> x = ... # doctest: +SKIP\n\n >>> import h5py # doctest: +SKIP\n >>> f = h5py.File('myfile.hdf5', mode='a') # doctest: +SKIP\n >>> dset = f.create_dataset('/data', shape=x.shape,\n ... chunks=x.chunks,\n ... dtype='f8') # doctest: +SKIP\n\n >>> store(x, dset) # doctest: +SKIP\n\n Alternatively store many arrays at the same time\n\n >>> store([x, y, z], [dset1, dset2, dset3]) # doctest: +SKIP\n \"\"\"\n\n if isinstance(sources, Array):\n sources = [sources]\n targets = [targets]\n\n if any(not isinstance(s, Array) for s in sources):\n raise ValueError(\"All sources must be dask array objects\")\n\n if len(sources) != len(targets):\n raise ValueError(\n \"Different number of sources [%d] and targets [%d]\"\n % (len(sources), len(targets))\n )\n\n if isinstance(regions, tuple) or regions is None:\n regions = [regions]\n\n if len(sources) > 1 and len(regions) == 1:\n regions *= len(sources)\n\n if len(sources) != len(regions):\n raise ValueError(\n \"Different number of sources [%d] and targets [%d] than regions [%d]\"\n % (len(sources), len(targets), len(regions))\n )\n\n # Optimize all sources together\n sources_dsk = HighLevelGraph.merge(*[e.__dask_graph__() for e in sources])\n sources_dsk = Array.__dask_optimize__(\n sources_dsk, list(core.flatten([e.__dask_keys__() for e in sources]))\n )\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_store.sources2_store.if_return_stored_.else_.if_compute_.else_.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_store.sources2_store.if_return_stored_.else_.if_compute_.else_.return.result", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 968, "end_line": 1024, "span_ids": ["store"], "tokens": 556}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def store(\n sources,\n targets,\n lock=True,\n regions=None,\n compute=True,\n return_stored=False,\n **kwargs,\n):\n # ... other code\n sources2 = [Array(sources_dsk, e.name, e.chunks, meta=e) for e in sources]\n\n # Optimize all targets together\n targets2 = []\n targets_keys = []\n targets_dsk = []\n for e in targets:\n if isinstance(e, Delayed):\n targets2.append(e.key)\n targets_keys.extend(e.__dask_keys__())\n targets_dsk.append(e.__dask_graph__())\n elif is_dask_collection(e):\n raise TypeError(\"Targets must be either Delayed objects or array-likes\")\n else:\n targets2.append(e)\n\n targets_dsk = HighLevelGraph.merge(*targets_dsk)\n targets_dsk = Delayed.__dask_optimize__(targets_dsk, targets_keys)\n\n load_stored = return_stored and not compute\n toks = [str(uuid.uuid1()) for _ in range(len(sources))]\n store_dsk = HighLevelGraph.merge(\n *[\n insert_to_ooc(s, t, lock, r, return_stored, load_stored, tok)\n for s, t, r, tok in zip(sources2, targets2, regions, toks)\n ]\n )\n store_keys = list(store_dsk.keys())\n\n store_dsk = HighLevelGraph.merge(store_dsk, targets_dsk, sources_dsk)\n store_dsk = HighLevelGraph.from_collections(id(store_dsk), dict(store_dsk))\n\n if return_stored:\n load_store_dsk = store_dsk\n if compute:\n store_dlyds = [Delayed(k, store_dsk) for k in store_keys]\n store_dlyds = persist(*store_dlyds, **kwargs)\n store_dsk_2 = HighLevelGraph.merge(*[e.dask for e in store_dlyds])\n\n load_store_dsk = retrieve_from_ooc(store_keys, store_dsk, store_dsk_2)\n\n result = tuple(\n Array(load_store_dsk, \"load-store-%s\" % t, s.chunks, meta=s)\n for s, t in zip(sources, toks)\n )\n\n return result\n else:\n name = \"store-\" + str(uuid.uuid1())\n dsk = HighLevelGraph.merge({name: store_keys}, store_dsk)\n result = Delayed(name, dsk)\n\n if compute:\n result.compute(**kwargs)\n return None\n else:\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_blockdims_from_blockshape_blockdims_from_blockshape.return.tuple_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_blockdims_from_blockshape_blockdims_from_blockshape.return.tuple_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 974, "end_line": 1000, "span_ids": ["blockdims_from_blockshape"], "tokens": 274}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def blockdims_from_blockshape(shape, chunks):\n \"\"\"\n\n >>> blockdims_from_blockshape((10, 10), (4, 3))\n ((4, 4, 2), (3, 3, 3, 1))\n >>> blockdims_from_blockshape((10, 0), (4, 0))\n ((4, 4, 2), (0,))\n \"\"\"\n if chunks is None:\n raise TypeError(\"Must supply chunks= keyword argument\")\n if shape is None:\n raise TypeError(\"Must supply shape= keyword argument\")\n if np.isnan(sum(shape)) or np.isnan(sum(chunks)):\n raise ValueError(\n \"Array chunk sizes are unknown. shape: %s, chunks: %s%s\"\n % (shape, chunks, unknown_chunk_message)\n )\n if not all(map(is_integer, chunks)):\n raise ValueError(\"chunks can only contain integers.\")\n if not all(map(is_integer, shape)):\n raise ValueError(\"shape can only contain integers.\")\n shape = tuple(map(int, shape))\n chunks = tuple(map(int, chunks))\n return tuple(\n ((bd,) * (d // bd) + ((d % bd,) if d % bd else ()) if d else (0,))\n for d, bd in zip(shape, chunks)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_finalize_CHUNKS_NONE_ERROR_MESSAGE._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_finalize_CHUNKS_NONE_ERROR_MESSAGE._", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1003, "end_line": 1021, "span_ids": ["finalize", "impl:17"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def finalize(results):\n if not results:\n return concatenate3(results)\n results2 = results\n while isinstance(results2, (tuple, list)):\n if len(results2) > 1:\n return concatenate3(results)\n else:\n results2 = results2[0]\n return unpack_singleton(results)\n\n\nCHUNKS_NONE_ERROR_MESSAGE = \"\"\"\nYou must specify a chunks= keyword argument.\nThis specifies the chunksize of your array blocks.\n\nSee the following documentation page for details:\n https://docs.dask.org/en/latest/array-creation.html#chunks\n\"\"\".strip()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array_Array.__slots__._dask__name__cached": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array_Array.__slots__._dask__name__cached", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1024, "end_line": 1053, "span_ids": ["Array"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n \"\"\"Parallel Dask Array\n\n A parallel nd-array comprised of many numpy arrays arranged in a grid.\n\n This constructor is for advanced uses only. For normal use see the\n ``da.from_array`` function.\n\n Parameters\n ----------\n dask : dict\n Task dependency graph\n name : string\n Name of array in dask\n shape : tuple of ints\n Shape of the entire array\n chunks: iterable of tuples\n block sizes along each dimension\n dtype : str or dtype\n Typecode or data-type for the new Dask Array\n meta : empty ndarray\n empty ndarray created with same NumPy backend, ndim and dtype as the\n Dask Array being created (overrides dtype)\n\n See Also\n --------\n dask.array.from_array\n \"\"\"\n\n __slots__ = \"dask\", \"_name\", \"_cached_keys\", \"_chunks\", \"_meta\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__new___Array.__new__.return.self": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__new___Array.__new__.return.self", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1055, "end_line": 1084, "span_ids": ["Array.__new__"], "tokens": 242}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def __new__(cls, dask, name, chunks, dtype=None, meta=None, shape=None):\n self = super(Array, cls).__new__(cls)\n assert isinstance(dask, Mapping)\n if not isinstance(dask, HighLevelGraph):\n dask = HighLevelGraph.from_collections(name, dask, dependencies=())\n self.dask = dask\n self.name = str(name)\n meta = meta_from_array(meta, dtype=dtype)\n\n if (\n isinstance(chunks, str)\n or isinstance(chunks, tuple)\n and chunks\n and any(isinstance(c, str) for c in chunks)\n ):\n dt = meta.dtype\n else:\n dt = None\n self._chunks = normalize_chunks(chunks, shape, dtype=dt)\n if self._chunks is None:\n raise ValueError(CHUNKS_NONE_ERROR_MESSAGE)\n\n self._meta = meta_from_array(meta, ndim=self.ndim, dtype=dtype)\n\n for plugin in config.get(\"array_plugins\", 
()):\n result = plugin(self)\n if result is not None:\n self = result\n\n return self", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__reduce___Array.__dask_keys__.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__reduce___Array.__dask_keys__.return.result", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1086, "end_line": 1112, "span_ids": ["Array.__dask_layers__", "Array.__dask_keys__", "Array.__reduce__", "Array.__dask_graph__"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def __reduce__(self):\n return (Array, (self.dask, self.name, self.chunks, self.dtype))\n\n def __dask_graph__(self):\n return self.dask\n\n def __dask_layers__(self):\n return (self.name,)\n\n def __dask_keys__(self):\n if self._cached_keys is not None:\n return self._cached_keys\n\n name, chunks, numblocks = self.name, self.chunks, self.numblocks\n\n def keys(*args):\n if not chunks:\n return [(name,)]\n ind = len(args)\n if ind + 1 == len(numblocks):\n result = [(name,) + args + (i,) for i in range(numblocks[ind])]\n else:\n result = [keys(*(args + (i,))) for i in range(numblocks[ind])]\n return result\n\n self._cached_keys = result = keys()\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__dask_tokenize___Array.npartitions.return.reduce_mul_self_numblock": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__dask_tokenize___Array.npartitions.return.reduce_mul_self_numblock", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1114, "end_line": 1134, "span_ids": ["Array.numblocks", "Array.__dask_tokenize__", "Array:5", "Array.__dask_postpersist__", "Array.__dask_postcompute__", "Array.npartitions"], "tokens": 152}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def __dask_tokenize__(self):\n return self.name\n\n __dask_optimize__ = globalmethod(\n optimize, key=\"array_optimize\", falsey=dont_optimize\n )\n __dask_scheduler__ = staticmethod(threaded.get)\n\n def __dask_postcompute__(self):\n return finalize, ()\n\n def 
__dask_postpersist__(self):\n return Array, (self.name, self.chunks, self.dtype, self._meta)\n\n @property\n def numblocks(self):\n return tuple(map(len, self.chunks))\n\n @property\n def npartitions(self):\n return reduce(mul, self.numblocks, 1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.compute_chunk_sizes_Array.compute_chunk_sizes.return.x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.compute_chunk_sizes_Array.compute_chunk_sizes.return.x", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1136, "end_line": 1183, "span_ids": ["Array.compute_chunk_sizes"], "tokens": 375}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def compute_chunk_sizes(self):\n \"\"\"\n Compute the chunk sizes for a Dask array. This is especially useful\n when the chunk sizes are unknown (e.g., when indexing one Dask array\n with another).\n\n Notes\n -----\n This function modifies the Dask array in-place.\n\n Examples\n --------\n >>> import dask.array as da\n >>> import numpy as np\n >>> x = da.from_array([-2, -1, 0, 1, 2], chunks=2)\n >>> x.chunks\n ((2, 2, 1),)\n >>> y = x[x <= 0]\n >>> y.chunks\n ((nan, nan, nan),)\n >>> y.compute_chunk_sizes() # in-place computation\n dask.array<getitem, shape=(3,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>\n >>> y.chunks\n ((2, 1, 0),)\n\n \"\"\"\n x = self\n chunk_shapes = x.map_blocks(\n _get_chunk_shape,\n dtype=int,\n chunks=tuple(len(c) * (1,) for c in x.chunks) + ((x.ndim,),),\n new_axis=x.ndim,\n )\n\n c = []\n for i in range(x.ndim):\n s = x.ndim * [0] + [i]\n s[i] = slice(None)\n s = tuple(s)\n\n c.append(tuple(chunk_shapes[s]))\n\n # `map_blocks` assigns numpy dtypes\n # cast chunk dimensions back to python int before returning\n x._chunks = tuple(\n [tuple([int(chunk) for chunk in chunks]) for chunks in compute(tuple(c))[0]]\n )\n return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.shape_Array.__len__.return.sum_self_chunks_0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.shape_Array.__len__.return.sum_self_chunks_0_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1185, "end_line": 1215, "span_ids": ["Array._get_chunks", "Array:9", "Array._set_chunks", "Array.dtype", "Array.chunksize", "Array.__len__", "Array.shape"], "tokens": 217}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", 
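A short sketch tying together numblocks, npartitions, and the nested key structure __dask_keys__ returns (one level of list nesting per dimension, one key per block):

import dask.array as da

x = da.ones((4, 6), chunks=(2, 3))
assert x.numblocks == (2, 2)
assert x.npartitions == 4

keys = x.__dask_keys__()
assert len(keys) == 2 and len(keys[0]) == 2   # 2 x 2 grid of blocks
assert keys[1][0] == (x.name, 1, 0)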
"end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n @property\n def shape(self):\n return tuple(cached_cumsum(c, initial_zero=True)[-1] for c in self.chunks)\n\n @property\n def chunksize(self):\n return tuple(max(c) for c in self.chunks)\n\n @property\n def dtype(self):\n return self._meta.dtype\n\n def _get_chunks(self):\n return self._chunks\n\n def _set_chunks(self, chunks):\n msg = (\n \"Can not set chunks directly\\n\\n\"\n \"Please use the rechunk method instead:\\n\"\n \" x.rechunk({})\\n\\n\"\n \"If trying to avoid unknown chunks, use\\n\"\n \" x.compute_chunk_sizes()\"\n )\n raise TypeError(msg.format(chunks))\n\n chunks = property(_get_chunks, _set_chunks, \"chunks property\")\n\n def __len__(self):\n if not self.chunks:\n raise TypeError(\"len() of unsized object\")\n return sum(self.chunks[0])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__array_ufunc___Array.__array_ufunc__.if_method___call___.else_.return.NotImplemented": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__array_ufunc___Array.__array_ufunc__.if_method___call___.else_.return.NotImplemented", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1270, "end_line": 1307, "span_ids": ["Array.__array_ufunc__"], "tokens": 297}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def __array_ufunc__(self, numpy_ufunc, method, *inputs, **kwargs):\n out = kwargs.get(\"out\", ())\n for x in inputs + out:\n if _should_delegate(x):\n return NotImplemented\n\n if method == \"__call__\":\n if numpy_ufunc is np.matmul:\n from .routines import matmul\n\n # special case until apply_gufunc handles optional dimensions\n return matmul(*inputs, **kwargs)\n if numpy_ufunc.signature is not None:\n from .gufunc import apply_gufunc\n\n return apply_gufunc(\n numpy_ufunc, numpy_ufunc.signature, *inputs, **kwargs\n )\n if numpy_ufunc.nout > 1:\n from . import ufunc\n\n try:\n da_ufunc = getattr(ufunc, numpy_ufunc.__name__)\n except AttributeError:\n return NotImplemented\n return da_ufunc(*inputs, **kwargs)\n else:\n return elemwise(numpy_ufunc, *inputs, **kwargs)\n elif method == \"outer\":\n from . 
import ufunc\n\n try:\n da_ufunc = getattr(ufunc, numpy_ufunc.__name__)\n except AttributeError:\n return NotImplemented\n return da_ufunc.outer(*inputs, **kwargs)\n else:\n return NotImplemented", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__repr___Array._repr_html_.return._n_join_both_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__repr___Array._repr_html_.return._n_join_both_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1257, "end_line": 1294, "span_ids": ["Array._repr_html_", "Array.__repr__"], "tokens": 270}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def __repr__(self):\n \"\"\"\n\n >>> import dask.array as da\n >>> da.ones((10, 10), chunks=(5, 5), dtype='i4')\n dask.array<..., shape=(10, 10), dtype=int32, chunksize=(5, 5), chunktype=numpy.ndarray>\n \"\"\"\n chunksize = str(self.chunksize)\n name = self.name.rsplit(\"-\", 1)[0]\n return \"dask.array<%s, shape=%s, dtype=%s, chunksize=%s, chunktype=%s.%s>\" % (\n name,\n self.shape,\n self.dtype,\n chunksize,\n type(self._meta).__module__.split(\".\")[0],\n type(self._meta).__name__,\n )\n\n def _repr_html_(self):\n table = self._repr_html_table()\n try:\n grid = self.to_svg(size=config.get(\"array.svg.size\", 120))\n except NotImplementedError:\n grid = \"\"\n\n both = [\n \"<table>\",\n \"<tr>\",\n \"<td>\",\n table,\n \"</td>\",\n \"<td>\",\n grid,\n \"</td>\",\n \"</tr>\",\n \"</table>\",\n ]\n return \"\\n\".join(both)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array._repr_html_table_Array._repr_html_table.return._n_join_table_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array._repr_html_table_Array._repr_html_table.return._n_join_table_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1296, "end_line": 1330, "span_ids": ["Array._repr_html_table"], "tokens": 357}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def _repr_html_table(self):\n if \"sparse\" in typename(type(self._meta)):\n nbytes = None\n cbytes = None\n elif not math.isnan(self.nbytes):\n nbytes = format_bytes(self.nbytes)\n cbytes = format_bytes(np.prod(self.chunksize) * self.dtype.itemsize)\n else:\n nbytes = \"unknown\"\n cbytes = \"unknown\"\n\n table = [\n \"<table>\",\n \" <thead>\",\n \" <tr><td> </td><th> Array </th><th> Chunk </th></tr>\",\n \" </thead>\",\n \" <tbody>\",\n \" <tr><th> Bytes </th><td> %s </td> <td> %s </td></tr>\"\n % (nbytes, cbytes)\n if nbytes is not None\n else \"\",\n \" <tr><th> Shape </th><td> %s </td> <td> %s </td></tr>\"\n % (str(self.shape), str(self.chunksize)),\n \" <tr><th> Count </th><td> %d Tasks </td><td> %d Chunks </td></tr>\"\n % (len(self.__dask_graph__()), self.npartitions),\n \" <tr><th> Type </th><td> %s </td><td> %s.%s </td></tr>\"\n % (\n self.dtype,\n type(self._meta).__module__.split(\".\")[0],\n type(self._meta).__name__,\n ),\n \" </tbody>\",\n \"</table>
\",\n ]\n return \"\\n\".join(table)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.ndim_Array.__array__.return.x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.ndim_Array.__array__.return.x", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1332, "end_line": 1369, "span_ids": ["Array.name_26", "Array.ndim", "Array.__array__", "Array.size", "Array.name", "Array.itemsize", "Array:11", "Array.nbytes"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n @property\n def ndim(self):\n return len(self.shape)\n\n @property\n def size(self):\n \"\"\" Number of elements in array \"\"\"\n return reduce(mul, self.shape, 1)\n\n @property\n def nbytes(self):\n \"\"\" Number of bytes in array \"\"\"\n return self.size * self.dtype.itemsize\n\n @property\n def itemsize(self):\n \"\"\" Length of one array element in bytes \"\"\"\n return self.dtype.itemsize\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, val):\n self._name = val\n # Clear the key cache when the name is reset\n self._cached_keys = None\n\n __array_priority__ = 11 # higher than numpy.ndarray and numpy.matrix\n\n def __array__(self, dtype=None, **kwargs):\n x = self.compute()\n if dtype and x.dtype != dtype:\n x = x.astype(dtype)\n if not isinstance(x, np.ndarray):\n x = np.array(x)\n return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__array_function___Array.__array_function__.handle_nonmatching_names.return._HANDLED_FUNCTIONS_func_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__array_function___Array.__array_function__.handle_nonmatching_names.return._HANDLED_FUNCTIONS_func_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1371, "end_line": 1392, "span_ids": ["Array.__array_function__"], "tokens": 221}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def __array_function__(self, func, types, args, kwargs):\n import dask.array as module\n\n def handle_nonmatching_names(func, args, kwargs):\n if func not in _HANDLED_FUNCTIONS:\n warnings.warn(\n 
\"The `{}` function is not implemented by Dask array. \"\n \"You may want to use the da.map_blocks function \"\n \"or something similar to silence this warning. \"\n \"Your code may stop working in a future release.\".format(\n func.__module__ + \".\" + func.__name__\n ),\n FutureWarning,\n )\n # Need to convert to array object (e.g. numpy.ndarray or\n # cupy.ndarray) as needed, so we can call the NumPy function\n # again and it gets the chance to dispatch to the right\n # implementation.\n args, kwargs = compute(args, kwargs)\n return func(*args, **kwargs)\n\n return _HANDLED_FUNCTIONS[func](*args, **kwargs)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__array_function__._First_verify_that_all__Array.__array_function__.return.da_func_args_kwargs_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__array_function__._First_verify_that_all__Array.__array_function__.return.da_func_args_kwargs_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1394, "end_line": 1413, "span_ids": ["Array.__array_function__"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def __array_function__(self, func, types, args, kwargs):\n\n # First, verify that all types are handled by Dask. Otherwise, return NotImplemented.\n if not all(type is Array or is_valid_chunk_type(type) for type in types):\n return NotImplemented\n\n # Now try to find a matching function name. 
If that doesn't work, we may\n # be dealing with an alias or a function that's simply not in the Dask API.\n # Handle aliases via the _HANDLED_FUNCTIONS dict mapping, and warn otherwise.\n for submodule in func.__module__.split(\".\")[1:]:\n try:\n module = getattr(module, submodule)\n except AttributeError:\n return handle_nonmatching_names(func, args, kwargs)\n\n if not hasattr(module, func.__name__):\n return handle_nonmatching_names(func, args, kwargs)\n\n da_func = getattr(module, func.__name__)\n if da_func is func:\n return handle_nonmatching_names(func, args, kwargs)\n return da_func(*args, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array._elemwise_Array.to_svg.return.svg_self_chunks_size_siz": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array._elemwise_Array.to_svg.return.svg_self_chunks_size_siz", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1415, "end_line": 1447, "span_ids": ["Array.to_svg", "Array.store", "Array._elemwise"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n @property\n def _elemwise(self):\n return elemwise\n\n @wraps(store)\n def store(self, target, **kwargs):\n r = store([self], [target], **kwargs)\n\n if kwargs.get(\"return_stored\", False):\n r = r[0]\n\n return r\n\n def to_svg(self, size=500):\n \"\"\"Convert chunks from Dask Array into an SVG Image\n\n Parameters\n ----------\n chunks: tuple\n size: int\n Rough size of the image\n\n Examples\n --------\n >>> x.to_svg(size=500) # doctest: +SKIP\n\n Returns\n -------\n text: An svg string depicting the array as a grid of chunks\n \"\"\"\n from .svg import svg\n\n return svg(self.chunks, size=size)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.to_hdf5_Array.to_hdf5.return.to_hdf5_filename_datapat": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.to_hdf5_Array.to_hdf5.return.to_hdf5_filename_datapat", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1449, "end_line": 1463, "span_ids": ["Array.to_hdf5"], "tokens": 140}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class 
Array(DaskMethodsMixin):\n\n def to_hdf5(self, filename, datapath, **kwargs):\n \"\"\"Store array in HDF5 file\n\n >>> x.to_hdf5('myfile.hdf5', '/x') # doctest: +SKIP\n\n Optionally provide arguments as though to ``h5py.File.create_dataset``\n\n >>> x.to_hdf5('myfile.hdf5', '/x', compression='lzf', shuffle=True) # doctest: +SKIP\n\n See Also\n --------\n da.store\n h5py.File.create_dataset\n \"\"\"\n return to_hdf5(filename, datapath, self, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.to_dask_dataframe_Array.to_dask_dataframe.return.from_dask_array_self_col": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.to_dask_dataframe_Array.to_dask_dataframe.return.from_dask_array_self_col", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1465, "end_line": 1494, "span_ids": ["Array.to_dask_dataframe"], "tokens": 284}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def to_dask_dataframe(self, columns=None, index=None, meta=None):\n \"\"\"Convert dask Array to dask Dataframe\n\n Parameters\n ----------\n columns: list or string\n list of column names if DataFrame, single string if Series\n index : dask.dataframe.Index, optional\n An optional *dask* Index to use for the output Series or DataFrame.\n\n The default output index depends on whether the array has any unknown\n chunks. If there are any unknown chunks, the output has ``None``\n for all the divisions (one per chunk). If all the chunks are known,\n a default index with known divisions is created.\n\n Specifying ``index`` can be useful if you're conforming a Dask Array\n to an existing dask Series or DataFrame, and you would like the\n indices to match.\n meta : object, optional\n An optional `meta` parameter can be passed for dask\n to specify the concrete dataframe type to use for partitions of\n the Dask dataframe. 
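[Editor's note] A short usage sketch of the `to_dask_dataframe` API documented here (editor's illustration, not from the dask docs): a 2-d array maps to a dask DataFrame and a 1-d array to a dask Series.

```python
import dask.array as da

x = da.random.random((8, 2), chunks=(4, 2))
df = x.to_dask_dataframe(columns=["a", "b"])   # 2-d array -> dask DataFrame
s = x[:, 0].to_dask_dataframe(columns="a")     # 1-d array -> dask Series
print(df.head())
```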
By default, pandas DataFrame is used.\n\n See Also\n --------\n dask.dataframe.from_dask_array\n \"\"\"\n from ..dataframe import from_dask_array\n\n return from_dask_array(self, columns=columns, index=index, meta=meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__bool___Array.__complex__.return.self__scalarfunc_complex_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__bool___Array.__complex__.return.self__scalarfunc_complex_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1496, "end_line": 1522, "span_ids": ["Array.__int__", "Array._scalarfunc", "Array.__float__", "Array:13", "Array.__complex__", "Array.__bool__", "Array:15"], "tokens": 189}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def __bool__(self):\n if self.size > 1:\n raise ValueError(\n \"The truth value of a {0} is ambiguous. \"\n \"Use a.any() or a.all().\".format(self.__class__.__name__)\n )\n else:\n return bool(self.compute())\n\n __nonzero__ = __bool__ # python 2\n\n def _scalarfunc(self, cast_type):\n if self.size > 1:\n raise TypeError(\"Only length-1 arrays can be converted to Python scalars\")\n else:\n return cast_type(self.compute())\n\n def __int__(self):\n return self._scalarfunc(int)\n\n __long__ = __int__ # python 2\n\n def __float__(self):\n return self._scalarfunc(float)\n\n def __complex__(self):\n return self._scalarfunc(complex)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__setitem___Array.__setitem__.if_isinstance_key_Array_.else_.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__setitem___Array.__setitem__.if_isinstance_key_Array_.else_.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1576, "end_line": 1599, "span_ids": ["Array.__setitem__"], "tokens": 209}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def __setitem__(self, key, value):\n from .routines import where\n\n if isinstance(key, Array):\n if isinstance(value, Array) and value.ndim > 1:\n raise ValueError(\"boolean index array should have 1 
dimension\")\n try:\n y = where(key, value, self)\n except ValueError as e:\n raise ValueError(\n \"Boolean index assignment in Dask \"\n \"expects equally shaped arrays.\\nExample: da1[da2] = da3 \"\n \"where da1.shape == (4,), da2.shape == (4,) \"\n \"and da3.shape == (4,).\"\n ) from e\n self._meta = y._meta\n self.dask = y.dask\n self.name = y.name\n self._chunks = y.chunks\n return self\n else:\n raise NotImplementedError(\n \"Item assignment with %s not supported\" % type(key)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__getitem___Array.__getitem__.return.Array_graph_out_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__getitem___Array.__getitem__.return.Array_graph_out_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1541, "end_line": 1592, "span_ids": ["Array.__getitem__"], "tokens": 459}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def __getitem__(self, index):\n # Field access, e.g. x['a'] or x[['a', 'b']]\n if isinstance(index, str) or (\n isinstance(index, list) and index and all(isinstance(i, str) for i in index)\n ):\n if isinstance(index, str):\n dt = self.dtype[index]\n else:\n dt = _make_sliced_dtype(self.dtype, index)\n\n if dt.shape:\n new_axis = list(range(self.ndim, self.ndim + len(dt.shape)))\n chunks = self.chunks + tuple((i,) for i in dt.shape)\n return self.map_blocks(\n getitem, index, dtype=dt.base, chunks=chunks, new_axis=new_axis\n )\n else:\n return self.map_blocks(getitem, index, dtype=dt)\n\n if not isinstance(index, tuple):\n index = (index,)\n\n from .slicing import (\n normalize_index,\n slice_with_int_dask_array,\n slice_with_bool_dask_array,\n )\n\n index2 = normalize_index(index, self.shape)\n dependencies = {self.name}\n for i in index2:\n if isinstance(i, Array):\n dependencies.add(i.name)\n\n if any(isinstance(i, Array) and i.dtype.kind in \"iu\" for i in index2):\n self, index2 = slice_with_int_dask_array(self, index2)\n if any(isinstance(i, Array) and i.dtype == bool for i in index2):\n self, index2 = slice_with_bool_dask_array(self, index2)\n\n if all(isinstance(i, slice) and i == slice(None) for i in index2):\n return self\n\n out = \"getitem-\" + tokenize(self, index2)\n dsk, chunks = slice_array(out, self.name, self.chunks, index2, self.itemsize)\n\n graph = HighLevelGraph.from_collections(out, dsk, dependencies=[self])\n\n meta = meta_from_array(self._meta, ndim=len(chunks))\n if np.isscalar(meta):\n meta = np.array(meta)\n\n return Array(graph, out, chunks, meta=meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array._vindex_Array._vindex.return._vindex_self_key_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array._vindex_Array._vindex.return._vindex_self_key_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1594, "end_line": 1612, "span_ids": ["Array._vindex"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def _vindex(self, key):\n if not isinstance(key, tuple):\n key = (key,)\n if any(k is None for k in key):\n raise IndexError(\n \"vindex does not support indexing with None (np.newaxis), \"\n \"got {}\".format(key)\n )\n if all(isinstance(k, slice) for k in key):\n if all(\n k.indices(d) == slice(0, d).indices(d) for k, d in zip(key, self.shape)\n ):\n return self\n raise IndexError(\n \"vindex requires at least one non-slice to vectorize over \"\n \"when the slices are not over the entire array (i.e, x[:]). \"\n \"Use normal slicing instead when only using slices. Got: {}\".format(key)\n )\n return _vindex(self, *key)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.vindex_Array.vindex.return.IndexCallable_self__vinde": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.vindex_Array.vindex.return.IndexCallable_self__vinde", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1674, "end_line": 1696, "span_ids": ["Array.vindex"], "tokens": 251}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n @property\n def vindex(self):\n \"\"\"Vectorized indexing with broadcasting.\n\n This is equivalent to numpy's advanced indexing, using arrays that are\n broadcast against each other. This allows for pointwise indexing:\n\n >>> import dask.array as da\n >>> x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n >>> x = da.from_array(x, chunks=2)\n >>> x.vindex[[0, 1, 2], [0, 1, 2]].compute()\n array([1, 5, 9])\n\n Mixed basic/advanced indexing with slices/arrays is also supported. 
The\n order of dimensions in the result follows those proposed for\n `ndarray.vindex `_:\n the subspace spanned by arrays is followed by all slices.\n\n Note: ``vindex`` provides more general functionality than standard\n indexing, but it also has fewer optimizations and can be significantly\n slower.\n \"\"\"\n return IndexCallable(self._vindex)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array._blocks_Array._blocks.return.Array_graph_name_chunks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array._blocks_Array._blocks.return.Array_graph_name_chunks", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1637, "end_line": 1662, "span_ids": ["Array._blocks"], "tokens": 257}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def _blocks(self, index):\n from .slicing import normalize_index\n\n if not isinstance(index, tuple):\n index = (index,)\n if sum(isinstance(ind, (np.ndarray, list)) for ind in index) > 1:\n raise ValueError(\"Can only slice with a single list\")\n if any(ind is None for ind in index):\n raise ValueError(\"Slicing with np.newaxis or None is not supported\")\n index = normalize_index(index, self.numblocks)\n index = tuple(slice(k, k + 1) if isinstance(k, Number) else k for k in index)\n\n name = \"blocks-\" + tokenize(self, index)\n\n new_keys = np.array(self.__dask_keys__(), dtype=object)[index]\n\n chunks = tuple(\n tuple(np.array(c)[i].tolist()) for c, i in zip(self.chunks, index)\n )\n\n keys = product(*(range(len(c)) for c in chunks))\n\n layer = {(name,) + key: tuple(new_keys[key].tolist()) for key in keys}\n\n graph = HighLevelGraph.from_collections(name, layer, dependencies=[self])\n return Array(graph, name, chunks, meta=self)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.blocks_Array.blocks.return.IndexCallable_self__block": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.blocks_Array.blocks.return.IndexCallable_self__block", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1664, "end_line": 1696, "span_ids": ["Array.blocks"], "tokens": 327}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n @property\n def blocks(self):\n \"\"\"Slice an array by blocks\n\n This allows blockwise slicing of a Dask array. You can perform normal\n Numpy-style slicing but now rather than slice elements of the array you\n slice along blocks so, for example, ``x.blocks[0, ::2]`` produces a new\n dask array with every other block in the first row of blocks.\n\n You can index blocks in any way that could index a numpy array of shape\n equal to the number of blocks in each dimension, (available as\n array.numblocks). The dimension of the output array will be the same\n as the dimension of this array, even if integer indices are passed.\n This does not support slicing with ``np.newaxis`` or multiple lists.\n\n Examples\n --------\n >>> import dask.array as da\n >>> x = da.arange(10, chunks=2)\n >>> x.blocks[0].compute()\n array([0, 1])\n >>> x.blocks[:3].compute()\n array([0, 1, 2, 3, 4, 5])\n >>> x.blocks[::2].compute()\n array([0, 1, 4, 5, 8, 9])\n >>> x.blocks[[-1, 0]].compute()\n array([8, 9, 0, 1])\n\n Returns\n -------\n A Dask array\n \"\"\"\n return IndexCallable(self._blocks)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.partitions_Array.partitions.return.self_blocks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.partitions_Array.partitions.return.self_blocks", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1698, "end_line": 1735, "span_ids": ["Array.partitions"], "tokens": 381}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n @property\n def partitions(self):\n \"\"\"Slice an array by partitions. Alias of dask array .blocks attribute.\n\n This alias allows you to write agnostic code that works with both\n dask arrays and dask dataframes.\n\n This allows blockwise slicing of a Dask array. You can perform normal\n Numpy-style slicing but now rather than slice elements of the array you\n slice along blocks so, for example, ``x.blocks[0, ::2]`` produces a new\n dask array with every other block in the first row of blocks.\n\n You can index blocks in any way that could index a numpy array of shape\n equal to the number of blocks in each dimension, (available as\n array.numblocks). 
The dimension of the output array will be the same\n as the dimension of this array, even if integer indices are passed.\n This does not support slicing with ``np.newaxis`` or multiple lists.\n\n Examples\n --------\n >>> import dask.array as da\n >>> x = da.arange(10, chunks=2)\n >>> x.partitions[0].compute()\n array([0, 1])\n >>> x.partitions[:3].compute()\n array([0, 1, 2, 3, 4, 5])\n >>> x.partitions[::2].compute()\n array([0, 1, 4, 5, 8, 9])\n >>> x.partitions[[-1, 0]].compute()\n array([8, 9, 0, 1])\n >>> all(x.partitions[:].compute() == x.blocks[:].compute())\n True\n\n Returns\n -------\n A Dask array\n \"\"\"\n return self.blocks", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.dot_Array.argtopk.return.argtopk_self_k_axis_axi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.dot_Array.argtopk.return.argtopk_self_k_axis_axi", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1737, "end_line": 1801, "span_ids": ["Array.T", "Array.argtopk", "Array.dot", "Array.choose", "Array.reshape", "Array.A", "Array.topk", "Array.transpose", "Array.ravel", "Array:17"], "tokens": 439}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n @derived_from(np.ndarray)\n def dot(self, other):\n from .routines import tensordot\n\n return tensordot(self, other, axes=((self.ndim - 1,), (other.ndim - 2,)))\n\n @property\n def A(self):\n return self\n\n @property\n def T(self):\n return self.transpose()\n\n @derived_from(np.ndarray)\n def transpose(self, *axes):\n from .routines import transpose\n\n if not axes:\n axes = None\n elif len(axes) == 1 and isinstance(axes[0], Iterable):\n axes = axes[0]\n if (axes == tuple(range(self.ndim))) or (axes == tuple(range(-self.ndim, 0))):\n # no transpose necessary\n return self\n else:\n return transpose(self, axes=axes)\n\n @derived_from(np.ndarray)\n def ravel(self):\n from .routines import ravel\n\n return ravel(self)\n\n flatten = ravel\n\n @derived_from(np.ndarray)\n def choose(self, choices):\n from .routines import choose\n\n return choose(self, choices)\n\n @derived_from(np.ndarray)\n def reshape(self, *shape):\n from .reshape import reshape\n\n if len(shape) == 1 and not isinstance(shape[0], Number):\n shape = shape[0]\n return reshape(self, shape)\n\n def topk(self, k, axis=-1, split_every=None):\n \"\"\"The top k elements of an array.\n\n See ``da.topk`` for docstring\"\"\"\n from .reductions import topk\n\n return topk(self, k, axis=axis, split_every=split_every)\n\n def argtopk(self, k, axis=-1, split_every=None):\n \"\"\"The indices of the top k elements of an array.\n\n See ``da.argtopk`` for docstring\"\"\"\n from .reductions import argtopk\n\n return argtopk(self, k, axis=axis, split_every=split_every)", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.astype_Array.astype.return.self_map_blocks_chunk_ast": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.astype_Array.astype.return.self_map_blocks_chunk_ast", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1803, "end_line": 1843, "span_ids": ["Array.astype"], "tokens": 425}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def astype(self, dtype, **kwargs):\n \"\"\"Copy of the array, cast to a specified type.\n\n Parameters\n ----------\n dtype : str or dtype\n Typecode or data-type to which the array is cast.\n casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional\n Controls what kind of data casting may occur. Defaults to 'unsafe'\n for backwards compatibility.\n\n * 'no' means the data types should not be cast at all.\n * 'equiv' means only byte-order changes are allowed.\n * 'safe' means only casts which can preserve values are allowed.\n * 'same_kind' means only safe casts or casts within a kind,\n like float64 to float32, are allowed.\n * 'unsafe' means any data conversions may be done.\n copy : bool, optional\n By default, astype always returns a newly allocated array. 
If this\n is set to False and the `dtype` requirement is satisfied, the input\n array is returned instead of a copy.\n \"\"\"\n # Scalars don't take `casting` or `copy` kwargs - as such we only pass\n # them to `map_blocks` if specified by user (different than defaults).\n extra = set(kwargs) - {\"casting\", \"copy\"}\n if extra:\n raise TypeError(\n \"astype does not take the following keyword \"\n \"arguments: {0!s}\".format(list(extra))\n )\n casting = kwargs.get(\"casting\", \"unsafe\")\n dtype = np.dtype(dtype)\n if self.dtype == dtype:\n return self\n elif not np.can_cast(self.dtype, dtype, casting=casting):\n raise TypeError(\n \"Cannot cast array from {0!r} to {1!r}\"\n \" according to the rule \"\n \"{2!r}\".format(self.dtype, dtype, casting)\n )\n return self.map_blocks(chunk.astype, dtype=dtype, astype_dtype=dtype, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__abs___Array.__sub__.return.elemwise_operator_sub_se": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__abs___Array.__sub__.return.elemwise_operator_sub_se", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1845, "end_line": 1955, "span_ids": ["Array.__ne__", "Array.__invert__", "Array.__rand__", "Array.__rmod__", "Array.__radd__", "Array.__lshift__", "Array.__rpow__", "Array.__ror__", "Array.__rdiv__", "Array.__le__", "Array.__mod__", "Array.__eq__", "Array.__div__", "Array.__rlshift__", "Array.__sub__", "Array.__mul__", "Array.__add__", "Array.__and__", "Array.__neg__", "Array.__ge__", "Array.__lt__", "Array.__gt__", "Array.__pos__", "Array.__rshift__", "Array.__rmul__", "Array.__rrshift__", "Array.__pow__", "Array.__abs__", "Array.__or__"], "tokens": 805}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def __abs__(self):\n return elemwise(operator.abs, self)\n\n @check_if_handled_given_other\n def __add__(self, other):\n return elemwise(operator.add, self, other)\n\n @check_if_handled_given_other\n def __radd__(self, other):\n return elemwise(operator.add, other, self)\n\n @check_if_handled_given_other\n def __and__(self, other):\n return elemwise(operator.and_, self, other)\n\n @check_if_handled_given_other\n def __rand__(self, other):\n return elemwise(operator.and_, other, self)\n\n @check_if_handled_given_other\n def __div__(self, other):\n return elemwise(operator.div, self, other)\n\n @check_if_handled_given_other\n def __rdiv__(self, other):\n return elemwise(operator.div, other, self)\n\n @check_if_handled_given_other\n def __eq__(self, other):\n return elemwise(operator.eq, self, other)\n\n @check_if_handled_given_other\n def __gt__(self, other):\n return elemwise(operator.gt, self, other)\n\n @check_if_handled_given_other\n def __ge__(self, other):\n return elemwise(operator.ge, self, other)\n\n def __invert__(self):\n return 
elemwise(operator.invert, self)\n\n @check_if_handled_given_other\n def __lshift__(self, other):\n return elemwise(operator.lshift, self, other)\n\n @check_if_handled_given_other\n def __rlshift__(self, other):\n return elemwise(operator.lshift, other, self)\n\n @check_if_handled_given_other\n def __lt__(self, other):\n return elemwise(operator.lt, self, other)\n\n @check_if_handled_given_other\n def __le__(self, other):\n return elemwise(operator.le, self, other)\n\n @check_if_handled_given_other\n def __mod__(self, other):\n return elemwise(operator.mod, self, other)\n\n @check_if_handled_given_other\n def __rmod__(self, other):\n return elemwise(operator.mod, other, self)\n\n @check_if_handled_given_other\n def __mul__(self, other):\n return elemwise(operator.mul, self, other)\n\n @check_if_handled_given_other\n def __rmul__(self, other):\n return elemwise(operator.mul, other, self)\n\n @check_if_handled_given_other\n def __ne__(self, other):\n return elemwise(operator.ne, self, other)\n\n def __neg__(self):\n return elemwise(operator.neg, self)\n\n @check_if_handled_given_other\n def __or__(self, other):\n return elemwise(operator.or_, self, other)\n\n def __pos__(self):\n return self\n\n @check_if_handled_given_other\n def __ror__(self, other):\n return elemwise(operator.or_, other, self)\n\n @check_if_handled_given_other\n def __pow__(self, other):\n return elemwise(operator.pow, self, other)\n\n @check_if_handled_given_other\n def __rpow__(self, other):\n return elemwise(operator.pow, other, self)\n\n @check_if_handled_given_other\n def __rshift__(self, other):\n return elemwise(operator.rshift, self, other)\n\n @check_if_handled_given_other\n def __rrshift__(self, other):\n return elemwise(operator.rshift, other, self)\n\n @check_if_handled_given_other\n def __sub__(self, other):\n return elemwise(operator.sub, self, other)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__rsub___Array.sum.return.sum_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__rsub___Array.sum.return.sum_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1957, "end_line": 2056, "span_ids": ["Array.__rxor__", "Array.argmin", "Array.__matmul__", "Array.__rdivmod__", "Array.all", "Array.argmax", "Array.sum", "Array.__rfloordiv__", "Array.__truediv__", "Array.__floordiv__", "Array.__xor__", "Array.any", "Array.__rtruediv__", "Array.min", "Array.__divmod__", "Array.__rsub__", "Array.max", "Array.__rmatmul__"], "tokens": 795}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n @check_if_handled_given_other\n def __rsub__(self, other):\n return elemwise(operator.sub, other, self)\n\n @check_if_handled_given_other\n def __truediv__(self, other):\n return elemwise(operator.truediv, self, other)\n\n @check_if_handled_given_other\n def __rtruediv__(self, 
other):\n return elemwise(operator.truediv, other, self)\n\n @check_if_handled_given_other\n def __floordiv__(self, other):\n return elemwise(operator.floordiv, self, other)\n\n @check_if_handled_given_other\n def __rfloordiv__(self, other):\n return elemwise(operator.floordiv, other, self)\n\n @check_if_handled_given_other\n def __xor__(self, other):\n return elemwise(operator.xor, self, other)\n\n @check_if_handled_given_other\n def __rxor__(self, other):\n return elemwise(operator.xor, other, self)\n\n @check_if_handled_given_other\n def __matmul__(self, other):\n from .routines import matmul\n\n return matmul(self, other)\n\n @check_if_handled_given_other\n def __rmatmul__(self, other):\n from .routines import matmul\n\n return matmul(other, self)\n\n @check_if_handled_given_other\n def __divmod__(self, other):\n from .ufunc import divmod\n\n return divmod(self, other)\n\n @check_if_handled_given_other\n def __rdivmod__(self, other):\n from .ufunc import divmod\n\n return divmod(other, self)\n\n @derived_from(np.ndarray)\n def any(self, axis=None, keepdims=False, split_every=None, out=None):\n from .reductions import any\n\n return any(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def all(self, axis=None, keepdims=False, split_every=None, out=None):\n from .reductions import all\n\n return all(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def min(self, axis=None, keepdims=False, split_every=None, out=None):\n from .reductions import min\n\n return min(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def max(self, axis=None, keepdims=False, split_every=None, out=None):\n from .reductions import max\n\n return max(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def argmin(self, axis=None, split_every=None, out=None):\n from .reductions import argmin\n\n return argmin(self, axis=axis, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def argmax(self, axis=None, split_every=None, out=None):\n from .reductions import argmax\n\n return argmax(self, axis=axis, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def sum(self, axis=None, dtype=None, keepdims=False, split_every=None, out=None):\n from .reductions import sum\n\n return sum(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n split_every=split_every,\n out=out,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.trace_Array.var.return.var_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.trace_Array.var.return.var_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2058, "end_line": 2120, "span_ids": ["Array.std", "Array.trace", "Array.var", "Array.mean", "Array.prod"], "tokens": 402}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n @derived_from(np.ndarray)\n def trace(self, offset=0, axis1=0, axis2=1, dtype=None):\n from .reductions import trace\n\n return trace(self, offset=offset, axis1=axis1, axis2=axis2, dtype=dtype)\n\n @derived_from(np.ndarray)\n def prod(self, axis=None, dtype=None, keepdims=False, split_every=None, out=None):\n from .reductions import prod\n\n return prod(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n split_every=split_every,\n out=out,\n )\n\n @derived_from(np.ndarray)\n def mean(self, axis=None, dtype=None, keepdims=False, split_every=None, out=None):\n from .reductions import mean\n\n return mean(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n split_every=split_every,\n out=out,\n )\n\n @derived_from(np.ndarray)\n def std(\n self, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None\n ):\n from .reductions import std\n\n return std(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n ddof=ddof,\n split_every=split_every,\n out=out,\n )\n\n @derived_from(np.ndarray)\n def var(\n self, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None\n ):\n from .reductions import var\n\n return var(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n ddof=ddof,\n split_every=split_every,\n out=out,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.moment_Array.map_blocks.return.map_blocks_func_self_a": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.moment_Array.map_blocks.return.map_blocks_func_self_a", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2122, "end_line": 2181, "span_ids": ["Array.moment", "Array.map_blocks"], "tokens": 415}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def moment(\n self,\n order,\n axis=None,\n dtype=None,\n keepdims=False,\n ddof=0,\n split_every=None,\n out=None,\n ):\n \"\"\"Calculate the nth centralized moment.\n\n Parameters\n ----------\n order : int\n Order of the moment that is returned, must be >= 2.\n axis : int, optional\n Axis along which the central moment is computed. The default is to\n compute the moment of the flattened array.\n dtype : data-type, optional\n Type to use in computing the moment. For arrays of integer type the\n default is float64; for arrays of float types it is the same as the\n array type.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left in the\n result as dimensions with size one. With this option, the result\n will broadcast correctly against the original array.\n ddof : int, optional\n \"Delta Degrees of Freedom\": the divisor used in the calculation is\n N - ddof, where N represents the number of elements. 
By default\n ddof is zero.\n\n Returns\n -------\n moment : ndarray\n\n References\n ----------\n .. [1] Pebay, Philippe (2008), \"Formulas for Robust, One-Pass Parallel\n Computation of Covariances and Arbitrary-Order Statistical Moments\",\n Technical Report SAND2008-6212, Sandia National Laboratories.\n\n \"\"\"\n\n from .reductions import moment\n\n return moment(\n self,\n order,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n ddof=ddof,\n split_every=split_every,\n out=out,\n )\n\n @wraps(map_blocks)\n def map_blocks(self, func, *args, **kwargs):\n return map_blocks(func, self, *args, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.map_overlap_Array.map_overlap.return.map_overlap_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.map_overlap_Array.map_overlap.return.map_overlap_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2244, "end_line": 2330, "span_ids": ["Array.map_overlap"], "tokens": 1065}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def map_overlap(self, func, depth, boundary=None, trim=True, **kwargs):\n \"\"\"Map a function over blocks of the array with some overlap\n\n We share neighboring zones between blocks of the array, then map a\n function, then trim away the neighboring strips.\n\n Note that this function will attempt to automatically determine the output\n array type before computing it, please refer to the ``meta`` keyword argument\n in ``map_blocks`` if you expect that the function will not succeed when\n operating on 0-d arrays.\n\n Parameters\n ----------\n func: function\n The function to apply to each extended block\n depth: int, tuple, or dict\n The number of elements that each block should share with its neighbors\n If a tuple or dict then this can be different per axis\n boundary: str, tuple, dict\n How to handle the boundaries.\n Values include 'reflect', 'periodic', 'nearest', 'none',\n or any constant value like 0 or np.nan\n trim: bool\n Whether or not to trim ``depth`` elements from each block after\n calling the map function.\n Set this to False if your mapping function already does this for you\n **kwargs:\n Other keyword arguments valid in ``map_blocks``\n\n Examples\n --------\n >>> import dask.array as da\n >>> x = np.array([1, 1, 2, 3, 3, 3, 2, 1, 1])\n >>> x = da.from_array(x, chunks=5)\n >>> def derivative(x):\n ... 
return x - np.roll(x, 1)\n\n >>> y = x.map_overlap(derivative, depth=1, boundary=0)\n >>> y.compute()\n array([ 1, 0, 1, 1, 0, 0, -1, -1, 0])\n\n >>> import dask.array as da\n >>> x = np.arange(16).reshape((4, 4))\n >>> d = da.from_array(x, chunks=(2, 2))\n >>> d.map_overlap(lambda x: x + x.size, depth=1).compute()\n array([[16, 17, 18, 19],\n [20, 21, 22, 23],\n [24, 25, 26, 27],\n [28, 29, 30, 31]])\n\n >>> func = lambda x: x + x.size\n >>> depth = {0: 1, 1: 1}\n >>> boundary = {0: 'reflect', 1: 'none'}\n >>> d.map_overlap(func, depth, boundary).compute() # doctest: +NORMALIZE_WHITESPACE\n array([[12, 13, 14, 15],\n [16, 17, 18, 19],\n [20, 21, 22, 23],\n [24, 25, 26, 27]])\n\n >>> x = np.arange(16).reshape((4, 4))\n >>> d = da.from_array(x, chunks=(2, 2))\n >>> y = d.map_overlap(lambda x: x + x[2], depth=1, meta=np.array(()))\n >>> y\n dask.array<_trim, shape=(4, 4), dtype=float64, chunksize=(2, 2), chunktype=numpy.ndarray>\n >>> y.compute()\n array([[ 4, 6, 8, 10],\n [ 8, 10, 12, 14],\n [20, 22, 24, 26],\n [24, 26, 28, 30]])\n\n >>> import cupy # doctest: +SKIP\n >>> x = cupy.arange(16).reshape((4, 4)) # doctest: +SKIP\n >>> d = da.from_array(x, chunks=(2, 2)) # doctest: +SKIP\n >>> y = d.map_overlap(lambda x: x + x[2], depth=1, meta=cupy.array(())) # doctest: +SKIP\n >>> y # doctest: +SKIP\n dask.array<_trim, shape=(4, 4), dtype=float64, chunksize=(2, 2), chunktype=cupy.ndarray>\n >>> y.compute() # doctest: +SKIP\n array([[ 4, 6, 8, 10],\n [ 8, 10, 12, 14],\n [20, 22, 24, 26],\n [24, 26, 28, 30]])\n \"\"\"\n from .overlap import map_overlap\n\n return map_overlap(\n func, self, depth=depth, boundary=boundary, trim=trim, **kwargs\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.view_Array.view.return.self_map_blocks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.view_Array.view.return.self_map_blocks_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2289, "end_line": 2330, "span_ids": ["Array.view"], "tokens": 353}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def view(self, dtype=None, order=\"C\"):\n \"\"\"Get a view of the array as a new data type\n\n Parameters\n ----------\n dtype:\n The dtype by which to view the array.\n The default, None, results in the view having the same data-type\n as the original array.\n order: string\n 'C' or 'F' (Fortran) ordering\n\n This reinterprets the bytes of the array under a new dtype. If that\n dtype does not have the same size as the original array then the shape\n will change.\n\n Beware that both numpy and dask.array can behave oddly when taking\n shape-changing views of arrays under Fortran ordering. 
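[Editor's note] A small sketch of the byte reinterpretation performed by ``view`` (editor's illustration; the printed values assume a little-endian machine):

```python
import dask.array as da

x = da.arange(4, chunks=2, dtype="int64")   # 8-byte integers
y = x.view("int32")                         # twice as many 4-byte integers
print(y.chunks)                             # ((4, 4),): last-axis chunks doubled
print(y.compute())                          # [0 0 1 0 2 0 3 0] on little-endian
```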
Under some\n versions of NumPy this function will fail when taking shape-changing\n views of Fortran ordered arrays if the first dimension has chunks of\n size one.\n \"\"\"\n if dtype is None:\n dtype = self.dtype\n else:\n dtype = np.dtype(dtype)\n mult = self.dtype.itemsize / dtype.itemsize\n\n if order == \"C\":\n chunks = self.chunks[:-1] + (\n tuple(ensure_int(c * mult) for c in self.chunks[-1]),\n )\n elif order == \"F\":\n chunks = (\n tuple(ensure_int(c * mult) for c in self.chunks[0]),\n ) + self.chunks[1:]\n else:\n raise ValueError(\"Order must be one of 'C' or 'F'\")\n\n return self.map_blocks(\n chunk.view, dtype, order=order, dtype=dtype, chunks=chunks\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.swapaxes_Array.__deepcopy__.return.c": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.swapaxes_Array.__deepcopy__.return.c", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2332, "end_line": 2356, "span_ids": ["Array.swapaxes", "Array.copy", "Array.__deepcopy__", "Array.round"], "tokens": 175}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n @derived_from(np.ndarray)\n def swapaxes(self, axis1, axis2):\n from .routines import swapaxes\n\n return swapaxes(self, axis1, axis2)\n\n @derived_from(np.ndarray)\n def round(self, decimals=0):\n from .routines import round\n\n return round(self, decimals=decimals)\n\n def copy(self):\n \"\"\"\n Copy array. 
This is a no-op for dask.arrays, which are immutable\n \"\"\"\n if self.npartitions == 1:\n return self.map_blocks(M.copy)\n else:\n return Array(self.dask, self.name, self.chunks, meta=self)\n\n def __deepcopy__(self, memo):\n c = self.copy()\n memo[id(self)] = c\n return c", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.to_delayed_Array.to_delayed.return.np_array_L_dtype_object_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.to_delayed_Array.to_delayed.return.np_array_L_dtype_object_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2358, "end_line": 2378, "span_ids": ["Array.to_delayed"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def to_delayed(self, optimize_graph=True):\n \"\"\"Convert into an array of ``dask.delayed`` objects, one per chunk.\n\n Parameters\n ----------\n optimize_graph : bool, optional\n If True [default], the graph is optimized before converting into\n ``dask.delayed`` objects.\n\n See Also\n --------\n dask.array.from_delayed\n \"\"\"\n keys = self.__dask_keys__()\n graph = self.__dask_graph__()\n if optimize_graph:\n graph = self.__dask_optimize__(graph, keys) # TODO, don't collapse graph\n name = \"delayed-\" + self.name\n graph = HighLevelGraph.from_collections(name, graph, dependencies=())\n L = ndeepmap(self.ndim, lambda k: Delayed(k, graph), keys)\n return np.array(L, dtype=object)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.repeat_ensure_int.return.i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.repeat_ensure_int.return.i", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2380, "end_line": 2417, "span_ids": ["ensure_int", "Array.to_zarr", "Array.to_tiledb", "Array.repeat", "Array.nonzero"], "tokens": 252}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n @derived_from(np.ndarray)\n def repeat(self, repeats, axis=None):\n from .creation import repeat\n\n return repeat(self, repeats, axis=axis)\n\n @derived_from(np.ndarray)\n def nonzero(self):\n from .routines import nonzero\n\n return 
nonzero(self)\n\n def to_zarr(self, *args, **kwargs):\n \"\"\"Save array to the zarr storage format\n\n See https://zarr.readthedocs.io for details about the format.\n\n See function ``to_zarr()`` for parameters.\n \"\"\"\n return to_zarr(self, *args, **kwargs)\n\n def to_tiledb(self, uri, *args, **kwargs):\n \"\"\"Save array to the TileDB storage manager\n\n See function ``to_tiledb()`` for argument documentation.\n\n See https://docs.tiledb.io for details about the format and engine.\n \"\"\"\n from .tiledb_io import to_tiledb\n\n return to_tiledb(self, uri, *args, **kwargs)\n\n\ndef ensure_int(f):\n i = int(f)\n if i != f:\n raise ValueError(\"Could not coerce %f to integer\" % f)\n return i", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_normalize_chunks_normalize_chunks.if_isinstance_chunks_lis.chunks.tuple_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_normalize_chunks_normalize_chunks.if_isinstance_chunks_lis.chunks.tuple_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2534, "end_line": 2614, "span_ids": ["normalize_chunks"], "tokens": 756}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def normalize_chunks(chunks, shape=None, limit=None, dtype=None, previous_chunks=None):\n \"\"\"Normalize chunks to tuple of tuples\n\n This takes in a variety of input types and information and produces a full\n tuple-of-tuples result for chunks, suitable to be passed to Array or\n rechunk or any other operation that creates a Dask array.\n\n Parameters\n ----------\n chunks: tuple, int, dict, or string\n The chunks to be normalized. See examples below for more details\n shape: Tuple[int]\n The shape of the array\n limit: int (optional)\n The maximum block size to target in bytes,\n if freedom is given to choose\n dtype: np.dtype\n previous_chunks: Tuple[Tuple[int]] optional\n Chunks from a previous array that we should use for inspiration when\n rechunking auto dimensions. 
If not provided but auto-chunking exists\n then auto-dimensions will prefer square-like chunk shapes.\n\n Examples\n --------\n Specify uniform chunk sizes\n\n >>> from dask.array.core import normalize_chunks\n >>> normalize_chunks((2, 2), shape=(5, 6))\n ((2, 2, 1), (2, 2, 2))\n\n Also passes through fully explicit tuple-of-tuples\n\n >>> normalize_chunks(((2, 2, 1), (2, 2, 2)), shape=(5, 6))\n ((2, 2, 1), (2, 2, 2))\n\n Cleans up lists to tuples\n\n >>> normalize_chunks([[2, 2], [3, 3]])\n ((2, 2), (3, 3))\n\n Expands integer inputs 10 -> (10, 10)\n\n >>> normalize_chunks(10, shape=(30, 5))\n ((10, 10, 10), (5,))\n\n Expands dict inputs\n\n >>> normalize_chunks({0: 2, 1: 3}, shape=(6, 6))\n ((2, 2, 2), (3, 3))\n\n The values -1 and None get mapped to full size\n\n >>> normalize_chunks((5, -1), shape=(10, 10))\n ((5, 5), (10,))\n\n Use the value \"auto\" to automatically determine chunk sizes along certain\n dimensions. This uses the ``limit=`` and ``dtype=`` keywords to\n determine how large to make the chunks. The term \"auto\" can be used\n anywhere an integer can be used. See array chunking documentation for more\n information.\n\n >>> normalize_chunks((\"auto\",), shape=(20,), limit=5, dtype='uint8')\n ((5, 5, 5, 5),)\n\n You can also use byte sizes (see ``dask.utils.parse_bytes``) in place of\n \"auto\" to ask for a particular size\n\n >>> normalize_chunks(\"1kiB\", shape=(2000,), dtype='float32')\n ((250, 250, 250, 250, 250, 250, 250, 250),)\n\n Respects null dimensions\n\n >>> normalize_chunks((), shape=(0, 0))\n ((0,), (0,))\n \"\"\"\n if dtype and not isinstance(dtype, np.dtype):\n dtype = np.dtype(dtype)\n if chunks is None:\n raise ValueError(CHUNKS_NONE_ERROR_MESSAGE)\n if isinstance(chunks, list):\n chunks = tuple(chunks)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_normalize_chunks.if_isinstance_chunks_Nu_normalize_chunks.return.tuple_tuple_int_x_if_not": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_normalize_chunks.if_isinstance_chunks_Nu_normalize_chunks.return.tuple_tuple_int_x_if_not", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2500, "end_line": 2578, "span_ids": ["normalize_chunks"], "tokens": 721}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def normalize_chunks(chunks, shape=None, limit=None, dtype=None, previous_chunks=None):\n # ... 
other code\n if isinstance(chunks, (Number, str)):\n chunks = (chunks,) * len(shape)\n if isinstance(chunks, dict):\n chunks = tuple(chunks.get(i, None) for i in range(len(shape)))\n if isinstance(chunks, np.ndarray):\n chunks = chunks.tolist()\n if not chunks and shape and all(s == 0 for s in shape):\n chunks = ((0,),) * len(shape)\n\n if (\n shape\n and len(shape) == 1\n and len(chunks) > 1\n and all(isinstance(c, (Number, str)) for c in chunks)\n ):\n chunks = (chunks,)\n\n if shape and len(chunks) != len(shape):\n raise ValueError(\n \"Chunks and shape must be of the same length/dimension. \"\n \"Got chunks=%s, shape=%s\" % (chunks, shape)\n )\n if -1 in chunks or None in chunks:\n chunks = tuple(s if c == -1 or c is None else c for c, s in zip(chunks, shape))\n\n # If specifying chunk size in bytes, use that value to set the limit.\n # Verify there is only one consistent value of limit or chunk-bytes used.\n for c in chunks:\n if isinstance(c, str) and c != \"auto\":\n parsed = parse_bytes(c)\n if limit is None:\n limit = parsed\n elif parsed != limit:\n raise ValueError(\n \"Only one consistent value of limit or chunk is allowed.\"\n \"Used %s != %s\" % (parsed, limit)\n )\n # Substitute byte limits with 'auto' now that limit is set.\n chunks = tuple(\"auto\" if isinstance(c, str) and c != \"auto\" else c for c in chunks)\n\n if any(c == \"auto\" for c in chunks):\n chunks = auto_chunks(chunks, shape, limit, dtype, previous_chunks)\n\n if shape is not None:\n chunks = tuple(c if c not in {None, -1} else s for c, s in zip(chunks, shape))\n\n if chunks and shape is not None:\n chunks = sum(\n (\n blockdims_from_blockshape((s,), (c,))\n if not isinstance(c, (tuple, list))\n else (c,)\n for s, c in zip(shape, chunks)\n ),\n (),\n )\n for c in chunks:\n if not c:\n raise ValueError(\n \"Empty tuples are not allowed in chunks. Express \"\n \"zero length dimensions with 0(s) in chunks\"\n )\n\n if shape is not None:\n if len(chunks) != len(shape):\n raise ValueError(\n \"Input array has %d dimensions but the supplied \"\n \"chunks has only %d dimensions\" % (len(shape), len(chunks))\n )\n if not all(\n c == s or (math.isnan(c) or math.isnan(s))\n for c, s in zip(map(sum, chunks), shape)\n ):\n raise ValueError(\n \"Chunks do not add up to shape. 
\"\n \"Got chunks=%s, shape=%s\" % (chunks, shape)\n )\n\n return tuple(tuple(int(x) if not math.isnan(x) else x for x in c) for c in chunks)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__compute_multiplier_auto_chunks.largest_block.np_prod_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__compute_multiplier_auto_chunks.largest_block.np_prod_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2581, "end_line": 2658, "span_ids": ["_compute_multiplier", "auto_chunks"], "tokens": 580}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _compute_multiplier(limit: int, dtype, largest_block: int, result):\n \"\"\"\n Utility function for auto_chunk, to fin how much larger or smaller the ideal\n chunk size is relative to what we have now.\n \"\"\"\n return (\n limit\n / dtype.itemsize\n / largest_block\n / np.prod(list(r if r != 0 else 1 for r in result.values()))\n )\n\n\ndef auto_chunks(chunks, shape, limit, dtype, previous_chunks=None):\n \"\"\"Determine automatic chunks\n\n This takes in a chunks value that contains ``\"auto\"`` values in certain\n dimensions and replaces those values with concrete dimension sizes that try\n to get chunks to be of a certain size in bytes, provided by the ``limit=``\n keyword. If multiple dimensions are marked as ``\"auto\"`` then they will\n all respond to meet the desired byte limit, trying to respect the aspect\n ratio of their dimensions in ``previous_chunks=``, if given.\n\n Parameters\n ----------\n chunks: Tuple\n A tuple of either dimensions or tuples of explicit chunk dimensions\n Some entries should be \"auto\"\n shape: Tuple[int]\n limit: int, str\n The maximum allowable size of a chunk in bytes\n previous_chunks: Tuple[Tuple[int]]\n\n See also\n --------\n normalize_chunks: for full docstring and parameters\n \"\"\"\n if previous_chunks is not None:\n previous_chunks = tuple(\n c if isinstance(c, tuple) else (c,) for c in previous_chunks\n )\n chunks = list(chunks)\n\n autos = {i for i, c in enumerate(chunks) if c == \"auto\"}\n if not autos:\n return tuple(chunks)\n\n if limit is None:\n limit = config.get(\"array.chunk-size\")\n if isinstance(limit, str):\n limit = parse_bytes(limit)\n\n if dtype is None:\n raise TypeError(\"DType must be known for auto-chunking\")\n\n if dtype.hasobject:\n raise NotImplementedError(\n \"Can not use auto rechunking with object dtype. 
\"\n \"We are unable to estimate the size in bytes of object data\"\n )\n\n for x in tuple(chunks) + tuple(shape):\n if (\n isinstance(x, Number)\n and np.isnan(x)\n or isinstance(x, tuple)\n and np.isnan(x).any()\n ):\n raise ValueError(\n \"Can not perform automatic rechunking with unknown \"\n \"(nan) chunk sizes.%s\" % unknown_chunk_message\n )\n\n limit = max(1, limit)\n\n largest_block = np.prod(\n [cs if isinstance(cs, Number) else max(cs) for cs in chunks if cs != \"auto\"]\n )\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_auto_chunks.if_previous_chunks__auto_chunks.if_previous_chunks_.else_.return.tuple_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_auto_chunks.if_previous_chunks__auto_chunks.if_previous_chunks_.else_.return.tuple_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2660, "end_line": 2716, "span_ids": ["auto_chunks"], "tokens": 493}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def auto_chunks(chunks, shape, limit, dtype, previous_chunks=None):\n # ... other code\n\n if previous_chunks:\n # Base ideal ratio on the median chunk size of the previous chunks\n result = {a: np.median(previous_chunks[a]) for a in autos}\n\n ideal_shape = []\n for i, s in enumerate(shape):\n chunk_frequencies = frequencies(previous_chunks[i])\n mode, count = max(chunk_frequencies.items(), key=lambda kv: kv[1])\n if mode > 1 and count >= len(previous_chunks[i]) / 2:\n ideal_shape.append(mode)\n else:\n ideal_shape.append(s)\n\n # How much larger or smaller the ideal chunk size is relative to what we have now\n multiplier = _compute_multiplier(limit, dtype, largest_block, result)\n\n last_multiplier = 0\n last_autos = set()\n while (\n multiplier != last_multiplier or autos != last_autos\n ): # while things change\n last_multiplier = multiplier # record previous values\n last_autos = set(autos) # record previous values\n\n # Expand or contract each of the dimensions appropriately\n for a in sorted(autos):\n if ideal_shape[a] == 0:\n result[a] = 0\n continue\n proposed = result[a] * multiplier ** (1 / len(autos))\n if proposed > shape[a]: # we've hit the shape boundary\n autos.remove(a)\n largest_block *= shape[a]\n chunks[a] = shape[a]\n del result[a]\n else:\n result[a] = round_to(proposed, ideal_shape[a])\n\n # recompute how much multiplier we have left, repeat\n multiplier = _compute_multiplier(limit, dtype, largest_block, result)\n\n for k, v in result.items():\n chunks[k] = v\n return tuple(chunks)\n\n else:\n size = (limit / dtype.itemsize / largest_block) ** (1 / len(autos))\n small = [i for i in autos if shape[i] < size]\n if small:\n for i in small:\n chunks[i] = (shape[i],)\n return auto_chunks(chunks, shape, limit, dtype)\n\n for i in autos:\n chunks[i] = round_to(size, shape[i])\n\n return tuple(chunks)", 
"start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_round_to__get_chunk_shape.return.s_len_s_None_sli": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_round_to__get_chunk_shape.return.s_len_s_None_sli", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2719, "end_line": 2743, "span_ids": ["round_to", "_get_chunk_shape"], "tokens": 224}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def round_to(c, s):\n \"\"\"Return a chunk dimension that is close to an even multiple or factor\n\n We want values for c that are nicely aligned with s.\n\n If c is smaller than s then we want the largest factor of s that is less than the\n desired chunk size, but not less than half, which is too much. If no such\n factor exists then we just go with the original chunk size and accept an\n uneven chunk at the end.\n\n If c is larger than s then we want the largest multiple of s that is still\n smaller than c.\n \"\"\"\n if c <= s:\n try:\n return max(f for f in factors(s) if c / 2 <= f <= c)\n except ValueError: # no matching factors within factor of two\n return max(1, int(c))\n else:\n return c // s * s\n\n\ndef _get_chunk_shape(a):\n s = np.asarray(a.shape, dtype=int)\n return s[len(s) * (None,) + (slice(None),)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from_array_from_array._Create_dask_array_from": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from_array_from_array._Create_dask_array_from", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2746, "end_line": 2841, "span_ids": ["from_array"], "tokens": 962}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def from_array(\n x,\n chunks=\"auto\",\n name=None,\n lock=False,\n asarray=None,\n fancy=True,\n getitem=None,\n meta=None,\n):\n \"\"\"Create dask array from something that looks like an array\n\n Input must have a ``.shape``, ``.ndim``, ``.dtype`` and support numpy-style slicing.\n\n Parameters\n ----------\n x : array_like\n chunks : int, tuple\n How to chunk the array. 
Must be one of the following forms:\n\n - A blocksize like 1000.\n - A blockshape like (1000, 1000).\n - Explicit sizes of all blocks along all dimensions like\n ((1000, 1000, 500), (400, 400)).\n - A size in bytes, like \"100 MiB\", which will choose a uniform\n block-like shape.\n - The word \"auto\", which acts like the above, but uses a configuration\n value ``array.chunk-size`` for the chunk size.\n\n -1 or None as a blocksize indicates the size of the corresponding\n dimension.\n name : str, optional\n The key name to use for the array. Defaults to a hash of ``x``.\n By default, hashing uses Python's standard sha1. This behaviour can be\n changed by installing cityhash, xxhash or murmurhash. If installed,\n a large-factor speedup can be obtained in the tokenisation step.\n Use ``name=False`` to generate a random name instead of hashing (fast).\n\n .. note::\n\n Because this ``name`` is used as the key in task graphs, you should\n ensure that it uniquely identifies the data contained within. If\n you'd like to provide a descriptive name that is still unique, combine\n the descriptive name with :func:`dask.base.tokenize` of the\n ``array_like``. See :ref:`graphs` for more.\n\n lock : bool or Lock, optional\n If ``x`` doesn't support concurrent reads then provide a lock here, or\n pass in True to have dask.array create one for you.\n asarray : bool, optional\n If True then call np.asarray on chunks to convert them to numpy arrays.\n If False then chunks are passed through unchanged.\n If None (default) then we use True if the ``__array_function__`` method\n is undefined.\n fancy : bool, optional\n If ``x`` doesn't support fancy indexing (e.g. indexing with lists or\n arrays) then set to False. Default is True.\n meta : Array-like, optional\n The metadata for the resulting dask array. This is the kind of array\n that will result from slicing the input array.\n Defaults to the input array.\n\n Examples\n --------\n\n >>> x = h5py.File('...')['/data/path'] # doctest: +SKIP\n >>> a = da.from_array(x, chunks=(1000, 1000)) # doctest: +SKIP\n\n If your underlying datastore does not support concurrent reads then include\n the ``lock=True`` keyword argument or ``lock=mylock`` if you want multiple\n arrays to coordinate around the same lock.\n\n >>> a = da.from_array(x, chunks=(1000, 1000), lock=True) # doctest: +SKIP\n\n If your underlying datastore has a ``.chunks`` attribute (as h5py and zarr\n datasets do) then a multiple of that chunk shape will be used if you\n do not provide a chunk shape.\n\n >>> a = da.from_array(x, chunks='auto') # doctest: +SKIP\n >>> a = da.from_array(x, chunks='100 MiB') # doctest: +SKIP\n >>> a = da.from_array(x) # doctest: +SKIP\n\n If providing a name, ensure that it is unique\n\n >>> import dask.base\n >>> token = dask.base.tokenize(x) # doctest: +SKIP\n >>> a = da.from_array(x, name='myarray-' + token) # doctest: +SKIP\n\n Numpy ndarrays are eagerly sliced and then embedded in the graph.\n\n >>> import dask.array\n >>> a = dask.array.from_array(np.array([[1, 2], [3, 4]]), chunks=(1,1))\n >>> a.dask[a.name, 0, 0][0]\n array([1])\n\n \"\"\"\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from_array.if_isinstance_x_Array__from_array.return.Array_dsk_name_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from_array.if_isinstance_x_Array__from_array.return.Array_dsk_name_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2842, "end_line": 2918, "span_ids": ["from_array"], "tokens": 659}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def from_array(\n x,\n chunks=\"auto\",\n name=None,\n lock=False,\n asarray=None,\n fancy=True,\n getitem=None,\n meta=None,\n):\n if isinstance(x, Array):\n raise ValueError(\n \"Array is already a dask array. Use 'asarray' or \" \"'rechunk' instead.\"\n )\n elif is_dask_collection(x):\n warnings.warn(\n \"Passing an object to dask.array.from_array which is already a \"\n \"Dask collection. This can lead to unexpected behavior.\"\n )\n\n if isinstance(x, (list, tuple, memoryview) + np.ScalarType):\n x = np.array(x)\n\n if asarray is None:\n asarray = not hasattr(x, \"__array_function__\")\n\n previous_chunks = getattr(x, \"chunks\", None)\n\n chunks = normalize_chunks(\n chunks, x.shape, dtype=x.dtype, previous_chunks=previous_chunks\n )\n\n if name in (None, True):\n token = tokenize(x, chunks)\n original_name = \"array-original-\" + token\n name = name or \"array-\" + token\n elif name is False:\n original_name = name = \"array-\" + str(uuid.uuid1())\n else:\n original_name = name\n\n if lock is True:\n lock = SerializableLock()\n\n is_ndarray = type(x) is np.ndarray\n is_single_block = all(len(c) == 1 for c in chunks)\n # Always use the getter for h5py etc. 
Not using isinstance(x, np.ndarray)\n # because np.matrix is a subclass of np.ndarray.\n if is_ndarray and not is_single_block and not lock:\n # eagerly slice numpy arrays to prevent memory blowup\n # GH5367, GH5601\n slices = slices_from_chunks(chunks)\n keys = product([name], *(range(len(bds)) for bds in chunks))\n values = [x[slc] for slc in slices]\n dsk = dict(zip(keys, values))\n\n elif is_ndarray and is_single_block:\n # No slicing needed\n dsk = {(name,) + (0,) * x.ndim: x}\n else:\n if getitem is None:\n if fancy:\n getitem = getter\n else:\n getitem = getter_nofancy\n\n dsk = getem(\n original_name,\n chunks,\n getitem=getitem,\n shape=x.shape,\n out_name=name,\n lock=lock,\n asarray=asarray,\n dtype=x.dtype,\n )\n dsk[original_name] = x\n\n # Workaround for TileDB: its indexing is 1-based,\n # and it doesn't seem to support 0-length slicing\n if x.__class__.__module__.split(\".\")[0] == \"tiledb\" and hasattr(x, \"_ctx_\"):\n return Array(dsk, name, chunks, dtype=x.dtype)\n\n if meta is None:\n meta = x\n\n return Array(dsk, name, chunks, meta=meta, dtype=getattr(x, \"dtype\", None))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from_zarr_from_zarr.return.from_array_z_chunks_nam": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from_zarr_from_zarr.return.from_array_z_chunks_nam", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2921, "end_line": 2964, "span_ids": ["from_zarr"], "tokens": 418}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def from_zarr(\n url, component=None, storage_options=None, chunks=None, name=None, **kwargs\n):\n \"\"\"Load array from the zarr storage format\n\n See https://zarr.readthedocs.io for details about the format.\n\n Parameters\n ----------\n url: Zarr Array or str or MutableMapping\n Location of the data. A URL can include a protocol specifier like s3://\n for remote data. Can also be any MutableMapping instance, which should\n be serializable if used in multiple processes.\n component: str or None\n If the location is a zarr group rather than an array, this is the\n subcomponent that should be loaded, something like ``'foo/bar'``.\n storage_options: dict\n Any additional parameters for the storage backend (ignored for local\n paths)\n chunks: tuple of ints or tuples of ints\n Passed to ``da.from_array``, allows setting the chunks on\n initialisation, if the chunking scheme in the on-disc dataset is not\n optimal for the calculations to follow.\n name : str, optional\n An optional keyname for the array. 
Defaults to hashing the input.\n kwargs: passed to ``zarr.Array``.\n \"\"\"\n import zarr\n\n storage_options = storage_options or {}\n if isinstance(url, zarr.Array):\n z = url\n elif isinstance(url, str):\n from ..bytes.core import get_mapper\n\n mapper = get_mapper(url, **storage_options)\n z = zarr.Array(mapper, read_only=True, path=component, **kwargs)\n else:\n mapper = url\n z = zarr.Array(mapper, read_only=True, path=component, **kwargs)\n chunks = chunks if chunks is not None else z.chunks\n if name is None:\n name = \"from-zarr-\" + tokenize(z, component, storage_options, chunks, **kwargs)\n return from_array(z, chunks, name=name)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_to_zarr_to_zarr.return.arr_store_z_lock_False_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_to_zarr_to_zarr.return.arr_store_z_lock_False_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2967, "end_line": 3066, "span_ids": ["to_zarr"], "tokens": 752}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_zarr(\n arr,\n url,\n component=None,\n storage_options=None,\n overwrite=False,\n compute=True,\n return_stored=False,\n **kwargs,\n):\n \"\"\"Save array to the zarr storage format\n\n See https://zarr.readthedocs.io for details about the format.\n\n Parameters\n ----------\n arr: dask.array\n Data to store\n url: Zarr Array or str or MutableMapping\n Location of the data. A URL can include a protocol specifier like s3://\n for remote data. Can also be any MutableMapping instance, which should\n be serializable if used in multiple processes.\n component: str or None\n If the location is a zarr group rather than an array, this is the\n subcomponent that should be created/over-written.\n storage_options: dict\n Any additional parameters for the storage backend (ignored for local\n paths)\n overwrite: bool\n If the given array already exists, overwrite=False will cause an error,\n whereas overwrite=True will replace the existing data. 
Note that this\n check is done at computation time, not during graph creation.\n compute, return_stored: see ``store()``\n kwargs: passed to the ``zarr.create()`` function, e.g., compression options\n\n Raises\n ------\n ValueError\n If ``arr`` has unknown chunk sizes, which is not supported by Zarr.\n\n See Also\n --------\n dask.array.Array.compute_chunk_sizes\n\n \"\"\"\n import zarr\n\n if np.isnan(arr.shape).any():\n raise ValueError(\n \"Saving a dask array with unknown chunk sizes is not \"\n \"currently supported by Zarr.%s\" % unknown_chunk_message\n )\n\n if isinstance(url, zarr.Array):\n z = url\n if isinstance(z.store, (dict, zarr.DictStore)) and \"distributed\" in config.get(\n \"scheduler\", \"\"\n ):\n raise RuntimeError(\n \"Cannot store into an in-memory Zarr Array using \"\n \"the Distributed Scheduler.\"\n )\n arr = arr.rechunk(z.chunks)\n return arr.store(z, lock=False, compute=compute, return_stored=return_stored)\n\n if not _check_regular_chunks(arr.chunks):\n raise ValueError(\n \"Attempt to save array to zarr with irregular \"\n \"chunking; please call `arr.rechunk(...)` first.\"\n )\n\n storage_options = storage_options or {}\n\n if isinstance(url, str):\n from ..bytes.core import get_mapper\n\n mapper = get_mapper(url, **storage_options)\n else:\n # assume the object passed is already a mapper\n mapper = url\n\n chunks = [c[0] for c in arr.chunks]\n\n # The zarr.create function has the side-effect of immediately\n # creating metadata on disk. This may not be desired,\n # particularly if compute=False. The caller may be creating many\n # arrays on a slow filesystem, with the desire that any I/O be\n # sharded across workers (not done serially on the originating\n # machine). Or the caller may decide later not to do this\n # computation, and so nothing should be written to disk.\n z = delayed(zarr.create)(\n shape=arr.shape,\n chunks=chunks,\n dtype=arr.dtype,\n store=mapper,\n path=component,\n overwrite=overwrite,\n **kwargs,\n )\n return arr.store(z, lock=False, compute=compute, return_stored=return_stored)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__check_regular_chunks__check_regular_chunks.return.True": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__check_regular_chunks__check_regular_chunks.return.True", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3069, "end_line": 3106, "span_ids": ["_check_regular_chunks"], "tokens": 264}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _check_regular_chunks(chunkset):\n \"\"\"Check if the chunks are regular\n\n \"Regular\" in this context means that along every axis, the chunks all\n have the same size, except the last one, which may be smaller\n\n Parameters\n ----------\n chunkset: tuple of tuples of ints\n From the ``.chunks`` attribute of an ``Array``\n\n Returns\n -------\n True if 
chunkset passes, else False\n\n Examples\n --------\n >>> import dask.array as da\n >>> arr = da.zeros(10, chunks=(5, ))\n >>> _check_regular_chunks(arr.chunks)\n True\n\n >>> arr = da.zeros(10, chunks=((3, 3, 3, 1), ))\n >>> _check_regular_chunks(arr.chunks)\n True\n\n >>> arr = da.zeros(10, chunks=((3, 1, 3, 3), ))\n >>> _check_regular_chunks(arr.chunks)\n False\n \"\"\"\n for chunks in chunkset:\n if len(chunks) == 1:\n continue\n if len(set(chunks[:-1])) > 1:\n return False\n if chunks[-1] > chunks[0]:\n return False\n return True", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from_delayed_from_delayed.return.Array_graph_name_chunks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from_delayed_from_delayed.return.Array_graph_name_chunks", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3224, "end_line": 3255, "span_ids": ["from_delayed"], "tokens": 335}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def from_delayed(value, shape, dtype=None, meta=None, name=None):\n \"\"\"Create a dask array from a dask delayed value\n\n This routine is useful for constructing dask arrays in an ad-hoc fashion\n using dask delayed, particularly when combined with stack and concatenate.\n\n The dask array will consist of a single chunk.\n\n Examples\n --------\n >>> import dask\n >>> import dask.array as da\n >>> import numpy as np\n >>> value = dask.delayed(np.ones)(5)\n >>> array = da.from_delayed(value, (5,), dtype=float)\n >>> array\n dask.array\n >>> array.compute()\n array([1., 1., 1., 1., 1.])\n \"\"\"\n from ..delayed import delayed, Delayed\n\n if not isinstance(value, Delayed) and hasattr(value, \"key\"):\n value = delayed(value)\n\n name = name or \"from-value-\" + tokenize(value, shape, dtype, meta)\n dsk = {(name,) + (0,) * len(shape): value.key}\n chunks = tuple((d,) for d in shape)\n # TODO: value._key may not be the name of the layer in value.dask\n # This should be fixed after we build full expression graphs\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[value])\n return Array(graph, name, chunks, dtype=dtype, meta=meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from_func_from_func.return.Array_dsk_name_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from_func_from_func.return.Array_dsk_name_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3142, "end_line": 3167, "span_ids": 
["from_func"], "tokens": 261}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def from_func(func, shape, dtype=None, name=None, args=(), kwargs={}):\n \"\"\"Create dask array in a single block by calling a function\n\n Calling the provided function with func(*args, **kwargs) should return a\n NumPy array of the indicated shape and dtype.\n\n Examples\n --------\n\n >>> a = from_func(np.arange, (3,), dtype='i8', args=(3,))\n >>> a.compute()\n array([0, 1, 2])\n\n This works particularly well when coupled with dask.array functions like\n concatenate and stack:\n\n >>> arrays = [from_func(np.array, (), dtype='i8', args=(n,)) for n in range(5)]\n >>> stack(arrays).compute()\n array([0, 1, 2, 3, 4])\n \"\"\"\n name = name or \"from_func-\" + tokenize(func, shape, dtype, args, kwargs)\n if args or kwargs:\n func = partial(func, *args, **kwargs)\n dsk = {(name,) + (0,) * len(shape): (func,)}\n chunks = tuple((i,) for i in shape)\n return Array(dsk, name, chunks, dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_common_blockdim_common_blockdim.return.tuple_out_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_common_blockdim_common_blockdim.return.tuple_out_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3170, "end_line": 3235, "span_ids": ["common_blockdim"], "tokens": 607}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def common_blockdim(blockdims):\n \"\"\"Find the common block dimensions from the list of block dimensions\n\n Currently only implements the simplest possible heuristic: the common\n block-dimension is the only one that does not span fully span a dimension.\n This is a conservative choice that allows us to avoid potentially very\n expensive rechunking.\n\n Assumes that each element of the input block dimensions has all the same\n sum (i.e., that they correspond to dimensions of the same size).\n\n Examples\n --------\n >>> common_blockdim([(3,), (2, 1)])\n (2, 1)\n >>> common_blockdim([(1, 2), (2, 1)])\n (1, 1, 1)\n >>> common_blockdim([(2, 2), (3, 1)]) # doctest: +SKIP\n Traceback (most recent call last):\n ...\n ValueError: Chunks do not align\n \"\"\"\n if not any(blockdims):\n return ()\n non_trivial_dims = set([d for d in blockdims if len(d) > 1])\n if len(non_trivial_dims) == 1:\n return first(non_trivial_dims)\n if len(non_trivial_dims) == 0:\n return max(blockdims, key=first)\n\n if np.isnan(sum(map(sum, blockdims))):\n raise ValueError(\n \"Arrays chunk sizes (%s) are unknown.\\n\\n\"\n \"A possible solution:\\n\"\n \" x.compute_chunk_sizes()\" % blockdims\n 
)\n\n if len(set(map(sum, non_trivial_dims))) > 1:\n raise ValueError(\"Chunks do not add up to same value\", blockdims)\n\n # We have multiple non-trivial chunks on this axis\n # e.g. (5, 2) and (4, 3)\n\n # We create a single chunk tuple with the same total length\n # that evenly divides both, e.g. (4, 1, 2)\n\n # To accomplish this we walk down all chunk tuples together, finding the\n # smallest element, adding it to the output, and subtracting it from all\n # other elements and remove the element itself. We stop once we have\n # burned through all of the chunk tuples.\n # For efficiency's sake we reverse the lists so that we can pop off the end\n rchunks = [list(ntd)[::-1] for ntd in non_trivial_dims]\n total = sum(first(non_trivial_dims))\n i = 0\n\n out = []\n while i < total:\n m = min(c[-1] for c in rchunks)\n out.append(m)\n for c in rchunks:\n c[-1] -= m\n if c[-1] == 0:\n c.pop()\n i += m\n\n return tuple(out)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_unify_chunks_unify_chunks.arrays._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_unify_chunks_unify_chunks.arrays._", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3238, "end_line": 3318, "span_ids": ["unify_chunks"], "tokens": 706}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def unify_chunks(*args, **kwargs):\n \"\"\"\n Unify chunks across a sequence of arrays\n\n This utility function is used within other common operations like\n ``map_blocks`` and ``blockwise``. 
It is not commonly used by end-users\n directly.\n\n Parameters\n ----------\n *args: sequence of Array, index pairs\n Sequence like (x, 'ij', y, 'jk', z, 'i')\n\n Examples\n --------\n >>> import dask.array as da\n >>> x = da.ones(10, chunks=((5, 2, 3),))\n >>> y = da.ones(10, chunks=((2, 3, 5),))\n >>> chunkss, arrays = unify_chunks(x, 'i', y, 'i')\n >>> chunkss\n {'i': (2, 3, 2, 3)}\n\n >>> x = da.ones((100, 10), chunks=(20, 5))\n >>> y = da.ones((10, 100), chunks=(4, 50))\n >>> chunkss, arrays = unify_chunks(x, 'ij', y, 'jk', 'constant', None)\n >>> chunkss # doctest: +SKIP\n {'k': (50, 50), 'i': (20, 20, 20, 20, 20), 'j': (4, 1, 3, 2)}\n\n >>> unify_chunks(0, None)\n ({}, [0])\n\n Returns\n -------\n chunkss : dict\n Map like {index: chunks}.\n arrays : list\n List of rechunked arrays.\n\n See Also\n --------\n common_blockdim\n \"\"\"\n if not args:\n return {}, []\n\n arginds = [\n (asanyarray(a) if ind is not None else a, ind) for a, ind in partition(2, args)\n ] # [(x, ij), (y, jk)]\n args = list(concat(arginds)) # [x, ij, y, jk]\n warn = kwargs.get(\"warn\", True)\n\n arrays, inds = zip(*arginds)\n if all(ind is None for ind in inds):\n return {}, list(arrays)\n if all(ind == inds[0] for ind in inds) and all(\n a.chunks == arrays[0].chunks for a in arrays\n ):\n return dict(zip(inds[0], arrays[0].chunks)), arrays\n\n nameinds = []\n blockdim_dict = dict()\n max_parts = 0\n for a, ind in arginds:\n if ind is not None:\n nameinds.append((a.name, ind))\n blockdim_dict[a.name] = a.chunks\n max_parts = max(max_parts, a.npartitions)\n else:\n nameinds.append((a, ind))\n\n chunkss = broadcast_dimensions(nameinds, blockdim_dict, consolidate=common_blockdim)\n nparts = np.prod(list(map(len, chunkss.values())))\n\n if warn and nparts and nparts >= max_parts * 10:\n warnings.warn(\n \"Increasing number of chunks by factor of %d\" % (nparts / max_parts),\n PerformanceWarning,\n stacklevel=3,\n )\n\n arrays = []\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_unify_chunks.for_a_i_in_arginds__unpack_singleton.return.x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_unify_chunks.for_a_i_in_arginds__unpack_singleton.return.x", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3319, "end_line": 3351, "span_ids": ["unify_chunks", "unpack_singleton"], "tokens": 218}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def unify_chunks(*args, **kwargs):\n # ... 
other code\n for a, i in arginds:\n if i is None:\n arrays.append(a)\n else:\n chunks = tuple(\n chunkss[j]\n if a.shape[n] > 1\n else a.shape[n]\n if not np.isnan(sum(chunkss[j]))\n else None\n for n, j in enumerate(i)\n )\n if chunks != a.chunks and all(a.chunks):\n arrays.append(a.rechunk(chunks))\n else:\n arrays.append(a)\n return chunkss, arrays\n\n\ndef unpack_singleton(x):\n \"\"\"\n\n >>> unpack_singleton([[[[1]]]])\n 1\n >>> unpack_singleton(np.array(np.datetime64('2000-01-01')))\n array('2000-01-01', dtype='datetime64[D]')\n \"\"\"\n while isinstance(x, (list, tuple)):\n try:\n x = x[0]\n except (IndexError, TypeError, KeyError):\n break\n return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_block_block._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_block_block._", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3354, "end_line": 3441, "span_ids": ["block"], "tokens": 772}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def block(arrays, allow_unknown_chunksizes=False):\n \"\"\"\n Assemble an nd-array from nested lists of blocks.\n\n Blocks in the innermost lists are concatenated along the last\n dimension (-1), then these are concatenated along the second-last\n dimension (-2), and so on until the outermost list is reached\n\n Blocks can be of any dimension, but will not be broadcasted using the normal\n rules. Instead, leading axes of size 1 are inserted, to make ``block.ndim``\n the same for all blocks. This is primarily useful for working with scalars,\n and means that code like ``block([v, 1])`` is valid, where\n ``v.ndim == 1``.\n\n When the nested list is two levels deep, this allows block matrices to be\n constructed from their components.\n\n Parameters\n ----------\n arrays : nested list of array_like or scalars (but not tuples)\n If passed a single ndarray or scalar (a nested list of depth 0), this\n is returned unmodified (and not copied).\n\n Elements shapes must match along the appropriate axes (without\n broadcasting), but leading 1s will be prepended to the shape as\n necessary to make the dimensions match.\n\n allow_unknown_chunksizes: bool\n Allow unknown chunksizes, such as come from converting from dask\n dataframes. Dask.array is unable to verify that chunks line up. 
If\n data comes from differently aligned sources then this can cause\n unexpected results.\n\n Returns\n -------\n block_array : ndarray\n The array assembled from the given blocks.\n\n The dimensionality of the output is equal to the greatest of:\n * the dimensionality of all the inputs\n * the depth to which the input list is nested\n\n Raises\n ------\n ValueError\n * If list depths are mismatched - for instance, ``[[a, b], c]`` is\n illegal, and should be spelt ``[[a, b], [c]]``\n * If lists are empty - for instance, ``[[a, b], []]``\n\n See Also\n --------\n concatenate : Join a sequence of arrays together.\n stack : Stack arrays in sequence along a new dimension.\n hstack : Stack arrays in sequence horizontally (column wise).\n vstack : Stack arrays in sequence vertically (row wise).\n dstack : Stack arrays in sequence depth wise (along third dimension).\n vsplit : Split array into a list of multiple sub-arrays vertically.\n\n Notes\n -----\n\n When called with only scalars, ``block`` is equivalent to an ndarray\n call. So ``block([[1, 2], [3, 4]])`` is equivalent to\n ``array([[1, 2], [3, 4]])``.\n\n This function does not enforce that the blocks lie on a fixed grid.\n ``block([[a, b], [c, d]])`` is not restricted to arrays of the form::\n\n AAAbb\n AAAbb\n cccDD\n\n But is also allowed to produce, for some ``a, b, c, d``::\n\n AAAbb\n AAAbb\n cDDDD\n\n Since concatenation happens along the last axis first, `block` is _not_\n capable of producing the following directly::\n\n AAAbb\n cccbb\n cccDD\n\n Matlab's \"square bracket stacking\", ``[A, B, ...; p, q, ...]``, is\n equivalent to ``block([[A, B, ...], [p, q, ...]])``.\n \"\"\"\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_block._This_was_copied_almost__block.return.rec_map_reduce_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_block._This_was_copied_almost__block.return.rec_map_reduce_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3443, "end_line": 3521, "span_ids": ["block"], "tokens": 633}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def block(arrays, allow_unknown_chunksizes=False):\n\n # This was copied almost verbatim from numpy.core.shape_base.block\n\n def atleast_nd(x, ndim):\n x = asanyarray(x)\n diff = max(ndim - x.ndim, 0)\n if diff == 0:\n return x\n else:\n return x[(None,) * diff + (Ellipsis,)]\n\n def format_index(index):\n return \"arrays\" + \"\".join(\"[{}]\".format(i) for i in index)\n\n rec = _Recurser(recurse_if=lambda x: type(x) is list)\n\n # ensure that the lists are all matched in depth\n list_ndim = None\n any_empty = False\n for index, value, entering in rec.walk(arrays):\n if type(value) is tuple:\n # not strictly necessary, but saves us from:\n # - more than one way to do things - no point treating tuples like\n # lists\n # - horribly confusing 
behaviour that results when tuples are\n # treated like ndarray\n raise TypeError(\n \"{} is a tuple. \"\n \"Only lists can be used to arrange blocks, and np.block does \"\n \"not allow implicit conversion from tuple to ndarray.\".format(\n format_index(index)\n )\n )\n if not entering:\n curr_depth = len(index)\n elif len(value) == 0:\n curr_depth = len(index) + 1\n any_empty = True\n else:\n continue\n\n if list_ndim is not None and list_ndim != curr_depth:\n raise ValueError(\n \"List depths are mismatched. First element was at depth {}, \"\n \"but there is an element at depth {} ({})\".format(\n list_ndim, curr_depth, format_index(index)\n )\n )\n list_ndim = curr_depth\n\n # do this here so we catch depth mismatches first\n if any_empty:\n raise ValueError(\"Lists cannot be empty\")\n\n # convert all the arrays to ndarrays\n arrays = rec.map_reduce(arrays, f_map=asanyarray, f_reduce=list)\n\n # determine the maximum dimension of the elements\n elem_ndim = rec.map_reduce(arrays, f_map=lambda xi: xi.ndim, f_reduce=max)\n ndim = max(list_ndim, elem_ndim)\n\n # first axis to concatenate along\n first_axis = ndim - list_ndim\n\n # Make all the elements the same dimension\n arrays = rec.map_reduce(\n arrays, f_map=lambda xi: atleast_nd(xi, ndim), f_reduce=list\n )\n\n # concatenate innermost lists on the right, outermost on the left\n return rec.map_reduce(\n arrays,\n f_reduce=lambda xs, axis: concatenate(\n list(xs), axis=axis, allow_unknown_chunksizes=allow_unknown_chunksizes\n ),\n f_kwargs=lambda axis: dict(axis=(axis + 1)),\n axis=first_axis,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_concatenate_concatenate.inds._list_range_ndim_for_i_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_concatenate_concatenate.inds._list_range_ndim_for_i_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3640, "end_line": 3743, "span_ids": ["concatenate"], "tokens": 795}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def concatenate(seq, axis=0, allow_unknown_chunksizes=False):\n \"\"\"\n Concatenate arrays along an existing axis\n\n Given a sequence of dask Arrays, form a new dask Array by stacking them\n along an existing dimension (axis=0 by default)\n\n Parameters\n ----------\n seq: list of dask.arrays\n axis: int\n Dimension along which to align all of the arrays\n allow_unknown_chunksizes: bool\n Allow unknown chunksizes, such as come from converting from dask\n dataframes. Dask.array is unable to verify that chunks line up. If\n data comes from differently aligned sources then this can cause\n unexpected results.\n\n Examples\n --------\n\n Create slices\n\n >>> import dask.array as da\n >>> import numpy as np\n\n >>> data = [da.from_array(np.ones((4, 4)), chunks=(2, 2))\n ... 
for i in range(3)]\n\n >>> x = da.concatenate(data, axis=0)\n >>> x.shape\n (12, 4)\n\n >>> da.concatenate(data, axis=1).shape\n (4, 12)\n\n Result is a new dask Array\n\n See Also\n --------\n stack\n \"\"\"\n from . import wrap\n\n seq = [asarray(a) for a in seq]\n\n if not seq:\n raise ValueError(\"Need array(s) to concatenate\")\n\n seq_metas = [meta_from_array(s) for s in seq]\n _concatenate = concatenate_lookup.dispatch(\n type(max(seq_metas, key=lambda x: getattr(x, \"__array_priority__\", 0)))\n )\n meta = _concatenate(seq_metas, axis=axis)\n\n # Promote types to match meta\n seq = [a.astype(meta.dtype) for a in seq]\n\n # Find output array shape\n ndim = len(seq[0].shape)\n shape = tuple(\n sum((a.shape[i] for a in seq)) if i == axis else seq[0].shape[i]\n for i in range(ndim)\n )\n\n # Drop empty arrays\n seq2 = [a for a in seq if a.size]\n if not seq2:\n seq2 = seq\n\n if axis < 0:\n axis = ndim + axis\n if axis >= ndim:\n msg = (\n \"Axis must be less than the number of dimensions\"\n \"\\nData has %d dimensions, but got axis=%d\"\n )\n raise ValueError(msg % (ndim, axis))\n\n n = len(seq2)\n if n == 0:\n try:\n return wrap.empty_like(meta, shape=shape, chunks=shape, dtype=meta.dtype)\n except TypeError:\n return wrap.empty(shape, chunks=shape, dtype=meta.dtype)\n elif n == 1:\n return seq2[0]\n\n if not allow_unknown_chunksizes and not all(\n i == axis or all(x.shape[i] == seq2[0].shape[i] for x in seq2)\n for i in range(ndim)\n ):\n if any(map(np.isnan, seq2[0].shape)):\n raise ValueError(\n \"Tried to concatenate arrays with unknown\"\n \" shape %s.\\n\\nTwo solutions:\\n\"\n \" 1. Force concatenation by passing\"\n \" allow_unknown_chunksizes=True.\\n\"\n \" 2. Compute shapes with \"\n \"[x.compute_chunk_sizes() for x in seq]\" % str(seq2[0].shape)\n )\n raise ValueError(\"Shapes do not align: %s\" % [x.shape for x in seq2])\n\n inds = [list(range(ndim)) for i in range(n)]\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_concatenate.for_i_ind_in_enumerate_i_concatenate.return.Array_graph_name_chunks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_concatenate.for_i_ind_in_enumerate_i_concatenate.return.Array_graph_name_chunks", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3628, "end_line": 3660, "span_ids": ["concatenate"], "tokens": 320}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def concatenate(seq, axis=0, allow_unknown_chunksizes=False):\n # ... 
other code\n for i, ind in enumerate(inds):\n ind[axis] = -(i + 1)\n\n uc_args = list(concat(zip(seq2, inds)))\n _, seq2 = unify_chunks(*uc_args, warn=False)\n\n bds = [a.chunks for a in seq2]\n\n chunks = (\n seq2[0].chunks[:axis]\n + (sum([bd[axis] for bd in bds], ()),)\n + seq2[0].chunks[axis + 1 :]\n )\n\n cum_dims = [0] + list(accumulate(add, [len(a.chunks[axis]) for a in seq2]))\n\n names = [a.name for a in seq2]\n\n name = \"concatenate-\" + tokenize(names, axis)\n keys = list(product([name], *[range(len(bd)) for bd in chunks]))\n\n values = [\n (names[bisect(cum_dims, key[axis + 1]) - 1],)\n + key[1 : axis + 1]\n + (key[axis + 1] - cum_dims[bisect(cum_dims, key[axis + 1]) - 1],)\n + key[axis + 2 :]\n for key in keys\n ]\n\n dsk = dict(zip(keys, values))\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=seq2)\n\n return Array(graph, name, chunks, meta=meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_load_store_chunk_load_chunk.return.load_store_chunk_None_ou": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_load_store_chunk_load_chunk.return.load_store_chunk_None_ou", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3663, "end_line": 3714, "span_ids": ["load_chunk", "load_store_chunk", "store_chunk"], "tokens": 351}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def load_store_chunk(x, out, index, lock, return_stored, load_stored):\n \"\"\"\n A function inserted in a Dask graph for storing a chunk.\n\n Parameters\n ----------\n x: array-like\n An array (potentially a NumPy one)\n out: array-like\n Where to store results to.\n index: slice-like\n Where to store the result from ``x`` in ``out``.\n lock: Lock-like or False\n Lock to use before writing to ``out``.\n return_stored: bool\n Whether to return ``out``.\n load_stored: bool\n Whether to return the array stored in ``out``.\n Ignored if ``return_stored`` is not ``True``.\n\n Examples\n --------\n\n >>> a = np.ones((5, 6))\n >>> b = np.empty(a.shape)\n >>> load_store_chunk(a, b, (slice(None), slice(None)), False, False, False)\n \"\"\"\n\n result = None\n if return_stored and not load_stored:\n result = out\n\n if lock:\n lock.acquire()\n try:\n if x is not None:\n out[index] = np.asanyarray(x)\n if return_stored and load_stored:\n result = out[index]\n finally:\n if lock:\n lock.release()\n\n return result\n\n\ndef store_chunk(x, out, index, lock, return_stored):\n return load_store_chunk(x, out, index, lock, return_stored, False)\n\n\ndef load_chunk(out, index, lock):\n return load_store_chunk(None, out, index, lock, True, True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
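A small usage sketch of the storage helpers defined just above (only functions from that chunk are used). With ``lock=False``, ``return_stored=True`` and ``load_stored=True``, ``load_store_chunk`` writes ``x`` into ``out[index]`` and returns the freshly written region; ``store_chunk`` and ``load_chunk`` are thin wrappers over the same call:

>>> import numpy as np
>>> from dask.array.core import load_store_chunk
>>> out = np.zeros((4, 4))
>>> load_store_chunk(np.ones((2, 2)), out, (slice(0, 2), slice(0, 2)), False, True, True)
array([[1., 1.],
       [1., 1.]])
>>> out[0, 0], out[3, 3]
(1.0, 0.0)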
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_insert_to_ooc_insert_to_ooc.return.dsk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_insert_to_ooc_insert_to_ooc.return.dsk", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3717, "end_line": 3773, "span_ids": ["insert_to_ooc"], "tokens": 481}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def insert_to_ooc(\n arr, out, lock=True, region=None, return_stored=False, load_stored=False, tok=None\n):\n \"\"\"\n Creates a Dask graph for storing chunks from ``arr`` in ``out``.\n\n Parameters\n ----------\n arr: da.Array\n A dask array\n out: array-like\n Where to store results too.\n lock: Lock-like or bool, optional\n Whether to lock or with what (default is ``True``,\n which means a ``threading.Lock`` instance).\n region: slice-like, optional\n Where in ``out`` to store ``arr``'s results\n (default is ``None``, meaning all of ``out``).\n return_stored: bool, optional\n Whether to return ``out``\n (default is ``False``, meaning ``None`` is returned).\n load_stored: bool, optional\n Whether to handling loading from ``out`` at the same time.\n Ignored if ``return_stored`` is not ``True``.\n (default is ``False``, meaning defer to ``return_stored``).\n tok: str, optional\n Token to use when naming keys\n\n Examples\n --------\n >>> import dask.array as da\n >>> d = da.ones((5, 6), chunks=(2, 3))\n >>> a = np.empty(d.shape)\n >>> insert_to_ooc(d, a) # doctest: +SKIP\n \"\"\"\n\n if lock is True:\n lock = Lock()\n\n slices = slices_from_chunks(arr.chunks)\n if region:\n slices = [fuse_slice(region, slc) for slc in slices]\n\n name = \"store-%s\" % (tok or str(uuid.uuid1()))\n func = store_chunk\n args = ()\n if return_stored and load_stored:\n name = \"load-%s\" % name\n func = load_store_chunk\n args = args + (load_stored,)\n\n dsk = {\n (name,) + t[1:]: (func, t, out, slc, lock, return_stored) + args\n for t, slc in zip(core.flatten(arr.__dask_keys__()), slices)\n }\n\n return dsk", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_retrieve_from_ooc_retrieve_from_ooc.return.load_dsk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_retrieve_from_ooc_retrieve_from_ooc.return.load_dsk", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3776, "end_line": 3806, "span_ids": ["retrieve_from_ooc"], "tokens": 251}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "def retrieve_from_ooc(keys, dsk_pre, dsk_post=None):\n \"\"\"\n Creates a Dask graph for loading stored ``keys`` from ``dsk``.\n\n Parameters\n ----------\n keys: Sequence\n A sequence containing Dask graph keys to load\n dsk_pre: Mapping\n A Dask graph corresponding to a Dask Array before computation\n dsk_post: Mapping, optional\n A Dask graph corresponding to a Dask Array after computation\n\n Examples\n --------\n >>> import dask.array as da\n >>> d = da.ones((5, 6), chunks=(2, 3))\n >>> a = np.empty(d.shape)\n >>> g = insert_to_ooc(d, a)\n >>> retrieve_from_ooc(g.keys(), g) # doctest: +SKIP\n \"\"\"\n\n if not dsk_post:\n dsk_post = {k: k for k in keys}\n\n load_dsk = {\n (\"load-\" + k[0],) + k[1:]: (load_chunk, dsk_post[k]) + dsk_pre[k][3:-1]\n for k in keys\n }\n\n return load_dsk", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_asarray_asarray.return.from_array_a_getitem_get": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_asarray_asarray.return.from_array_a_getitem_get", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3809, "end_line": 3844, "span_ids": ["asarray"], "tokens": 311}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def asarray(a, **kwargs):\n \"\"\"Convert the input to a dask array.\n\n Parameters\n ----------\n a : array-like\n Input data, in any form that can be converted to a dask array.\n\n Returns\n -------\n out : dask array\n Dask array interpretation of a.\n\n Examples\n --------\n >>> import dask.array as da\n >>> import numpy as np\n >>> x = np.arange(3)\n >>> da.asarray(x)\n dask.array\n\n >>> y = [[1, 2, 3], [4, 5, 6]]\n >>> da.asarray(y)\n dask.array\n \"\"\"\n if isinstance(a, Array):\n return a\n elif hasattr(a, \"to_dask_array\"):\n return a.to_dask_array()\n elif type(a).__module__.startswith(\"xarray.\") and hasattr(a, \"data\"):\n return asarray(a.data)\n elif isinstance(a, (list, tuple)) and any(isinstance(i, Array) for i in a):\n return stack(a)\n elif not isinstance(getattr(a, \"shape\", None), Iterable):\n a = np.asarray(a)\n return from_array(a, getitem=getter_inline, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_asanyarray_asanyarray.return.from_array_a_chunks_a_sh": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_asanyarray_asanyarray.return.from_array_a_chunks_a_sh", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 
3847, "end_line": 3884, "span_ids": ["asanyarray"], "tokens": 337}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def asanyarray(a):\n \"\"\"Convert the input to a dask array.\n\n Subclasses of ``np.ndarray`` will be passed through as chunks unchanged.\n\n Parameters\n ----------\n a : array-like\n Input data, in any form that can be converted to a dask array.\n\n Returns\n -------\n out : dask array\n Dask array interpretation of a.\n\n Examples\n --------\n >>> import dask.array as da\n >>> import numpy as np\n >>> x = np.arange(3)\n >>> da.asanyarray(x)\n dask.array\n\n >>> y = [[1, 2, 3], [4, 5, 6]]\n >>> da.asanyarray(y)\n dask.array\n \"\"\"\n if isinstance(a, Array):\n return a\n elif hasattr(a, \"to_dask_array\"):\n return a.to_dask_array()\n elif type(a).__module__.startswith(\"xarray.\") and hasattr(a, \"data\"):\n return asanyarray(a.data)\n elif isinstance(a, (list, tuple)) and any(isinstance(i, Array) for i in a):\n return stack(a)\n elif not isinstance(getattr(a, \"shape\", None), Iterable):\n a = np.asanyarray(a)\n return from_array(a, chunks=a.shape, getitem=getter_inline, asarray=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_is_scalar_for_elemwise_is_scalar_for_elemwise.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_is_scalar_for_elemwise_is_scalar_for_elemwise.return._", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3887, "end_line": 3919, "span_ids": ["is_scalar_for_elemwise"], "tokens": 256}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def is_scalar_for_elemwise(arg):\n \"\"\"\n\n >>> is_scalar_for_elemwise(42)\n True\n >>> is_scalar_for_elemwise('foo')\n True\n >>> is_scalar_for_elemwise(True)\n True\n >>> is_scalar_for_elemwise(np.array(42))\n True\n >>> is_scalar_for_elemwise([1, 2, 3])\n True\n >>> is_scalar_for_elemwise(np.array([1, 2, 3]))\n False\n >>> is_scalar_for_elemwise(from_array(np.array(0), chunks=()))\n False\n >>> is_scalar_for_elemwise(np.dtype('i4'))\n True\n \"\"\"\n # the second half of shape_condition is essentially just to ensure that\n # dask series / frame are treated as scalars in elemwise.\n maybe_shape = getattr(arg, \"shape\", None)\n shape_condition = not isinstance(maybe_shape, Iterable) or any(\n is_dask_collection(x) for x in maybe_shape\n )\n\n return (\n np.isscalar(arg)\n or shape_condition\n or isinstance(arg, np.dtype)\n or (isinstance(arg, np.ndarray) and arg.ndim == 0)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: 
{value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_broadcast_shapes_broadcast_shapes.return.tuple_reversed_out_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_broadcast_shapes_broadcast_shapes.return.tuple_reversed_out_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3922, "end_line": 3954, "span_ids": ["broadcast_shapes"], "tokens": 210}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def broadcast_shapes(*shapes):\n \"\"\"\n Determines output shape from broadcasting arrays.\n\n Parameters\n ----------\n shapes : tuples\n The shapes of the arguments.\n\n Returns\n -------\n output_shape : tuple\n\n Raises\n ------\n ValueError\n If the input shapes cannot be successfully broadcast together.\n \"\"\"\n if len(shapes) == 1:\n return shapes[0]\n out = []\n for sizes in zip_longest(*map(reversed, shapes), fillvalue=-1):\n if np.isnan(sizes).any():\n dim = np.nan\n else:\n dim = 0 if 0 in sizes else np.max(sizes)\n if any(i not in [-1, 0, 1, dim] and not np.isnan(i) for i in sizes):\n raise ValueError(\n \"operands could not be broadcast together with \"\n \"shapes {0}\".format(\" \".join(map(str, shapes)))\n )\n out.append(dim)\n return tuple(reversed(out))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_elemwise_elemwise.return.handle_out_out_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_elemwise_elemwise.return.handle_out_out_result_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3957, "end_line": 4037, "span_ids": ["elemwise"], "tokens": 682}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def elemwise(op, *args, **kwargs):\n \"\"\"Apply elementwise function across arguments\n\n Respects broadcasting rules\n\n Examples\n --------\n >>> elemwise(add, x, y) # doctest: +SKIP\n >>> elemwise(sin, x) # doctest: +SKIP\n\n See Also\n --------\n blockwise\n \"\"\"\n out = kwargs.pop(\"out\", None)\n if not set([\"name\", \"dtype\"]).issuperset(kwargs):\n msg = \"%s does not take the following keyword arguments %s\"\n raise TypeError(\n msg % (op.__name__, str(sorted(set(kwargs) - set([\"name\", \"dtype\"]))))\n )\n\n args = [np.asarray(a) if isinstance(a, (list, tuple)) else a for a in args]\n\n shapes = []\n for arg in args:\n shape = getattr(arg, \"shape\", 
())\n if any(is_dask_collection(x) for x in shape):\n # Want to exclude Delayed shapes and dd.Scalar\n shape = ()\n shapes.append(shape)\n\n shapes = [s if isinstance(s, Iterable) else () for s in shapes]\n out_ndim = len(\n broadcast_shapes(*shapes)\n ) # Raises ValueError if dimensions mismatch\n expr_inds = tuple(range(out_ndim))[::-1]\n\n need_enforce_dtype = False\n if \"dtype\" in kwargs:\n dt = kwargs[\"dtype\"]\n else:\n # We follow NumPy's rules for dtype promotion, which special cases\n # scalars and 0d ndarrays (which it considers equivalent) by using\n # their values to compute the result dtype:\n # https://github.com/numpy/numpy/issues/6240\n # We don't inspect the values of 0d dask arrays, because these could\n # hold potentially very expensive calculations. Instead, we treat\n # them just like other arrays, and if necessary cast the result of op\n # to match.\n vals = [\n np.empty((1,) * max(1, a.ndim), dtype=a.dtype)\n if not is_scalar_for_elemwise(a)\n else a\n for a in args\n ]\n try:\n dt = apply_infer_dtype(op, vals, {}, \"elemwise\", suggest_dtype=False)\n except Exception:\n return NotImplemented\n need_enforce_dtype = any(\n not is_scalar_for_elemwise(a) and a.ndim == 0 for a in args\n )\n\n name = kwargs.get(\"name\", None) or \"%s-%s\" % (funcname(op), tokenize(op, dt, *args))\n\n blockwise_kwargs = dict(dtype=dt, name=name, token=funcname(op).strip(\"_\"))\n if need_enforce_dtype:\n blockwise_kwargs[\"enforce_dtype\"] = dt\n blockwise_kwargs[\"enforce_dtype_function\"] = op\n op = _enforce_dtype\n result = blockwise(\n op,\n expr_inds,\n *concat(\n (a, tuple(range(a.ndim)[::-1]) if not is_scalar_for_elemwise(a) else None)\n for a in args\n ),\n **blockwise_kwargs,\n )\n\n return handle_out(out, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_handle_out_handle_out.if_isinstance_out_Array_.else_.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_handle_out_handle_out.if_isinstance_out_Array_.else_.return.result", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4040, "end_line": 4070, "span_ids": ["handle_out"], "tokens": 229}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def handle_out(out, result):\n \"\"\"Handle out parameters\n\n If out is a dask.array then this overwrites the contents of that array with\n the result\n \"\"\"\n if isinstance(out, tuple):\n if len(out) == 1:\n out = out[0]\n elif len(out) > 1:\n raise NotImplementedError(\"The out parameter is not fully supported\")\n else:\n out = None\n if isinstance(out, Array):\n if out.shape != result.shape:\n raise ValueError(\n \"Mismatched shapes between result and out parameter. 
\"\n \"out=%s, result=%s\" % (str(out.shape), str(result.shape))\n )\n out._chunks = result.chunks\n out.dask = result.dask\n out._meta = result._meta\n out.name = result.name\n elif out is not None:\n msg = (\n \"The out parameter is not fully supported.\"\n \" Received type %s, expected Dask Array\" % type(out).__name__\n )\n raise NotImplementedError(msg)\n else:\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__enforce_dtype__enforce_dtype.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__enforce_dtype__enforce_dtype.return.result", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4073, "end_line": 4112, "span_ids": ["_enforce_dtype"], "tokens": 327}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _enforce_dtype(*args, **kwargs):\n \"\"\"Calls a function and converts its result to the given dtype.\n\n The parameters have deliberately been given unwieldy names to avoid\n clashes with keyword arguments consumed by blockwise\n\n A dtype of `object` is treated as a special case and not enforced,\n because it is used as a dummy value in some places when the result will\n not be a block in an Array.\n\n Parameters\n ----------\n enforce_dtype : dtype\n Result dtype\n enforce_dtype_function : callable\n The wrapped function, which will be passed the remaining arguments\n \"\"\"\n dtype = kwargs.pop(\"enforce_dtype\")\n function = kwargs.pop(\"enforce_dtype_function\")\n\n result = function(*args, **kwargs)\n if hasattr(result, \"dtype\") and dtype != result.dtype and dtype != object:\n if not np.can_cast(result, dtype, casting=\"same_kind\"):\n raise ValueError(\n \"Inferred dtype from function %r was %r \"\n \"but got %r, which can't be cast using \"\n \"casting='same_kind'\"\n % (funcname(function), str(dtype), str(result.dtype))\n )\n if np.isscalar(result):\n # scalar astype method doesn't take the keyword arguments, so\n # have to convert via 0-dimensional array and back.\n result = result.astype(dtype)\n else:\n try:\n result = result.astype(dtype, copy=False)\n except TypeError:\n # Missing copy kwarg\n result = result.astype(dtype)\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_broadcast_to_broadcast_to.return.Array_graph_name_chunks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_broadcast_to_broadcast_to.return.Array_graph_name_chunks", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", 
"start_line": 4115, "end_line": 4181, "span_ids": ["broadcast_to"], "tokens": 598}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def broadcast_to(x, shape, chunks=None):\n \"\"\"Broadcast an array to a new shape.\n\n Parameters\n ----------\n x : array_like\n The array to broadcast.\n shape : tuple\n The shape of the desired array.\n chunks : tuple, optional\n If provided, then the result will use these chunks instead of the same\n chunks as the source array. Setting chunks explicitly as part of\n broadcast_to is more efficient than rechunking afterwards. Chunks are\n only allowed to differ from the original shape along dimensions that\n are new on the result or have size 1 the input array.\n\n Returns\n -------\n broadcast : dask array\n\n See Also\n --------\n :func:`numpy.broadcast_to`\n \"\"\"\n x = asarray(x)\n shape = tuple(shape)\n\n if x.shape == shape and (chunks is None or chunks == x.chunks):\n return x\n\n ndim_new = len(shape) - x.ndim\n if ndim_new < 0 or any(\n new != old for new, old in zip(shape[ndim_new:], x.shape) if old != 1\n ):\n raise ValueError(\"cannot broadcast shape %s to shape %s\" % (x.shape, shape))\n\n if chunks is None:\n chunks = tuple((s,) for s in shape[:ndim_new]) + tuple(\n bd if old > 1 else (new,)\n for bd, old, new in zip(x.chunks, x.shape, shape[ndim_new:])\n )\n else:\n chunks = normalize_chunks(\n chunks, shape, dtype=x.dtype, previous_chunks=x.chunks\n )\n for old_bd, new_bd in zip(x.chunks, chunks[ndim_new:]):\n if old_bd != new_bd and old_bd != (1,):\n raise ValueError(\n \"cannot broadcast chunks %s to chunks %s: \"\n \"new chunks must either be along a new \"\n \"dimension or a dimension of size 1\" % (x.chunks, chunks)\n )\n\n name = \"broadcast_to-\" + tokenize(x, shape, chunks)\n dsk = {}\n\n enumerated_chunks = product(*(enumerate(bds) for bds in chunks))\n for new_index, chunk_shape in (zip(*ec) for ec in enumerated_chunks):\n old_index = tuple(\n 0 if bd == (1,) else i for bd, i in zip(x.chunks, new_index[ndim_new:])\n )\n old_key = (x.name,) + old_index\n new_key = (name,) + new_index\n dsk[new_key] = (np.broadcast_to, old_key, quote(chunk_shape))\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x])\n return Array(graph, name, chunks, dtype=x.dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_broadcast_arrays_broadcast_arrays.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_broadcast_arrays_broadcast_arrays.return.result", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4184, "end_line": 4204, "span_ids": ["broadcast_arrays"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef broadcast_arrays(*args, **kwargs):\n subok = bool(kwargs.pop(\"subok\", False))\n\n to_array = asanyarray if subok else asarray\n args = tuple(to_array(e) for e in args)\n\n if kwargs:\n raise TypeError(\"unsupported keyword argument(s) provided\")\n\n # Unify uneven chunking\n inds = [list(reversed(range(x.ndim))) for x in args]\n uc_args = concat(zip(args, inds))\n _, args = unify_chunks(*uc_args, warn=False)\n\n shape = broadcast_shapes(*(e.shape for e in args))\n chunks = broadcast_chunks(*(e.chunks for e in args))\n\n result = [broadcast_to(e, shape=shape, chunks=chunks) for e in args]\n\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_offset_func_offset_func.return._offset": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_offset_func_offset_func.return._offset", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4207, "end_line": 4225, "span_ids": ["offset_func"], "tokens": 112}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def offset_func(func, offset, *args):\n \"\"\"Offsets inputs by offset\n\n >>> double = lambda x: x * 2\n >>> f = offset_func(double, (10,))\n >>> f(1)\n 22\n >>> f(300)\n 620\n \"\"\"\n\n def _offset(*args):\n args2 = list(map(add, args, offset))\n return func(*args2)\n\n with ignoring(Exception):\n _offset.__name__ = \"offset_\" + func.__name__\n\n return _offset", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_chunks_from_arrays_chunks_from_arrays.return.tuple_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_chunks_from_arrays_chunks_from_arrays.return.tuple_result_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4228, "end_line": 4261, "span_ids": ["chunks_from_arrays"], "tokens": 222}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def chunks_from_arrays(arrays):\n \"\"\"Chunks tuple from nested list of arrays\n\n >>> x = np.array([1, 2])\n >>> chunks_from_arrays([x, x])\n ((2, 2),)\n\n >>> x = np.array([[1, 2]])\n >>> chunks_from_arrays([[x], [x]])\n ((1, 1), 
(2,))\n\n >>> x = np.array([[1, 2]])\n >>> chunks_from_arrays([[x, x]])\n ((1,), (2, 2))\n\n >>> chunks_from_arrays([1, 1])\n ((1, 1),)\n \"\"\"\n if not arrays:\n return ()\n result = []\n dim = 0\n\n def shape(x):\n try:\n return x.shape\n except AttributeError:\n return (1,)\n\n while isinstance(arrays, (list, tuple)):\n result.append(tuple([shape(deepfirst(a))[dim] for a in arrays]))\n arrays = arrays[0]\n dim += 1\n return tuple(result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_deepfirst_reshapelist.if_len_shape_1_.else_.return._reshapelist_shape_1_p": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_deepfirst_reshapelist.if_len_shape_1_.else_.return._reshapelist_shape_1_p", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4264, "end_line": 4294, "span_ids": ["reshapelist", "shapelist", "deepfirst"], "tokens": 223}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def deepfirst(seq):\n \"\"\"First element in a nested list\n\n >>> deepfirst([[[1, 2], [3, 4]], [5, 6], [7, 8]])\n 1\n \"\"\"\n if not isinstance(seq, (list, tuple)):\n return seq\n else:\n return deepfirst(seq[0])\n\n\ndef shapelist(a):\n \"\"\" Get the shape of nested list \"\"\"\n if type(a) is list:\n return tuple([len(a)] + list(shapelist(a[0])))\n else:\n return ()\n\n\ndef reshapelist(shape, seq):\n \"\"\"Reshape iterator to nested shape\n\n >>> reshapelist((2, 3), range(6))\n [[0, 1, 2], [3, 4, 5]]\n \"\"\"\n if len(shape) == 1:\n return list(seq)\n else:\n n = int(len(seq) / shape[0])\n return [reshapelist(shape[1:], part) for part in partition(n, seq)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_transposelist_transposelist.return.reshapelist_newshape_res": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_transposelist_transposelist.return.reshapelist_newshape_res", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4297, "end_line": 4320, "span_ids": ["transposelist"], "tokens": 256}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def transposelist(arrays, axes, extradims=0):\n \"\"\"Permute axes of nested list\n\n >>> transposelist([[1,1,1],[1,1,1]], [2,1])\n [[[1, 1], 
[1, 1], [1, 1]]]\n\n    >>> transposelist([[1,1,1],[1,1,1]], [2,1], extradims=1)\n    [[[[1], [1]], [[1], [1]], [[1], [1]]]]\n    \"\"\"\n    if len(axes) != ndimlist(arrays):\n        raise ValueError(\"Length of axes should equal depth of nested arrays\")\n    if extradims < 0:\n        raise ValueError(\"`extradims` should be non-negative\")\n    if len(axes) > len(set(axes)):\n        raise ValueError(\"`axes` should be unique\")\n\n    ndim = max(axes) + 1\n    shape = shapelist(arrays)\n    newshape = [\n        shape[axes.index(i)] if i in axes else 1 for i in range(ndim + extradims)\n    ]\n\n    result = list(core.flatten(arrays))\n    return reshapelist(newshape, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_stack_stack.keys.list_product_name_ra": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_stack_stack.keys.list_product_name_ra", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4439, "end_line": 4531, "span_ids": ["stack"], "tokens": 766}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def stack(seq, axis=0, allow_unknown_chunksizes=False):\n    \"\"\"\n    Stack arrays along a new axis\n\n    Given a sequence of dask arrays, form a new dask array by stacking them\n    along a new dimension (axis=0 by default)\n\n    Parameters\n    ----------\n    seq: list of dask.arrays\n    axis: int\n        Dimension along which to align all of the arrays\n    allow_unknown_chunksizes: bool\n        Allow unknown chunksizes, such as those that come from converting from\n        dask dataframes. Dask.array is unable to verify that chunks line up. If\n        data comes from differently aligned sources then this can cause\n        unexpected results.\n\n    Examples\n    --------\n\n    Create slices\n\n    >>> import dask.array as da\n    >>> import numpy as np\n\n    >>> data = [da.from_array(np.ones((4, 4)), chunks=(2, 2))\n    ...         for i in range(3)]\n\n    >>> x = da.stack(data, axis=0)\n    >>> x.shape\n    (3, 4, 4)\n\n    >>> da.stack(data, axis=1).shape\n    (4, 3, 4)\n\n    >>> da.stack(data, axis=-1).shape\n    (4, 4, 3)\n\n    Result is a new dask Array\n\n    See Also\n    --------\n    concatenate\n    \"\"\"\n    from . import wrap\n\n    seq = [asarray(a) for a in seq]\n\n    if not seq:\n        raise ValueError(\"Need array(s) to stack\")\n    if not allow_unknown_chunksizes and not all(x.shape == seq[0].shape for x in seq):\n        idx = first(i for i in enumerate(seq) if i[1].shape != seq[0].shape)\n        raise ValueError(\n            \"Stacked arrays must have the same shape. 
\"\n \"The first array had shape {0}, while array \"\n \"{1} has shape {2}.\".format(seq[0].shape, idx[0] + 1, idx[1].shape)\n )\n\n meta = np.stack([meta_from_array(a) for a in seq], axis=axis)\n seq = [x.astype(meta.dtype) for x in seq]\n\n ndim = meta.ndim - 1\n if axis < 0:\n axis = ndim + axis + 1\n shape = tuple(\n len(seq)\n if i == axis\n else (seq[0].shape[i] if i < axis else seq[0].shape[i - 1])\n for i in range(meta.ndim)\n )\n\n seq2 = [a for a in seq if a.size]\n if not seq2:\n seq2 = seq\n\n n = len(seq2)\n if n == 0:\n try:\n return wrap.empty_like(meta, shape=shape, chunks=shape, dtype=meta.dtype)\n except TypeError:\n return wrap.empty(shape, chunks=shape, dtype=meta.dtype)\n\n ind = list(range(ndim))\n uc_args = list(concat((x, ind) for x in seq2))\n _, seq2 = unify_chunks(*uc_args)\n\n assert len(set(a.chunks for a in seq2)) == 1 # same chunks\n chunks = seq2[0].chunks[:axis] + ((1,) * n,) + seq2[0].chunks[axis:]\n\n names = [a.name for a in seq2]\n name = \"stack-\" + tokenize(names, axis)\n keys = list(product([name], *[range(len(bd)) for bd in chunks]))\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_stack.inputs_stack.return.Array_graph_name_chunks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_stack.inputs_stack.return.Array_graph_name_chunks", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4417, "end_line": 4434, "span_ids": ["stack"], "tokens": 156}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def stack(seq, axis=0, allow_unknown_chunksizes=False):\n # ... 
other code\n\n inputs = [\n (names[key[axis + 1]],) + key[1 : axis + 1] + key[axis + 2 :] for key in keys\n ]\n values = [\n (\n getitem,\n inp,\n (slice(None, None, None),) * axis\n + (None,)\n + (slice(None, None, None),) * (ndim - axis),\n )\n for inp in inputs\n ]\n\n layer = dict(zip(keys, values))\n graph = HighLevelGraph.from_collections(name, layer, dependencies=seq2)\n\n return Array(graph, name, chunks, meta=meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_concatenate3_concatenate3.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_concatenate3_concatenate3.return.result", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4437, "end_line": 4504, "span_ids": ["concatenate3"], "tokens": 543}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def concatenate3(arrays):\n \"\"\"Recursive np.concatenate\n\n Input should be a nested list of numpy arrays arranged in the order they\n should appear in the array itself. Each array should have the same number\n of dimensions as the desired output and the nesting of the lists.\n\n >>> x = np.array([[1, 2]])\n >>> concatenate3([[x, x, x], [x, x, x]])\n array([[1, 2, 1, 2, 1, 2],\n [1, 2, 1, 2, 1, 2]])\n\n >>> concatenate3([[x, x], [x, x], [x, x]])\n array([[1, 2, 1, 2],\n [1, 2, 1, 2],\n [1, 2, 1, 2]])\n \"\"\"\n from .utils import IS_NEP18_ACTIVE\n\n # We need this as __array_function__ may not exist on older NumPy versions.\n # And to reduce verbosity.\n NDARRAY_ARRAY_FUNCTION = getattr(np.ndarray, \"__array_function__\", None)\n\n arrays = concrete(arrays)\n if not arrays:\n return np.empty(0)\n\n advanced = max(\n core.flatten(arrays, container=(list, tuple)),\n key=lambda x: getattr(x, \"__array_priority__\", 0),\n )\n\n if IS_NEP18_ACTIVE and not all(\n NDARRAY_ARRAY_FUNCTION\n is getattr(arr, \"__array_function__\", NDARRAY_ARRAY_FUNCTION)\n for arr in arrays\n ):\n try:\n x = unpack_singleton(arrays)\n return _concatenate2(arrays, axes=tuple(range(x.ndim)))\n except TypeError:\n pass\n\n if concatenate_lookup.dispatch(type(advanced)) is not np.concatenate:\n x = unpack_singleton(arrays)\n return _concatenate2(arrays, axes=list(range(x.ndim)))\n\n ndim = ndimlist(arrays)\n if not ndim:\n return arrays\n chunks = chunks_from_arrays(arrays)\n shape = tuple(map(sum, chunks))\n\n def dtype(x):\n try:\n return x.dtype\n except AttributeError:\n return type(x)\n\n result = np.empty(shape=shape, dtype=dtype(deepfirst(arrays)))\n\n for (idx, arr) in zip(slices_from_chunks(chunks), core.flatten(arrays)):\n if hasattr(arr, \"ndim\"):\n while arr.ndim < ndim:\n arr = arr[None, ...]\n result[idx] = arr\n\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, 
"__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_concatenate_axes_to_hdf5.with_h5py_File_filename_.store_list_data_values_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_concatenate_axes_to_hdf5.with_h5py_File_filename_.store_list_data_values_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4507, "end_line": 4563, "span_ids": ["concatenate_axes", "to_hdf5"], "tokens": 491}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def concatenate_axes(arrays, axes):\n \"\"\" Recursively call np.concatenate along axes \"\"\"\n if len(axes) != ndimlist(arrays):\n raise ValueError(\"Length of axes should equal depth of nested arrays\")\n\n extradims = max(0, deepfirst(arrays).ndim - (max(axes) + 1))\n return concatenate3(transposelist(arrays, axes, extradims=extradims))\n\n\ndef to_hdf5(filename, *args, **kwargs):\n \"\"\"Store arrays in HDF5 file\n\n This saves several dask arrays into several datapaths in an HDF5 file.\n It creates the necessary datasets and handles clean file opening/closing.\n\n >>> da.to_hdf5('myfile.hdf5', '/x', x) # doctest: +SKIP\n\n or\n\n >>> da.to_hdf5('myfile.hdf5', {'/x': x, '/y': y}) # doctest: +SKIP\n\n Optionally provide arguments as though to ``h5py.File.create_dataset``\n\n >>> da.to_hdf5('myfile.hdf5', '/x', x, compression='lzf', shuffle=True) # doctest: +SKIP\n\n This can also be used as a method on a single Array\n\n >>> x.to_hdf5('myfile.hdf5', '/x') # doctest: +SKIP\n\n See Also\n --------\n da.store\n h5py.File.create_dataset\n \"\"\"\n if len(args) == 1 and isinstance(args[0], dict):\n data = args[0]\n elif len(args) == 2 and isinstance(args[0], str) and isinstance(args[1], Array):\n data = {args[0]: args[1]}\n else:\n raise ValueError(\"Please provide {'/data/path': array} dictionary\")\n\n chunks = kwargs.pop(\"chunks\", True)\n\n import h5py\n\n with h5py.File(filename, mode=\"a\") as f:\n dsets = [\n f.require_dataset(\n dp,\n shape=x.shape,\n dtype=x.dtype,\n chunks=tuple([c[0] for c in x.chunks]) if chunks is True else chunks,\n **kwargs,\n )\n for dp, x in data.items()\n ]\n store(list(data.values()), dsets)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_interleave_none_keyname.return._name_i_tuple_k_for_k": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_interleave_none_keyname.return._name_i_tuple_k_for_k", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4566, "end_line": 4592, "span_ids": ["interleave_none", "keyname"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", 
"start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def interleave_none(a, b):\n \"\"\"\n\n >>> interleave_none([0, None, 2, None], [1, 3])\n (0, 1, 2, 3)\n \"\"\"\n result = []\n i = j = 0\n n = len(a) + len(b)\n while i + j < n:\n if a[i] is not None:\n result.append(a[i])\n i += 1\n else:\n result.append(b[j])\n i += 1\n j += 1\n return tuple(result)\n\n\ndef keyname(name, i, okey):\n \"\"\"\n\n >>> keyname('x', 3, [None, None, 0, 2])\n ('x', 3, 0, 2)\n \"\"\"\n return (name, i) + tuple(k for k in okey if k is not None)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__vindex__vindex.return.x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__vindex__vindex.return.x", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4595, "end_line": 4649, "span_ids": ["_vindex"], "tokens": 570}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _vindex(x, *indexes):\n \"\"\"Point wise indexing with broadcasting.\n\n >>> x = np.arange(56).reshape((7, 8))\n >>> x\n array([[ 0, 1, 2, 3, 4, 5, 6, 7],\n [ 8, 9, 10, 11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20, 21, 22, 23],\n [24, 25, 26, 27, 28, 29, 30, 31],\n [32, 33, 34, 35, 36, 37, 38, 39],\n [40, 41, 42, 43, 44, 45, 46, 47],\n [48, 49, 50, 51, 52, 53, 54, 55]])\n\n >>> d = from_array(x, chunks=(3, 4))\n >>> result = _vindex(d, [0, 1, 6, 0], [0, 1, 0, 7])\n >>> result.compute()\n array([ 0, 9, 48, 7])\n \"\"\"\n indexes = replace_ellipsis(x.ndim, indexes)\n\n nonfancy_indexes = []\n reduced_indexes = []\n for i, ind in enumerate(indexes):\n if isinstance(ind, Number):\n nonfancy_indexes.append(ind)\n elif isinstance(ind, slice):\n nonfancy_indexes.append(ind)\n reduced_indexes.append(slice(None))\n else:\n nonfancy_indexes.append(slice(None))\n reduced_indexes.append(ind)\n\n nonfancy_indexes = tuple(nonfancy_indexes)\n reduced_indexes = tuple(reduced_indexes)\n\n x = x[nonfancy_indexes]\n\n array_indexes = {}\n for i, (ind, size) in enumerate(zip(reduced_indexes, x.shape)):\n if not isinstance(ind, slice):\n ind = np.array(ind, copy=True)\n if ind.dtype.kind == \"b\":\n raise IndexError(\"vindex does not support indexing with boolean arrays\")\n if ((ind >= size) | (ind < -size)).any():\n raise IndexError(\n \"vindex key has entries out of bounds for \"\n \"indexing along axis %s of size %s: %r\" % (i, size, ind)\n )\n ind %= size\n array_indexes[i] = ind\n\n if array_indexes:\n x = _vindex_array(x, array_indexes)\n\n return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__vindex_array__vindex_array.return.result_1d_reshape_broadca": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__vindex_array__vindex_array.return.result_1d_reshape_broadca", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4768, "end_line": 4861, "span_ids": ["_vindex_array"], "tokens": 847}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _vindex_array(x, dict_indexes):\n \"\"\"Point wise indexing with only NumPy Arrays.\"\"\"\n\n try:\n broadcast_indexes = np.broadcast_arrays(*dict_indexes.values())\n except ValueError as e:\n # note: error message exactly matches numpy\n shapes_str = \" \".join(str(a.shape) for a in dict_indexes.values())\n raise IndexError(\n \"shape mismatch: indexing arrays could not be \"\n \"broadcast together with shapes \" + shapes_str\n ) from e\n broadcast_shape = broadcast_indexes[0].shape\n\n lookup = dict(zip(dict_indexes, broadcast_indexes))\n flat_indexes = [\n lookup[i].ravel().tolist() if i in lookup else None for i in range(x.ndim)\n ]\n flat_indexes.extend([None] * (x.ndim - len(flat_indexes)))\n\n flat_indexes = [\n list(index) if index is not None else index for index in flat_indexes\n ]\n bounds = [list(accumulate(add, (0,) + c)) for c in x.chunks]\n bounds2 = [b for i, b in zip(flat_indexes, bounds) if i is not None]\n axis = _get_axis(flat_indexes)\n token = tokenize(x, flat_indexes)\n out_name = \"vindex-merge-\" + token\n\n points = list()\n for i, idx in enumerate(zip(*[i for i in flat_indexes if i is not None])):\n block_idx = [bisect(b, ind) - 1 for b, ind in zip(bounds2, idx)]\n inblock_idx = [\n ind - bounds2[k][j] for k, (ind, j) in enumerate(zip(idx, block_idx))\n ]\n points.append((i, tuple(block_idx), tuple(inblock_idx)))\n\n chunks = [c for i, c in zip(flat_indexes, x.chunks) if i is None]\n chunks.insert(0, (len(points),) if points else (0,))\n chunks = tuple(chunks)\n\n if points:\n per_block = groupby(1, points)\n per_block = dict((k, v) for k, v in per_block.items() if v)\n\n other_blocks = list(\n product(\n *[\n list(range(len(c))) if i is None else [None]\n for i, c in zip(flat_indexes, x.chunks)\n ]\n )\n )\n\n full_slices = [slice(None, None) if i is None else None for i in flat_indexes]\n\n name = \"vindex-slice-\" + token\n vindex_merge_name = \"vindex-merge-\" + token\n dsk = {}\n for okey in other_blocks:\n for i, key in enumerate(per_block):\n dsk[keyname(name, i, okey)] = (\n _vindex_transpose,\n (\n _vindex_slice,\n (x.name,) + interleave_none(okey, key),\n interleave_none(\n full_slices, list(zip(*pluck(2, per_block[key])))\n ),\n ),\n axis,\n )\n dsk[keyname(vindex_merge_name, 0, okey)] = (\n _vindex_merge,\n [list(pluck(0, per_block[key])) for key in per_block],\n [keyname(name, i, okey) for i in range(len(per_block))],\n )\n\n result_1d = Array(\n HighLevelGraph.from_collections(out_name, dsk, dependencies=[x]),\n out_name,\n chunks,\n x.dtype,\n )\n return result_1d.reshape(broadcast_shape + result_1d.shape[1:])\n\n # output has a zero dimension, 
just create a new zero-shape array with the\n # same dtype\n from .wrap import empty\n\n result_1d = empty(\n tuple(map(sum, chunks)), chunks=chunks, dtype=x.dtype, name=out_name\n )\n return result_1d.reshape(broadcast_shape + result_1d.shape[1:])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__get_axis__vindex_transpose.return.block_transpose_axes_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__get_axis__vindex_transpose.return.block_transpose_axes_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4750, "end_line": 4779, "span_ids": ["_get_axis", "_vindex_slice", "_vindex_transpose"], "tokens": 287}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _get_axis(indexes):\n \"\"\"Get axis along which point-wise slicing results lie\n\n This is mostly a hack because I can't figure out NumPy's rule on this and\n can't be bothered to go reading.\n\n >>> _get_axis([[1, 2], None, [1, 2], None])\n 0\n >>> _get_axis([None, [1, 2], [1, 2], None])\n 1\n >>> _get_axis([None, None, [1, 2], [1, 2]])\n 2\n \"\"\"\n ndim = len(indexes)\n indexes = [slice(None, None) if i is None else [0] for i in indexes]\n x = np.empty((2,) * ndim)\n x2 = x[tuple(indexes)]\n return x2.shape.index(1)\n\n\ndef _vindex_slice(block, points):\n \"\"\" Pull out point-wise slices from block \"\"\"\n points = [p if isinstance(p, slice) else list(p) for p in points]\n return block[tuple(points)]\n\n\ndef _vindex_transpose(block, axis):\n \"\"\" Rotate block so that points are on the first dimension \"\"\"\n axes = [axis] + list(range(axis)) + list(range(axis + 1, block.ndim))\n return block.transpose(axes)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__vindex_merge__vindex_merge.return.x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__vindex_merge__vindex_merge.return.x", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4782, "end_line": 4812, "span_ids": ["_vindex_merge"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _vindex_merge(locations, values):\n \"\"\"\n\n >>> locations = [0], [2, 1]\n >>> values = [np.array([[1, 2, 3]]),\n ... 
np.array([[10, 20, 30], [40, 50, 60]])]\n\n >>> _vindex_merge(locations, values)\n array([[ 1, 2, 3],\n [40, 50, 60],\n [10, 20, 30]])\n \"\"\"\n locations = list(map(list, locations))\n values = list(values)\n\n n = sum(map(len, locations))\n\n shape = list(values[0].shape)\n shape[0] = n\n shape = tuple(shape)\n\n dtype = values[0].dtype\n\n x = np.empty(shape, dtype=dtype)\n\n ind = [slice(None, None) for i in range(x.ndim)]\n for loc, val in zip(locations, values):\n ind[0] = loc\n x[tuple(ind)] = val\n\n return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_to_npy_stack_to_npy_stack.compute_as_if_collection_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_to_npy_stack_to_npy_stack.compute_as_if_collection_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4815, "end_line": 4864, "span_ids": ["to_npy_stack"], "tokens": 453}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_npy_stack(dirname, x, axis=0):\n \"\"\"Write dask array to a stack of .npy files\n\n This partitions the dask.array along one axis and stores each block along\n that axis as a single .npy file in the specified directory\n\n Examples\n --------\n >>> x = da.ones((5, 10, 10), chunks=(2, 4, 4)) # doctest: +SKIP\n >>> da.to_npy_stack('data/', x, axis=0) # doctest: +SKIP\n\n The ``.npy`` files store numpy arrays for ``x[0:2], x[2:4], and x[4:5]``\n respectively, as is specified by the chunk size along the zeroth axis::\n\n $ tree data/\n data/\n |-- 0.npy\n |-- 1.npy\n |-- 2.npy\n |-- info\n\n The ``info`` file stores the dtype, chunks, and axis information of the array.\n You can load these stacks with the ``da.from_npy_stack`` function.\n\n >>> y = da.from_npy_stack('data/') # doctest: +SKIP\n\n See Also\n --------\n from_npy_stack\n \"\"\"\n\n chunks = tuple((c if i == axis else (sum(c),)) for i, c in enumerate(x.chunks))\n xx = x.rechunk(chunks)\n\n if not os.path.exists(dirname):\n os.mkdir(dirname)\n\n meta = {\"chunks\": chunks, \"dtype\": x.dtype, \"axis\": axis}\n\n with open(os.path.join(dirname, \"info\"), \"wb\") as f:\n pickle.dump(meta, f)\n\n name = \"to-npy-stack-\" + str(uuid.uuid1())\n dsk = {\n (name, i): (np.save, os.path.join(dirname, \"%d.npy\" % i), key)\n for i, key in enumerate(core.flatten(xx.__dask_keys__()))\n }\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[xx])\n compute_as_if_collection(Array, graph, list(dsk))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_from_collections_abc_impo_AxisError": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_from_collections_abc_impo_AxisError", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 27, "span_ids": ["imports"], "tokens": 149}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from collections.abc import Sequence\nfrom functools import partial, reduce\nfrom itertools import product\nfrom operator import add, getitem\nfrom numbers import Integral, Number\n\nimport numpy as np\nfrom tlz import accumulate, sliding_window\n\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..base import tokenize\nfrom ..utils import derived_from\nfrom . import chunk\nfrom .core import (\n Array,\n asarray,\n normalize_chunks,\n stack,\n concatenate,\n block,\n broadcast_to,\n broadcast_arrays,\n cached_cumsum,\n)\nfrom .ufunc import rint\nfrom .wrap import empty, ones, zeros, full\nfrom .utils import AxisError, meta_from_array, zeros_like_safe", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_full_like__get_like_function_shapes_chunks.return.shape_chunks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_full_like__get_like_function_shapes_chunks.return.shape_chunks", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 184, "end_line": 249, "span_ids": ["_get_like_function_shapes_chunks", "full_like"], "tokens": 491}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def full_like(a, fill_value, order=\"C\", dtype=None, chunks=None, name=None, shape=None):\n \"\"\"\n Return a full array with the same shape and type as a given array.\n\n Parameters\n ----------\n a : array_like\n The shape and data-type of `a` define these same attributes of\n the returned array.\n fill_value : scalar\n Fill value.\n dtype : data-type, optional\n Overrides the data type of the result.\n order : {'C', 'F'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory.\n chunks : sequence of ints\n The number of samples on each block. Note that the last block will have\n fewer samples if ``len(array) % chunks != 0``.\n name : str, optional\n An optional keyname for the array. 
Defaults to hashing the input\n keyword arguments.\n shape : int or sequence of ints, optional.\n Overrides the shape of the result.\n\n Returns\n -------\n out : ndarray\n Array of `fill_value` with the same shape and type as `a`.\n\n See Also\n --------\n zeros_like : Return an array of zeros with shape and type of input.\n ones_like : Return an array of ones with shape and type of input.\n empty_like : Return an empty array with shape and type of input.\n zeros : Return a new array setting values to zero.\n ones : Return a new array setting values to one.\n empty : Return a new uninitialized array.\n full : Fill a new array.\n \"\"\"\n\n a = asarray(a, name=False)\n shape, chunks = _get_like_function_shapes_chunks(a, chunks, shape)\n return full(\n shape,\n fill_value,\n dtype=(dtype or a.dtype),\n order=order,\n chunks=chunks,\n name=name,\n meta=a._meta,\n )\n\n\ndef _get_like_function_shapes_chunks(a, chunks, shape):\n \"\"\"\n Helper function for finding shapes and chunks for *_like()\n array creation functions.\n \"\"\"\n if shape is None:\n shape = a.shape\n if chunks is None:\n chunks = a.chunks\n elif chunks is None:\n chunks = \"auto\"\n return shape, chunks", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_linspace_linspace.if_retstep_.else_.return.Array_dsk_name_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_linspace_linspace.if_retstep_.else_.return.Array_dsk_name_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 230, "end_line": 301, "span_ids": ["linspace"], "tokens": 511}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def linspace(\n start, stop, num=50, endpoint=True, retstep=False, chunks=\"auto\", dtype=None\n):\n \"\"\"\n Return `num` evenly spaced values over the closed interval [`start`,\n `stop`].\n\n Parameters\n ----------\n start : scalar\n The starting value of the sequence.\n stop : scalar\n The last value of the sequence.\n num : int, optional\n Number of samples to include in the returned dask array, including the\n endpoints. Default is 50.\n endpoint : bool, optional\n If True, ``stop`` is the last sample. Otherwise, it is not included.\n Default is True.\n retstep : bool, optional\n If True, return (samples, step), where step is the spacing between\n samples. Default is False.\n chunks : int\n The number of samples on each block. Note that the last block will have\n fewer samples if `num % blocksize != 0`\n dtype : dtype, optional\n The type of the output array.\n\n Returns\n -------\n samples : dask array\n step : float, optional\n Only returned if ``retstep`` is True. 
Size of spacing between samples.\n\n\n    See Also\n    --------\n    dask.array.arange\n    \"\"\"\n    num = int(num)\n\n    if dtype is None:\n        dtype = np.linspace(0, 1, 1).dtype\n\n    chunks = normalize_chunks(chunks, (num,), dtype=dtype)\n\n    range_ = stop - start\n\n    div = (num - 1) if endpoint else num\n    step = float(range_) / div\n\n    name = \"linspace-\" + tokenize((start, stop, num, endpoint, chunks, dtype))\n\n    dsk = {}\n    blockstart = start\n\n    for i, bs in enumerate(chunks[0]):\n        bs_space = bs - 1 if endpoint else bs\n        blockstop = blockstart + (bs_space * step)\n        task = (\n            partial(np.linspace, endpoint=endpoint, dtype=dtype),\n            blockstart,\n            blockstop,\n            bs,\n        )\n        blockstart = blockstart + (step * bs)\n        dsk[(name, i)] = task\n\n    if retstep:\n        return Array(dsk, name, chunks, dtype=dtype), step\n    else:\n        return Array(dsk, name, chunks, dtype=dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_arange_arange.return.Array_dsk_name_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_arange_arange.return.Array_dsk_name_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 304, "end_line": 379, "span_ids": ["arange"], "tokens": 597}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def arange(*args, **kwargs):\n    \"\"\"\n    Return evenly spaced values from `start` to `stop` with step size `step`.\n\n    The values are half-open [start, stop), so including start and excluding\n    stop. This is basically the same as python's range function but for dask\n    arrays.\n\n    When using a non-integer step, such as 0.1, the results will often not be\n    consistent. It is better to use linspace for these cases.\n\n    Parameters\n    ----------\n    start : int, optional\n        The starting value of the sequence. The default is 0.\n    stop : int\n        The end of the interval, this value is excluded from the interval.\n    step : int, optional\n        The spacing between the values. The default is 1 when not specified.\n    chunks : int\n        The number of samples on each block. Note that the last block will have\n        fewer samples if ``len(array) % chunks != 0``.\n    dtype : numpy.dtype\n        Output dtype. 
Omit to infer it from start, stop, step\n\n Returns\n -------\n samples : dask array\n\n See Also\n --------\n dask.array.linspace\n \"\"\"\n if len(args) == 1:\n start = 0\n stop = args[0]\n step = 1\n elif len(args) == 2:\n start = args[0]\n stop = args[1]\n step = 1\n elif len(args) == 3:\n start, stop, step = args\n else:\n raise TypeError(\n \"\"\"\n arange takes 3 positional arguments: arange([start], stop, [step])\n \"\"\"\n )\n\n chunks = kwargs.pop(\"chunks\", \"auto\")\n\n num = int(max(np.ceil((stop - start) / step), 0))\n\n dtype = kwargs.pop(\"dtype\", None)\n if dtype is None:\n dtype = np.arange(start, stop, step * num if num else step).dtype\n\n chunks = normalize_chunks(chunks, (num,), dtype=dtype)\n\n if kwargs:\n raise TypeError(\"Unexpected keyword argument(s): %s\" % \",\".join(kwargs.keys()))\n\n name = \"arange-\" + tokenize((start, stop, step, chunks, dtype))\n dsk = {}\n elem_count = 0\n\n for i, bs in enumerate(chunks[0]):\n blockstart = start + (elem_count * step)\n blockstop = start + ((elem_count + bs) * step)\n task = (chunk.arange, blockstart, blockstop, step, bs, dtype)\n dsk[(name, i)] = task\n elem_count += bs\n\n return Array(dsk, name, chunks, dtype=dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_meshgrid_meshgrid.return.grid": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_meshgrid_meshgrid.return.grid", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 382, "end_line": 418, "span_ids": ["meshgrid"], "tokens": 255}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef meshgrid(*xi, **kwargs):\n indexing = kwargs.pop(\"indexing\", \"xy\")\n sparse = bool(kwargs.pop(\"sparse\", False))\n\n if \"copy\" in kwargs:\n raise NotImplementedError(\"`copy` not supported\")\n\n if kwargs:\n raise TypeError(\"unsupported keyword argument(s) provided\")\n\n if indexing not in (\"ij\", \"xy\"):\n raise ValueError(\"`indexing` must be `'ij'` or `'xy'`\")\n\n xi = [asarray(e) for e in xi]\n xi = [e.flatten() for e in xi]\n\n if indexing == \"xy\" and len(xi) > 1:\n xi[0], xi[1] = xi[1], xi[0]\n\n grid = []\n for i in range(len(xi)):\n s = len(xi) * [None]\n s[i] = slice(None)\n s = tuple(s)\n\n r = xi[i][s]\n\n grid.append(r)\n\n if not sparse:\n grid = broadcast_arrays(*grid)\n\n if indexing == \"xy\" and len(xi) > 1:\n grid[0], grid[1] = grid[1], grid[0]\n\n return grid", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_indices_indices.return.grid": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_indices_indices.return.grid", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 421, "end_line": 472, "span_ids": ["indices"], "tokens": 411}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def indices(dimensions, dtype=int, chunks=\"auto\"):\n \"\"\"\n Implements NumPy's ``indices`` for Dask Arrays.\n\n Generates a grid of indices covering the dimensions provided.\n\n The final array has the shape ``(len(dimensions), *dimensions)``. The\n chunks are used to specify the chunking for axis 1 up to\n ``len(dimensions)``. The 0th axis always has chunks of length 1.\n\n Parameters\n ----------\n dimensions : sequence of ints\n The shape of the index grid.\n dtype : dtype, optional\n Type to use for the array. Default is ``int``.\n chunks : sequence of ints, str\n The size of each block. Must be one of the following forms:\n\n - A blocksize like (500, 1000)\n - A size in bytes, like \"100 MiB\" which will choose a uniform\n block-like shape\n - The word \"auto\" which acts like the above, but uses a configuration\n value ``array.chunk-size`` for the chunk size\n\n Note that the last block will have fewer samples if ``len(array) % chunks != 0``.\n\n Returns\n -------\n grid : dask array\n \"\"\"\n dimensions = tuple(dimensions)\n dtype = np.dtype(dtype)\n chunks = normalize_chunks(chunks, shape=dimensions, dtype=dtype)\n\n if len(dimensions) != len(chunks):\n raise ValueError(\"Need same number of chunks as dimensions.\")\n\n xi = []\n for i in range(len(dimensions)):\n xi.append(arange(dimensions[i], dtype=dtype, chunks=(chunks[i],)))\n\n grid = []\n if np.prod(dimensions):\n grid = meshgrid(*xi, indexing=\"ij\")\n\n if grid:\n grid = stack(grid)\n else:\n grid = empty((len(dimensions),) + dimensions, dtype=dtype, chunks=(1,) + chunks)\n\n return grid", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_eye_eye.return.Array_eye_name_eye_shap": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_eye_eye.return.Array_eye_name_eye_shap", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 475, "end_line": 537, "span_ids": ["eye"], "tokens": 571}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def eye(N, chunks=\"auto\", M=None, k=0, dtype=float):\n \"\"\"\n Return a 2-D Array with ones on the diagonal and zeros elsewhere.\n\n Parameters\n ----------\n N : int\n Number of 
rows in the output.\n chunks : int, str\n How to chunk the array. Must be one of the following forms:\n\n - A blocksize like 1000.\n - A size in bytes, like \"100 MiB\" which will choose a uniform\n block-like shape\n - The word \"auto\" which acts like the above, but uses a configuration\n value ``array.chunk-size`` for the chunk size\n M : int, optional\n Number of columns in the output. If None, defaults to `N`.\n k : int, optional\n Index of the diagonal: 0 (the default) refers to the main diagonal,\n a positive value refers to an upper diagonal, and a negative value\n to a lower diagonal.\n dtype : data-type, optional\n Data-type of the returned array.\n\n Returns\n -------\n I : Array of shape (N,M)\n An array where all elements are equal to zero, except for the `k`-th\n diagonal, whose values are equal to one.\n \"\"\"\n eye = {}\n if M is None:\n M = N\n\n if not isinstance(chunks, (int, str)):\n raise ValueError(\"chunks must be an int or string\")\n elif isinstance(chunks, str):\n chunks = normalize_chunks(chunks, shape=(N, M), dtype=dtype)\n chunks = chunks[0][0]\n token = tokenize(N, chunks, M, k, dtype)\n name_eye = \"eye-\" + token\n\n vchunks = [chunks] * (N // chunks)\n if N % chunks != 0:\n vchunks.append(N % chunks)\n hchunks = [chunks] * (M // chunks)\n if M % chunks != 0:\n hchunks.append(M % chunks)\n\n for i, vchunk in enumerate(vchunks):\n for j, hchunk in enumerate(hchunks):\n if (j - i - 1) * chunks <= k <= (j - i + 1) * chunks:\n eye[name_eye, i, j] = (\n np.eye,\n vchunk,\n hchunk,\n k - (j - i) * chunks,\n dtype,\n )\n else:\n eye[name_eye, i, j] = (np.zeros, (vchunk, hchunk), dtype)\n return Array(eye, name_eye, shape=(N, M), chunks=(chunks, chunks), dtype=dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_diag_diag.return.Array_graph_name_chunk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_diag_diag.return.Array_graph_name_chunk", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 540, "end_line": 586, "span_ids": ["diag"], "tokens": 486}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef diag(v):\n name = \"diag-\" + tokenize(v)\n\n meta = meta_from_array(v, 2 if v.ndim == 1 else 1)\n\n if isinstance(v, np.ndarray) or (\n hasattr(v, \"__array_function__\") and not isinstance(v, Array)\n ):\n if v.ndim == 1:\n chunks = ((v.shape[0],), (v.shape[0],))\n dsk = {(name, 0, 0): (np.diag, v)}\n elif v.ndim == 2:\n chunks = ((min(v.shape),),)\n dsk = {(name, 0): (np.diag, v)}\n else:\n raise ValueError(\"Array must be 1d or 2d only\")\n return Array(dsk, name, chunks, meta=meta)\n if not isinstance(v, Array):\n raise TypeError(\n \"v must be a dask array or numpy array, got {0}\".format(type(v))\n )\n if v.ndim != 1:\n if v.chunks[0] == v.chunks[1]:\n dsk = {\n (name, i): (np.diag, row[i]) for i, row in 
enumerate(v.__dask_keys__())\n            }\n            graph = HighLevelGraph.from_collections(name, dsk, dependencies=[v])\n            return Array(graph, name, (v.chunks[0],), meta=meta)\n        else:\n            raise NotImplementedError(\n                \"Extracting diagonals from non-square chunked arrays\"\n            )\n    chunks_1d = v.chunks[0]\n    blocks = v.__dask_keys__()\n    dsk = {}\n    for i, m in enumerate(chunks_1d):\n        for j, n in enumerate(chunks_1d):\n            key = (name, i, j)\n            if i == j:\n                dsk[key] = (np.diag, blocks[i])\n            else:\n                dsk[key] = (partial(zeros_like_safe, shape=(m, n)), meta)\n\n    graph = HighLevelGraph.from_collections(name, dsk, dependencies=[v])\n    return Array(graph, name, (chunks_1d, chunks_1d), meta=meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_diagonal_diagonal.return.Array_graph_name_shape_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_diagonal_diagonal.return.Array_graph_name_shape_", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 589, "end_line": 654, "span_ids": ["diagonal"], "tokens": 724}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef diagonal(a, offset=0, axis1=0, axis2=1):\n    name = \"diagonal-\" + tokenize(a, offset, axis1, axis2)\n\n    if a.ndim < 2:\n        # NumPy uses `diag` as we do here.\n        raise ValueError(\"diag requires an array of at least two dimensions\")\n\n    def _axis_fmt(axis, name, ndim):\n        if axis < 0:\n            t = ndim + axis\n            if t < 0:\n                msg = \"{}: axis {} is out of bounds for array of dimension {}\"\n                raise AxisError(msg.format(name, axis, ndim))\n            axis = t\n        return axis\n\n    axis1 = _axis_fmt(axis1, \"axis1\", a.ndim)\n    axis2 = _axis_fmt(axis2, \"axis2\", a.ndim)\n\n    if axis1 == axis2:\n        raise ValueError(\"axis1 and axis2 cannot be the same\")\n\n    a = asarray(a)\n\n    if axis1 > axis2:\n        axis1, axis2 = axis2, axis1\n        offset = -offset\n\n    def _diag_len(dim1, dim2, offset):\n        return max(0, min(min(dim1, dim2), dim1 + offset, dim2 - offset))\n\n    diag_chunks = []\n    chunk_offsets = []\n    cum1 = cached_cumsum(a.chunks[axis1], initial_zero=True)[:-1]\n    cum2 = cached_cumsum(a.chunks[axis2], initial_zero=True)[:-1]\n    for co1, c1 in zip(cum1, a.chunks[axis1]):\n        chunk_offsets.append([])\n        for co2, c2 in zip(cum2, a.chunks[axis2]):\n            k = offset + co1 - co2\n            diag_chunks.append(_diag_len(c1, c2, k))\n            chunk_offsets[-1].append(k)\n\n    dsk = {}\n    idx_set = set(range(a.ndim)) - set([axis1, axis2])\n    n1 = len(a.chunks[axis1])\n    n2 = len(a.chunks[axis2])\n    for idx in product(*(range(len(a.chunks[i])) for i in idx_set)):\n        for i, (i1, i2) in enumerate(product(range(n1), range(n2))):\n            tsk = reduce(getitem, idx[:axis1], a.__dask_keys__())[i1]\n            tsk = reduce(getitem, idx[axis1 : axis2 - 1], tsk)[i2]\n            tsk = reduce(getitem, idx[axis2 - 1 :], tsk)\n            k = chunk_offsets[i1][i2]\n            dsk[(name,) + idx + (i,)] = (np.diagonal, tsk, k, axis1, axis2)\n\n    
left_shape = tuple(a.shape[i] for i in idx_set)\n right_shape = (_diag_len(a.shape[axis1], a.shape[axis2], offset),)\n shape = left_shape + right_shape\n\n left_chunks = tuple(a.chunks[i] for i in idx_set)\n right_shape = (tuple(diag_chunks),)\n chunks = left_chunks + right_shape\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[a])\n meta = meta_from_array(a, len(shape))\n return Array(graph, name, shape=shape, chunks=chunks, meta=meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_triu_triu.return.Array_graph_name_shape_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_triu_triu.return.Array_graph_name_shape_", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 657, "end_line": 712, "span_ids": ["triu"], "tokens": 511}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def triu(m, k=0):\n \"\"\"\n Upper triangle of an array with elements below the `k`-th diagonal zeroed.\n\n Parameters\n ----------\n m : array_like, shape (M, N)\n Input array.\n k : int, optional\n Diagonal below which to zero elements. `k = 0` (the default) is the\n main diagonal, `k < 0` is below it and `k > 0` is above.\n\n Returns\n -------\n triu : ndarray, shape (M, N)\n Upper triangle of `m`, of same shape and data-type as `m`.\n\n See Also\n --------\n tril : lower triangle of an array\n \"\"\"\n if m.ndim != 2:\n raise ValueError(\"input must be 2 dimensional\")\n if m.chunks[0][0] != m.chunks[1][0]:\n msg = (\n \"chunks must be a square. 
\"\n \"Use .rechunk method to change the size of chunks.\"\n )\n raise NotImplementedError(msg)\n\n rdim = len(m.chunks[0])\n hdim = len(m.chunks[1])\n chunk = m.chunks[0][0]\n\n token = tokenize(m, k)\n name = \"triu-\" + token\n\n triu_is_empty = True\n dsk = {}\n for i in range(rdim):\n for j in range(hdim):\n if chunk * (j - i + 1) < k:\n dsk[(name, i, j)] = (\n partial(zeros_like_safe, shape=(m.chunks[0][i], m.chunks[1][j])),\n m._meta,\n )\n elif chunk * (j - i - 1) < k <= chunk * (j - i + 1):\n dsk[(name, i, j)] = (np.triu, (m.name, i, j), k - (chunk * (j - i)))\n triu_is_empty = False\n else:\n dsk[(name, i, j)] = (m.name, i, j)\n triu_is_empty = False\n graph = HighLevelGraph.from_collections(\n name, dsk, dependencies=[] if triu_is_empty else [m]\n )\n return Array(graph, name, shape=m.shape, chunks=m.chunks, meta=m)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_tril_tril.return.Array_graph_name_shape_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_tril_tril.return.Array_graph_name_shape_", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 715, "end_line": 770, "span_ids": ["tril"], "tokens": 520}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def tril(m, k=0):\n \"\"\"\n Lower triangle of an array with elements above the `k`-th diagonal zeroed.\n\n Parameters\n ----------\n m : array_like, shape (M, M)\n Input array.\n k : int, optional\n Diagonal above which to zero elements. `k = 0` (the default) is the\n main diagonal, `k < 0` is below it and `k > 0` is above.\n\n Returns\n -------\n tril : ndarray, shape (M, M)\n Lower triangle of `m`, of same shape and data-type as `m`.\n\n See Also\n --------\n triu : upper triangle of an array\n \"\"\"\n if m.ndim != 2:\n raise ValueError(\"input must be 2 dimensional\")\n if not len(set(m.chunks[0] + m.chunks[1])) == 1:\n msg = (\n \"All chunks must be a square matrix to perform lu decomposition. 
\"\n \"Use .rechunk method to change the size of chunks.\"\n )\n raise ValueError(msg)\n\n rdim = len(m.chunks[0])\n hdim = len(m.chunks[1])\n chunk = m.chunks[0][0]\n\n token = tokenize(m, k)\n name = \"tril-\" + token\n\n tril_is_empty = True\n dsk = {}\n for i in range(rdim):\n for j in range(hdim):\n if chunk * (j - i + 1) < k:\n dsk[(name, i, j)] = (m.name, i, j)\n tril_is_empty = False\n elif chunk * (j - i - 1) < k <= chunk * (j - i + 1):\n dsk[(name, i, j)] = (np.tril, (m.name, i, j), k - (chunk * (j - i)))\n tril_is_empty = False\n else:\n dsk[(name, i, j)] = (\n partial(zeros_like_safe, shape=(m.chunks[0][i], m.chunks[1][j])),\n m._meta,\n )\n graph = HighLevelGraph.from_collections(\n name, dsk, dependencies=[] if tril_is_empty else [m]\n )\n return Array(graph, name, shape=m.shape, chunks=m.chunks, meta=m)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py__np_fromfunction_fromfunction.return.Array_dsk_name_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py__np_fromfunction_fromfunction.return.Array_dsk_name_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 773, "end_line": 803, "span_ids": ["_np_fromfunction", "fromfunction"], "tokens": 258}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _np_fromfunction(func, shape, dtype, offset, func_kwargs):\n def offset_func(*args, **kwargs):\n args2 = list(map(add, args, offset))\n return func(*args2, **kwargs)\n\n return np.fromfunction(offset_func, shape, dtype=dtype, **func_kwargs)\n\n\n@derived_from(np)\ndef fromfunction(func, chunks=\"auto\", shape=None, dtype=None, **kwargs):\n chunks = normalize_chunks(chunks, shape, dtype=dtype)\n name = \"fromfunction-\" + tokenize(func, chunks, shape, dtype, kwargs)\n keys = product([name], *(range(len(bd)) for bd in chunks))\n\n def accumulate_gen(chunks):\n for bd in chunks:\n yield accumulate(add, (0,) + bd[:-1])\n\n aggdims = accumulate_gen(chunks)\n offsets = product(*aggdims)\n shapes = product(*chunks)\n dtype = dtype or float\n\n values = [\n (_np_fromfunction, func, shp, dtype, offset, kwargs)\n for offset, shp in zip(offsets, shapes)\n ]\n\n dsk = dict(zip(keys, values))\n\n return Array(dsk, name, chunks, dtype=dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_repeat_repeat.return.concatenate_out_axis_axi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_repeat_repeat.return.concatenate_out_axis_axi", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": 
"creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 806, "end_line": 853, "span_ids": ["repeat"], "tokens": 401}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef repeat(a, repeats, axis=None):\n if axis is None:\n if a.ndim == 1:\n axis = 0\n else:\n raise NotImplementedError(\"Must supply an integer axis value\")\n\n if not isinstance(repeats, Integral):\n raise NotImplementedError(\"Only integer valued repeats supported\")\n\n if -a.ndim <= axis < 0:\n axis += a.ndim\n elif not 0 <= axis <= a.ndim - 1:\n raise ValueError(\"axis(=%d) out of bounds\" % axis)\n\n if repeats == 0:\n return a[tuple(slice(None) if d != axis else slice(0) for d in range(a.ndim))]\n elif repeats == 1:\n return a\n\n cchunks = cached_cumsum(a.chunks[axis], initial_zero=True)\n slices = []\n for c_start, c_stop in sliding_window(2, cchunks):\n ls = np.linspace(c_start, c_stop, repeats).round(0)\n for ls_start, ls_stop in sliding_window(2, ls):\n if ls_start != ls_stop:\n slices.append(slice(ls_start, ls_stop))\n\n all_slice = slice(None, None, None)\n slices = [\n (all_slice,) * axis + (s,) + (all_slice,) * (a.ndim - axis - 1) for s in slices\n ]\n\n slabs = [a[slc] for slc in slices]\n\n out = []\n for slab in slabs:\n chunks = list(slab.chunks)\n assert len(chunks[axis]) == 1\n chunks[axis] = (chunks[axis][0] * repeats,)\n chunks = tuple(chunks)\n result = slab.map_blocks(\n np.repeat, repeats, axis=axis, chunks=chunks, dtype=slab.dtype\n )\n out.append(result)\n\n return concatenate(out, axis=axis)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_tile_tile.return.empty_shape_shape_out_dt": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_tile_tile.return.empty_shape_shape_out_dt", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 856, "end_line": 879, "span_ids": ["tile"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef tile(A, reps):\n try:\n tup = tuple(reps)\n except TypeError:\n tup = (reps,)\n if any(i < 0 for i in tup):\n raise ValueError(\"Negative `reps` are not allowed.\")\n c = asarray(A)\n\n if all(tup):\n for nrep in tup[::-1]:\n c = nrep * [c]\n return block(c)\n\n d = len(tup)\n if d < c.ndim:\n tup = (1,) * (c.ndim - d) + tup\n if c.ndim < d:\n shape = (1,) * (d - c.ndim) + c.shape\n else:\n shape = c.shape\n shape_out = tuple(s * t for s, t in zip(shape, tup))\n return empty(shape=shape_out, dtype=c.dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_expand_pad_value_expand_pad_value.return.pad_value": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_expand_pad_value_expand_pad_value.return.pad_value", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 882, "end_line": 916, "span_ids": ["expand_pad_value"], "tokens": 326}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def expand_pad_value(array, pad_value):\n if isinstance(pad_value, Number):\n pad_value = array.ndim * ((pad_value, pad_value),)\n elif (\n isinstance(pad_value, Sequence)\n and all(isinstance(pw, Number) for pw in pad_value)\n and len(pad_value) == 1\n ):\n pad_value = array.ndim * ((pad_value[0], pad_value[0]),)\n elif (\n isinstance(pad_value, Sequence)\n and len(pad_value) == 2\n and all(isinstance(pw, Number) for pw in pad_value)\n ):\n pad_value = array.ndim * (tuple(pad_value),)\n elif (\n isinstance(pad_value, Sequence)\n and len(pad_value) == array.ndim\n and all(isinstance(pw, Sequence) for pw in pad_value)\n and all((len(pw) == 2) for pw in pad_value)\n and all(all(isinstance(w, Number) for w in pw) for pw in pad_value)\n ):\n pad_value = tuple(tuple(pw) for pw in pad_value)\n elif (\n isinstance(pad_value, Sequence)\n and len(pad_value) == 1\n and isinstance(pad_value[0], Sequence)\n and len(pad_value[0]) == 2\n and all(isinstance(pw, Number) for pw in pad_value[0])\n ):\n pad_value = array.ndim * (tuple(pad_value[0]),)\n else:\n raise TypeError(\"`pad_value` must be composed of integral typed values.\")\n\n return pad_value", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_get_pad_shapes_chunks_get_pad_shapes_chunks.return.pad_shapes_pad_chunks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_get_pad_shapes_chunks_get_pad_shapes_chunks.return.pad_shapes_pad_chunks", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 919, "end_line": 935, "span_ids": ["get_pad_shapes_chunks"], "tokens": 135}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_pad_shapes_chunks(array, pad_width, axes):\n \"\"\"\n Helper function for finding shapes and chunks of end pads.\n \"\"\"\n\n pad_shapes = [list(array.shape), 
list(array.shape)]\n    pad_chunks = [list(array.chunks), list(array.chunks)]\n\n    for d in axes:\n        for i in range(2):\n            pad_shapes[i][d] = pad_width[d][i]\n            pad_chunks[i][d] = (pad_width[d][i],)\n\n    pad_shapes = [tuple(s) for s in pad_shapes]\n    pad_chunks = [tuple(c) for c in pad_chunks]\n\n    return pad_shapes, pad_chunks", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_linear_ramp_chunk_linear_ramp_chunk.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_linear_ramp_chunk_linear_ramp_chunk.return.result", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 938, "end_line": 959, "span_ids": ["linear_ramp_chunk"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def linear_ramp_chunk(start, stop, num, dim, step):\n    \"\"\"\n    Helper function to find the linear ramp for a chunk.\n    \"\"\"\n\n    num1 = num + 1\n\n    shape = list(start.shape)\n    shape[dim] = num\n    shape = tuple(shape)\n\n    dtype = np.dtype(start.dtype)\n\n    result = np.empty(shape, dtype=dtype)\n    for i in np.ndindex(start.shape):\n        j = list(i)\n        j[dim] = slice(None)\n        j = tuple(j)\n\n        result[j] = np.linspace(start[i], stop, num1, dtype=dtype)[1:][::step]\n\n    return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_pad_edge_pad_edge.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_pad_edge_pad_edge.return.result", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 962, "end_line": 1022, "span_ids": ["pad_edge"], "tokens": 473}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pad_edge(array, pad_width, mode, **kwargs):\n    \"\"\"\n    Helper function for padding edges.\n\n    Handles the cases where only the values on the edge are needed.\n    \"\"\"\n\n    kwargs = {k: expand_pad_value(array, v) for k, v in kwargs.items()}\n\n    result = array\n    for d in range(array.ndim):\n        pad_shapes, pad_chunks = get_pad_shapes_chunks(result, pad_width, (d,))\n        pad_arrays = [result, result]\n\n        if mode == \"constant\":\n            constant_values = kwargs[\"constant_values\"][d]\n            constant_values = [asarray(c).astype(result.dtype) for c in constant_values]\n\n            
pad_arrays = [\n broadcast_to(v, s, c)\n for v, s, c in zip(constant_values, pad_shapes, pad_chunks)\n ]\n elif mode in [\"edge\", \"linear_ramp\"]:\n pad_slices = [result.ndim * [slice(None)], result.ndim * [slice(None)]]\n pad_slices[0][d] = slice(None, 1, None)\n pad_slices[1][d] = slice(-1, None, None)\n pad_slices = [tuple(sl) for sl in pad_slices]\n\n pad_arrays = [result[sl] for sl in pad_slices]\n\n if mode == \"edge\":\n pad_arrays = [\n broadcast_to(a, s, c)\n for a, s, c in zip(pad_arrays, pad_shapes, pad_chunks)\n ]\n elif mode == \"linear_ramp\":\n end_values = kwargs[\"end_values\"][d]\n\n pad_arrays = [\n a.map_blocks(\n linear_ramp_chunk,\n ev,\n pw,\n chunks=c,\n dtype=result.dtype,\n dim=d,\n step=(2 * i - 1),\n )\n for i, (a, ev, pw, c) in enumerate(\n zip(pad_arrays, end_values, pad_width[d], pad_chunks)\n )\n ]\n elif mode == \"empty\":\n pad_arrays = [\n empty(s, dtype=array.dtype, chunks=c)\n for s, c in zip(pad_shapes, pad_chunks)\n ]\n\n result = concatenate([pad_arrays[0], result, pad_arrays[1]], axis=d)\n\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_pad_reuse_pad_reuse.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_pad_reuse_pad_reuse.return.result", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1025, "end_line": 1079, "span_ids": ["pad_reuse"], "tokens": 408}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pad_reuse(array, pad_width, mode, **kwargs):\n \"\"\"\n Helper function for padding boundaries with values in the array.\n\n Handles the cases where the padding is constructed from values in\n the array. 
Namely by reflecting them or tiling them to create periodic\n    boundary constraints.\n    \"\"\"\n\n    if mode in {\"reflect\", \"symmetric\"}:\n        reflect_type = kwargs.get(\"reflect_type\", \"even\")\n        if reflect_type == \"odd\":\n            raise NotImplementedError(\"`pad` does not support `reflect_type` of `odd`.\")\n        if reflect_type != \"even\":\n            raise ValueError(\n                \"unsupported value for reflect_type, must be one of (`even`, `odd`)\"\n            )\n\n    result = np.empty(array.ndim * (3,), dtype=object)\n    for idx in np.ndindex(result.shape):\n        select = []\n        orient = []\n        for i, s, pw in zip(idx, array.shape, pad_width):\n            if mode == \"wrap\":\n                pw = pw[::-1]\n\n            if i < 1:\n                if mode == \"reflect\":\n                    select.append(slice(1, pw[0] + 1, None))\n                else:\n                    select.append(slice(None, pw[0], None))\n            elif i > 1:\n                if mode == \"reflect\":\n                    select.append(slice(s - pw[1] - 1, s - 1, None))\n                else:\n                    select.append(slice(s - pw[1], None, None))\n            else:\n                select.append(slice(None))\n\n            if i != 1 and mode in [\"reflect\", \"symmetric\"]:\n                orient.append(slice(None, None, -1))\n            else:\n                orient.append(slice(None))\n\n        select = tuple(select)\n        orient = tuple(orient)\n\n        if mode == \"wrap\":\n            idx = tuple(2 - i for i in idx)\n\n        result[idx] = array[select][orient]\n\n    result = block(result.tolist())\n\n    return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_pad_stats_pad_stats.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_pad_stats_pad_stats.return.result", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1082, "end_line": 1145, "span_ids": ["pad_stats"], "tokens": 471}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pad_stats(array, pad_width, mode, stat_length):\n    \"\"\"\n    Helper function for padding boundaries with statistics from the array.\n\n    In cases where the padding requires computations of statistics from part\n    or all of the array, this function helps compute those statistics as\n    requested and then adds those statistics onto the boundaries of the array.\n    \"\"\"\n\n    if mode == \"median\":\n        raise NotImplementedError(\"`pad` does not support `mode` of `median`.\")\n\n    stat_length = expand_pad_value(array, stat_length)\n\n    result = np.empty(array.ndim * (3,), dtype=object)\n    for idx in np.ndindex(result.shape):\n        axes = []\n        select = []\n        pad_shape = []\n        pad_chunks = []\n        for d, (i, s, c, w, l) in enumerate(\n            zip(idx, array.shape, array.chunks, pad_width, stat_length)\n        ):\n            if i < 1:\n                axes.append(d)\n                select.append(slice(None, l[0], None))\n                pad_shape.append(w[0])\n                pad_chunks.append(w[0])\n            elif i > 1:\n                axes.append(d)\n                select.append(slice(s - l[1], None, None))\n                pad_shape.append(w[1])\n                pad_chunks.append(w[1])\n            else:\n                select.append(slice(None))\n                pad_shape.append(s)\n                pad_chunks.append(c)\n\n        axes = tuple(axes)\n        select = tuple(select)\n        pad_shape = 
tuple(pad_shape)\n pad_chunks = tuple(pad_chunks)\n\n result_idx = array[select]\n if axes:\n if mode == \"maximum\":\n result_idx = result_idx.max(axis=axes, keepdims=True)\n elif mode == \"mean\":\n result_idx = result_idx.mean(axis=axes, keepdims=True)\n elif mode == \"minimum\":\n result_idx = result_idx.min(axis=axes, keepdims=True)\n\n result_idx = broadcast_to(result_idx, pad_shape, chunks=pad_chunks)\n\n if mode == \"mean\":\n if np.issubdtype(array.dtype, np.integer):\n result_idx = rint(result_idx)\n result_idx = result_idx.astype(array.dtype)\n\n result[idx] = result_idx\n\n result = block(result.tolist())\n\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_wrapped_pad_func_pad_udf.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_wrapped_pad_func_pad_udf.return.result", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1148, "end_line": 1187, "span_ids": ["pad_udf", "wrapped_pad_func"], "tokens": 306}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def wrapped_pad_func(array, pad_func, iaxis_pad_width, iaxis, pad_func_kwargs):\n result = np.empty_like(array)\n for i in np.ndindex(array.shape[:iaxis] + array.shape[iaxis + 1 :]):\n i = i[:iaxis] + (slice(None),) + i[iaxis:]\n result[i] = pad_func(array[i], iaxis_pad_width, iaxis, pad_func_kwargs)\n\n return result\n\n\ndef pad_udf(array, pad_width, mode, **kwargs):\n \"\"\"\n Helper function for padding boundaries with a user defined function.\n\n In cases where the padding requires a custom user defined function be\n applied to the array, this function assists in the prepping and\n application of this function to the Dask Array to construct the desired\n boundaries.\n \"\"\"\n\n result = pad_edge(array, pad_width, \"constant\", constant_values=0)\n\n chunks = result.chunks\n for d in range(result.ndim):\n result = result.rechunk(\n chunks[:d] + (result.shape[d : d + 1],) + chunks[d + 1 :]\n )\n\n result = result.map_blocks(\n wrapped_pad_func,\n name=\"pad\",\n dtype=result.dtype,\n pad_func=mode,\n iaxis_pad_width=pad_width[d],\n iaxis=d,\n pad_func_kwargs=kwargs,\n )\n\n result = result.rechunk(chunks)\n\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_pad_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_pad_", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1190, "end_line": 1239, 
"span_ids": ["pad"], "tokens": 409}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef pad(array, pad_width, mode=\"constant\", **kwargs):\n array = asarray(array)\n\n pad_width = expand_pad_value(array, pad_width)\n\n if callable(mode):\n return pad_udf(array, pad_width, mode, **kwargs)\n\n # Make sure that no unsupported keywords were passed for the current mode\n allowed_kwargs = {\n \"empty\": [],\n \"edge\": [],\n \"wrap\": [],\n \"constant\": [\"constant_values\"],\n \"linear_ramp\": [\"end_values\"],\n \"maximum\": [\"stat_length\"],\n \"mean\": [\"stat_length\"],\n \"median\": [\"stat_length\"],\n \"minimum\": [\"stat_length\"],\n \"reflect\": [\"reflect_type\"],\n \"symmetric\": [\"reflect_type\"],\n }\n try:\n unsupported_kwargs = set(kwargs) - set(allowed_kwargs[mode])\n except KeyError as e:\n raise ValueError(\"mode '{}' is not supported\".format(mode)) from e\n if unsupported_kwargs:\n raise ValueError(\n \"unsupported keyword arguments for mode '{}': {}\".format(\n mode, unsupported_kwargs\n )\n )\n\n if mode in {\"maximum\", \"mean\", \"median\", \"minimum\"}:\n stat_length = kwargs.get(\"stat_length\", tuple((n, n) for n in array.shape))\n return pad_stats(array, pad_width, mode, stat_length)\n elif mode == \"constant\":\n kwargs.setdefault(\"constant_values\", 0)\n return pad_edge(array, pad_width, mode, **kwargs)\n elif mode == \"linear_ramp\":\n kwargs.setdefault(\"end_values\", 0)\n return pad_edge(array, pad_width, mode, **kwargs)\n elif mode in {\"edge\", \"empty\"}:\n return pad_edge(array, pad_width, mode)\n elif mode in [\"reflect\", \"symmetric\", \"wrap\"]:\n return pad_reuse(array, pad_width, mode, **kwargs)\n\n assert False, \"unreachable\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/einsumfuncs.py_np_chunk_einsum.return.chunk_reshape_chunk_shape": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/einsumfuncs.py_np_chunk_einsum.return.chunk_reshape_chunk_shape", "embedding": null, "metadata": {"file_path": "dask/array/einsumfuncs.py", "file_name": "einsumfuncs.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 20, "span_ids": ["imports", "chunk_einsum"], "tokens": 162}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\nfrom numpy.compat import basestring\n\nfrom .core import blockwise, asarray, einsum_lookup\nfrom ..utils import derived_from\n\neinsum_symbols = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\neinsum_symbols_set = set(einsum_symbols)\n\n\ndef chunk_einsum(*operands, **kwargs):\n subscripts = kwargs.pop(\"subscripts\")\n ncontract_inds = kwargs.pop(\"ncontract_inds\")\n dtype = 
kwargs.pop(\"kernel_dtype\")\n einsum = einsum_lookup.dispatch(type(operands[0]))\n chunk = einsum(subscripts, *operands, dtype=dtype, **kwargs)\n\n # Avoid concatenate=True in blockwise by adding 1's\n # for the contracted dimensions\n return chunk.reshape(chunk.shape + (1,) * ncontract_inds)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/einsumfuncs.py__This_function_duplicate_parse_einsum_input._Parse_ellipses": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/einsumfuncs.py__This_function_duplicate_parse_einsum_input._Parse_ellipses", "embedding": null, "metadata": {"file_path": "dask/array/einsumfuncs.py", "file_name": "einsumfuncs.py", "file_type": "text/x-python", "category": "implementation", "start_line": 23, "end_line": 109, "span_ids": ["chunk_einsum", "parse_einsum_input"], "tokens": 690}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# This function duplicates numpy's _parse_einsum_input() function\ndef parse_einsum_input(operands):\n \"\"\"\n A reproduction of numpy's _parse_einsum_input()\n which in itself is a reproduction of\n c side einsum parsing in python.\n\n Returns\n -------\n input_strings : str\n Parsed input strings\n output_string : str\n Parsed output string\n operands : list of array_like\n The operands to use in the numpy contraction\n Examples\n --------\n The operand list is simplified to reduce printing:\n >> a = np.random.rand(4, 4)\n >> b = np.random.rand(4, 4, 4)\n >> __parse_einsum_input(('...a,...a->...', a, b))\n ('za,xza', 'xz', [a, b])\n >> __parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0]))\n ('za,xza', 'xz', [a, b])\n \"\"\"\n\n if len(operands) == 0:\n raise ValueError(\"No input operands\")\n\n if isinstance(operands[0], basestring):\n subscripts = operands[0].replace(\" \", \"\")\n operands = [asarray(o) for o in operands[1:]]\n\n # Ensure all characters are valid\n for s in subscripts:\n if s in \".,->\":\n continue\n if s not in einsum_symbols_set:\n raise ValueError(\"Character %s is not a valid symbol.\" % s)\n\n else:\n tmp_operands = list(operands)\n operand_list = []\n subscript_list = []\n for p in range(len(operands) // 2):\n operand_list.append(tmp_operands.pop(0))\n subscript_list.append(tmp_operands.pop(0))\n\n output_list = tmp_operands[-1] if len(tmp_operands) else None\n operands = [asarray(v) for v in operand_list]\n subscripts = \"\"\n last = len(subscript_list) - 1\n for num, sub in enumerate(subscript_list):\n for s in sub:\n if s is Ellipsis:\n subscripts += \"...\"\n elif isinstance(s, int):\n subscripts += einsum_symbols[s]\n else:\n raise TypeError(\n \"For this input type lists must contain \"\n \"either int or Ellipsis\"\n )\n if num != last:\n subscripts += \",\"\n\n if output_list is not None:\n subscripts += \"->\"\n for s in output_list:\n if s is Ellipsis:\n subscripts += \"...\"\n elif isinstance(s, int):\n subscripts += einsum_symbols[s]\n else:\n raise TypeError(\n \"For this input type lists must contain \"\n 
\"either int or Ellipsis\"\n )\n # Check for proper \"->\"\n if (\"-\" in subscripts) or (\">\" in subscripts):\n invalid = (subscripts.count(\"-\") > 1) or (subscripts.count(\">\") > 1)\n if invalid or (subscripts.count(\"->\") != 1):\n raise ValueError(\"Subscripts can only contain one '->'.\")\n\n # Parse ellipses\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/einsumfuncs.py_parse_einsum_input.if_in_subscripts__parse_einsum_input.return._input_subscripts_output": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/einsumfuncs.py_parse_einsum_input.if_in_subscripts__parse_einsum_input.return._input_subscripts_output", "embedding": null, "metadata": {"file_path": "dask/array/einsumfuncs.py", "file_name": "einsumfuncs.py", "file_type": "text/x-python", "category": "implementation", "start_line": 110, "end_line": 193, "span_ids": ["parse_einsum_input"], "tokens": 710}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def parse_einsum_input(operands):\n # ... other code\n if \".\" in subscripts:\n used = subscripts.replace(\".\", \"\").replace(\",\", \"\").replace(\"->\", \"\")\n unused = list(einsum_symbols_set - set(used))\n ellipse_inds = \"\".join(unused)\n longest = 0\n\n if \"->\" in subscripts:\n input_tmp, output_sub = subscripts.split(\"->\")\n split_subscripts = input_tmp.split(\",\")\n out_sub = True\n else:\n split_subscripts = subscripts.split(\",\")\n out_sub = False\n\n for num, sub in enumerate(split_subscripts):\n if \".\" in sub:\n if (sub.count(\".\") != 3) or (sub.count(\"...\") != 1):\n raise ValueError(\"Invalid Ellipses.\")\n\n # Take into account numerical values\n if operands[num].shape == ():\n ellipse_count = 0\n else:\n ellipse_count = max(operands[num].ndim, 1)\n ellipse_count -= len(sub) - 3\n\n if ellipse_count > longest:\n longest = ellipse_count\n\n if ellipse_count < 0:\n raise ValueError(\"Ellipses lengths do not match.\")\n elif ellipse_count == 0:\n split_subscripts[num] = sub.replace(\"...\", \"\")\n else:\n rep_inds = ellipse_inds[-ellipse_count:]\n split_subscripts[num] = sub.replace(\"...\", rep_inds)\n\n subscripts = \",\".join(split_subscripts)\n if longest == 0:\n out_ellipse = \"\"\n else:\n out_ellipse = ellipse_inds[-longest:]\n\n if out_sub:\n subscripts += \"->\" + output_sub.replace(\"...\", out_ellipse)\n else:\n # Special care for outputless ellipses\n output_subscript = \"\"\n tmp_subscripts = subscripts.replace(\",\", \"\")\n for s in sorted(set(tmp_subscripts)):\n if s not in einsum_symbols_set:\n raise ValueError(\"Character %s is not a valid symbol.\" % s)\n if tmp_subscripts.count(s) == 1:\n output_subscript += s\n normal_inds = \"\".join(sorted(set(output_subscript) - set(out_ellipse)))\n\n subscripts += \"->\" + out_ellipse + normal_inds\n\n # Build output string if does not exist\n if \"->\" in subscripts:\n input_subscripts, output_subscript = subscripts.split(\"->\")\n else:\n input_subscripts = subscripts\n # Build output 
subscripts\n tmp_subscripts = subscripts.replace(\",\", \"\")\n output_subscript = \"\"\n for s in sorted(set(tmp_subscripts)):\n if s not in einsum_symbols_set:\n raise ValueError(\"Character %s is not a valid symbol.\" % s)\n if tmp_subscripts.count(s) == 1:\n output_subscript += s\n\n # Make sure output subscripts are in the input\n for char in output_subscript:\n if char not in input_subscripts:\n raise ValueError(\"Output character %s did not appear in the input\" % char)\n\n # Make sure number operands is equivalent to the number of terms\n if len(input_subscripts.split(\",\")) != len(operands):\n raise ValueError(\n \"Number of einsum subscripts must be equal to the number of operands.\"\n )\n\n return (input_subscripts, output_subscript, operands)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/einsumfuncs.py_einsum_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/einsumfuncs.py_einsum_", "embedding": null, "metadata": {"file_path": "dask/array/einsumfuncs.py", "file_name": "einsumfuncs.py", "file_type": "text/x-python", "category": "implementation", "start_line": 196, "end_line": 252, "span_ids": ["einsum"], "tokens": 439}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef einsum(*operands, **kwargs):\n dtype = kwargs.pop(\"dtype\", None)\n optimize = kwargs.pop(\"optimize\", False)\n split_every = kwargs.pop(\"split_every\", None)\n\n einsum_dtype = dtype\n\n inputs, outputs, ops = parse_einsum_input(operands)\n subscripts = \"->\".join((inputs, outputs))\n\n # Infer the output dtype from operands\n if dtype is None:\n dtype = np.result_type(*[o.dtype for o in ops])\n\n if optimize is not False:\n # Avoid computation of dask arrays within np.einsum_path\n # by passing in small numpy arrays broadcasted\n # up to the right shape\n fake_ops = [np.broadcast_to(o.dtype.type(0), shape=o.shape) for o in ops]\n optimize, _ = np.einsum_path(subscripts, *fake_ops, optimize=optimize)\n\n inputs = [tuple(i) for i in inputs.split(\",\")]\n\n # Set of all indices\n all_inds = set(a for i in inputs for a in i)\n\n # Which indices are contracted?\n contract_inds = all_inds - set(outputs)\n ncontract_inds = len(contract_inds)\n\n # Introduce the contracted indices into the blockwise product\n # so that we get numpy arrays, not lists\n result = blockwise(\n chunk_einsum,\n tuple(outputs) + tuple(contract_inds),\n *(a for ap in zip(ops, inputs) for a in ap),\n # blockwise parameters\n adjust_chunks={ind: 1 for ind in contract_inds},\n dtype=dtype,\n # np.einsum parameters\n subscripts=subscripts,\n kernel_dtype=einsum_dtype,\n ncontract_inds=ncontract_inds,\n optimize=optimize,\n **kwargs\n )\n\n # Now reduce over any extra contraction dimensions\n if ncontract_inds > 0:\n size = len(outputs)\n return result.sum(\n axis=list(range(size, size + ncontract_inds)), split_every=split_every\n )\n\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": 
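The `einsum` implementation serialized above contracts by keeping each contracted index alive as a size-1 axis in `blockwise` (via `adjust_chunks`) and then summing those axes away. A minimal sketch of the resulting public behavior, assuming only that `dask` and `numpy` are installed (shapes and chunking here are illustrative, not from the source):

```python
# Verify that dask's blockwise-plus-sum contraction matches numpy's einsum.
import numpy as np
import dask.array as da

x = np.random.random((6, 4))
y = np.random.random((4, 5))

dx = da.from_array(x, chunks=(3, 2))  # the contracted "j" axis is chunked,
dy = da.from_array(y, chunks=(2, 5))  # so it is reduced with a final sum()

out = da.einsum("ij,jk->ik", dx, dy)
np.testing.assert_allclose(out.compute(), np.einsum("ij,jk->ik", x, y))
```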
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py_inspect__hfft_out_chunks.return.chunks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py_inspect__hfft_out_chunks.return.chunks", "embedding": null, "metadata": {"file_path": "dask/array/fft.py", "file_name": "fft.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 80, "span_ids": ["_hfft_out_chunks", "imports", "_rfft_out_chunks", "_irfft_out_chunks", "_fft_out_chunks"], "tokens": 533}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import inspect\nfrom collections.abc import Sequence\n\nimport numpy as np\n\ntry:\n import scipy\n import scipy.fftpack\nexcept ImportError:\n scipy = None\n\nfrom .core import concatenate as _concatenate\nfrom .creation import arange as _arange\nfrom ..utils import derived_from, skip_doctest\n\n\nchunk_error = (\n \"Dask array only supports taking an FFT along an axis that \\n\"\n \"has a single chunk. An FFT operation was tried on axis %s \\n\"\n \"which has chunks %s. To change the array's chunks use \"\n \"dask.Array.rechunk.\"\n)\n\nfft_preamble = \"\"\"\n Wrapping of %s\n\n The axis along which the FFT is applied must have only one chunk. To change\n the array's chunking use dask.Array.rechunk.\n\n The %s docstring follows below:\n\n \"\"\"\n\n\ndef _fft_out_chunks(a, s, axes):\n \"\"\" For computing the output chunks of [i]fft*\"\"\"\n if s is None:\n return a.chunks\n chunks = list(a.chunks)\n for i, axis in enumerate(axes):\n chunks[axis] = (s[i],)\n return chunks\n\n\ndef _rfft_out_chunks(a, s, axes):\n \"\"\" For computing the output chunks of rfft*\"\"\"\n if s is None:\n s = [a.chunks[axis][0] for axis in axes]\n s = list(s)\n s[-1] = s[-1] // 2 + 1\n chunks = list(a.chunks)\n for i, axis in enumerate(axes):\n chunks[axis] = (s[i],)\n return chunks\n\n\ndef _irfft_out_chunks(a, s, axes):\n \"\"\" For computing the output chunks of irfft*\"\"\"\n if s is None:\n s = [a.chunks[axis][0] for axis in axes]\n s[-1] = 2 * (s[-1] - 1)\n chunks = list(a.chunks)\n for i, axis in enumerate(axes):\n chunks[axis] = (s[i],)\n return chunks\n\n\ndef _hfft_out_chunks(a, s, axes):\n assert len(axes) == 1\n\n axis = axes[0]\n\n if s is None:\n s = [2 * (a.chunks[axis][0] - 1)]\n\n n = s[0]\n\n chunks = list(a.chunks)\n chunks[axis] = (n,)\n return chunks", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py__ihfft_out_chunks__out_chunk_fns._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py__ihfft_out_chunks__out_chunk_fns._", "embedding": null, "metadata": {"file_path": "dask/array/fft.py", "file_name": "fft.py", "file_type": "text/x-python", "category": "implementation", "start_line": 83, "end_line": 111, 
"span_ids": ["impl:11", "_ihfft_out_chunks"], "tokens": 194}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _ihfft_out_chunks(a, s, axes):\n assert len(axes) == 1\n\n axis = axes[0]\n\n if s is None:\n s = [a.chunks[axis][0]]\n else:\n assert len(s) == 1\n\n n = s[0]\n\n chunks = list(a.chunks)\n if n % 2 == 0:\n m = (n // 2) + 1\n else:\n m = (n + 1) // 2\n chunks[axis] = (m,)\n return chunks\n\n\n_out_chunk_fns = {\n \"fft\": _fft_out_chunks,\n \"ifft\": _fft_out_chunks,\n \"rfft\": _rfft_out_chunks,\n \"irfft\": _irfft_out_chunks,\n \"hfft\": _hfft_out_chunks,\n \"ihfft\": _ihfft_out_chunks,\n}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py_fft_wrap_fft_wrap.try_.except_KeyError_.raise_ValueError_Given_u": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py_fft_wrap_fft_wrap.try_.except_KeyError_.raise_ValueError_Given_u", "embedding": null, "metadata": {"file_path": "dask/array/fft.py", "file_name": "fft.py", "file_type": "text/x-python", "category": "implementation", "start_line": 114, "end_line": 155, "span_ids": ["fft_wrap"], "tokens": 323}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def fft_wrap(fft_func, kind=None, dtype=None):\n \"\"\"Wrap 1D, 2D, and ND real and complex FFT functions\n\n Takes a function that behaves like ``numpy.fft`` functions and\n a specified kind to match it to that are named after the functions\n in the ``numpy.fft`` API.\n\n Supported kinds include:\n\n * fft\n * fft2\n * fftn\n * ifft\n * ifft2\n * ifftn\n * rfft\n * rfft2\n * rfftn\n * irfft\n * irfft2\n * irfftn\n * hfft\n * ihfft\n\n Examples\n --------\n >>> import dask.array.fft as dff\n >>> parallel_fft = dff.fft_wrap(np.fft.fft)\n >>> parallel_ifft = dff.fft_wrap(np.fft.ifft)\n \"\"\"\n if scipy is not None:\n if fft_func is scipy.fftpack.rfft:\n raise ValueError(\"SciPy's `rfft` doesn't match the NumPy API.\")\n elif fft_func is scipy.fftpack.irfft:\n raise ValueError(\"SciPy's `irfft` doesn't match the NumPy API.\")\n\n if kind is None:\n kind = fft_func.__name__\n try:\n out_chunk_fn = _out_chunk_fns[kind.rstrip(\"2n\")]\n except KeyError:\n raise ValueError(\"Given unknown `kind` %s.\" % kind)\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py_fft_wrap.func_fft_wrap.func.return.a_map_blocks_fft_func_a": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py_fft_wrap.func_fft_wrap.func.return.a_map_blocks_fft_func_a", "embedding": null, "metadata": {"file_path": "dask/array/fft.py", "file_name": "fft.py", "file_type": "text/x-python", "category": "implementation", "start_line": 156, "end_line": 191, "span_ids": ["fft_wrap"], "tokens": 300}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def fft_wrap(fft_func, kind=None, dtype=None):\n # ... other code\n\n def func(a, s=None, axes=None):\n if axes is None:\n if kind.endswith(\"2\"):\n axes = (-2, -1)\n elif kind.endswith(\"n\"):\n if s is None:\n axes = tuple(range(a.ndim))\n else:\n axes = tuple(range(len(s)))\n else:\n axes = (-1,)\n else:\n if len(set(axes)) < len(axes):\n raise ValueError(\"Duplicate axes not allowed.\")\n\n _dtype = dtype\n if _dtype is None:\n sample = np.ones(a.ndim * (8,), dtype=a.dtype)\n try:\n _dtype = fft_func(sample, axes=axes).dtype\n except TypeError:\n _dtype = fft_func(sample).dtype\n\n for each_axis in axes:\n if len(a.chunks[each_axis]) != 1:\n raise ValueError(chunk_error % (each_axis, a.chunks[each_axis]))\n\n chunks = out_chunk_fn(a, s, axes)\n\n args = (s, axes)\n if kind.endswith(\"fft\"):\n axis = None if axes is None else axes[0]\n n = None if s is None else s[0]\n args = (n, axis)\n\n return a.map_blocks(fft_func, *args, dtype=_dtype, chunks=chunks)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py_fft_wrap.if_kind_endswith_fft__fft_wrap.return.func": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py_fft_wrap.if_kind_endswith_fft__fft_wrap.return.func", "embedding": null, "metadata": {"file_path": "dask/array/fft.py", "file_name": "fft.py", "file_type": "text/x-python", "category": "implementation", "start_line": 194, "end_line": 216, "span_ids": ["fft_wrap"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def fft_wrap(fft_func, kind=None, dtype=None):\n # ... 
other code\n\n if kind.endswith(\"fft\"):\n _func = func\n\n def func(a, n=None, axis=None):\n s = None\n if n is not None:\n s = (n,)\n\n axes = None\n if axis is not None:\n axes = (axis,)\n\n return _func(a, s, axes)\n\n func_mod = inspect.getmodule(fft_func)\n func_name = fft_func.__name__\n func_fullname = func_mod.__name__ + \".\" + func_name\n if fft_func.__doc__ is not None:\n func.__doc__ = fft_preamble % (2 * (func_fullname,))\n func.__doc__ += fft_func.__doc__\n func.__doc__ = skip_doctest(func.__doc__)\n func.__name__ = func_name\n return func", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py_fft_rfftfreq.return.r": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py_fft_rfftfreq.return.r", "embedding": null, "metadata": {"file_path": "dask/array/fft.py", "file_name": "fft.py", "file_type": "text/x-python", "category": "implementation", "start_line": 217, "end_line": 258, "span_ids": ["fftfreq", "impl:13", "rfftfreq", "_fftfreq_block"], "tokens": 331}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "fft = fft_wrap(np.fft.fft)\nfft2 = fft_wrap(np.fft.fft2)\nfftn = fft_wrap(np.fft.fftn)\nifft = fft_wrap(np.fft.ifft)\nifft2 = fft_wrap(np.fft.ifft2)\nifftn = fft_wrap(np.fft.ifftn)\nrfft = fft_wrap(np.fft.rfft)\nrfft2 = fft_wrap(np.fft.rfft2)\nrfftn = fft_wrap(np.fft.rfftn)\nirfft = fft_wrap(np.fft.irfft)\nirfft2 = fft_wrap(np.fft.irfft2)\nirfftn = fft_wrap(np.fft.irfftn)\nhfft = fft_wrap(np.fft.hfft)\nihfft = fft_wrap(np.fft.ihfft)\n\n\ndef _fftfreq_block(i, n, d):\n r = i.copy()\n r[i >= (n + 1) // 2] -= n\n r /= n * d\n return r\n\n\n@derived_from(np.fft)\ndef fftfreq(n, d=1.0, chunks=\"auto\"):\n n = int(n)\n d = float(d)\n\n r = _arange(n, dtype=float, chunks=chunks)\n\n return r.map_blocks(_fftfreq_block, dtype=float, n=n, d=d)\n\n\n@derived_from(np.fft)\ndef rfftfreq(n, d=1.0, chunks=\"auto\"):\n n = int(n)\n d = float(d)\n\n r = _arange(n // 2 + 1, dtype=float, chunks=chunks)\n r /= n * d\n\n return r", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py__fftshift_helper_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py__fftshift_helper_", "embedding": null, "metadata": {"file_path": "dask/array/fft.py", "file_name": "fft.py", "file_type": "text/x-python", "category": "implementation", "start_line": 261, "end_line": 296, "span_ids": ["ifftshift", "_fftshift_helper", "fftshift"], "tokens": 241}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
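The wrappers defined above all inherit the single-chunk restriction on the transformed axis that `chunk_error` describes. A hedged usage sketch (shapes and chunking are arbitrary choices, not from the source):

```python
import dask.array as da

x = da.random.random((8, 16), chunks=(4, 16))  # axis -1 is a single chunk
X = da.fft.fft(x, axis=-1)                     # fine

try:
    da.fft.fft(x, axis=0)                      # axis 0 has chunks (4, 4)
except ValueError as err:
    print(err)                                 # the chunk_error message above
```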
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _fftshift_helper(x, axes=None, inverse=False):\n if axes is None:\n axes = list(range(x.ndim))\n elif not isinstance(axes, Sequence):\n axes = (axes,)\n\n y = x\n for i in axes:\n n = y.shape[i]\n n_2 = (n + int(inverse is False)) // 2\n\n l = y.ndim * [slice(None)]\n l[i] = slice(None, n_2)\n l = tuple(l)\n\n r = y.ndim * [slice(None)]\n r[i] = slice(n_2, None)\n r = tuple(r)\n\n y = _concatenate([y[r], y[l]], axis=i)\n\n if len(x.chunks[i]) == 1:\n y = y.rechunk({i: x.chunks[i]})\n\n return y\n\n\n@derived_from(np.fft)\ndef fftshift(x, axes=None):\n return _fftshift_helper(x, axes=axes, inverse=False)\n\n\n@derived_from(np.fft)\ndef ifftshift(x, axes=None):\n return _fftshift_helper(x, axes=axes, inverse=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_np__SIGNATURE._0_1_format__IN": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_np__SIGNATURE._0_1_format__IN", "embedding": null, "metadata": {"file_path": "dask/array/gufunc.py", "file_name": "gufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 24, "span_ids": ["imports"], "tokens": 265}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\nimport re\n\nfrom tlz import concat, merge, unique\n\nfrom .core import Array, asarray, blockwise, getitem, apply_infer_dtype\nfrom .utils import meta_from_array\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..core import flatten\n\n\n# Modified version of `numpy.lib.function_base._parse_gufunc_signature`\n# Modifications:\n# - Allow for zero input arguments\n# See https://docs.scipy.org/doc/numpy/reference/c-api/generalized-ufuncs.html\n_DIMENSION_NAME = r\"\\w+\"\n_CORE_DIMENSION_LIST = \"(?:{0:}(?:,{0:})*,?)?\".format(_DIMENSION_NAME)\n_ARGUMENT = r\"\\({}\\)\".format(_CORE_DIMENSION_LIST)\n_INPUT_ARGUMENTS = \"(?:{0:}(?:,{0:})*,?)?\".format(_ARGUMENT)\n_OUTPUT_ARGUMENTS = \"{0:}(?:,{0:})*\".format(\n _ARGUMENT\n) # Use `'{0:}(?:,{0:})*,?'` if gufunc-\n# signature should be allowed for length 1 tuple returns\n_SIGNATURE = \"^{0:}->{1:}$\".format(_INPUT_ARGUMENTS, _OUTPUT_ARGUMENTS)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py__parse_gufunc_signature__parse_gufunc_signature.return.ins_outs": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py__parse_gufunc_signature__parse_gufunc_signature.return.ins_outs", "embedding": null, "metadata": {"file_path": "dask/array/gufunc.py", "file_name": "gufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 27, "end_line": 55, "span_ids": 
["_parse_gufunc_signature"], "tokens": 260}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _parse_gufunc_signature(signature):\n \"\"\"\n Parse string signatures for a generalized universal function.\n\n Arguments\n ---------\n signature : string\n Generalized universal function signature, e.g., ``(m,n),(n,p)->(m,p)``\n for ``np.matmul``.\n\n Returns\n -------\n Tuple of input and output core dimensions parsed from the signature, each\n of the form List[Tuple[str, ...]], except for one output. For one output\n core dimension is not a list, but of the form Tuple[str, ...]\n \"\"\"\n signature = signature.replace(\" \", \"\")\n if not re.match(_SIGNATURE, signature):\n raise ValueError(\"Not a valid gufunc signature: {}\".format(signature))\n in_txt, out_txt = signature.split(\"->\")\n ins = [\n tuple(re.findall(_DIMENSION_NAME, arg)) for arg in re.findall(_ARGUMENT, in_txt)\n ]\n outs = [\n tuple(re.findall(_DIMENSION_NAME, arg))\n for arg in re.findall(_ARGUMENT, out_txt)\n ]\n outs = outs[0] if ((len(outs) == 1) and (out_txt[-1] != \",\")) else outs\n return ins, outs", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py__validate_normalize_axes__validate_normalize_axes._Assert_we_have_as_many_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py__validate_normalize_axes__validate_normalize_axes._Assert_we_have_as_many_", "embedding": null, "metadata": {"file_path": "dask/array/gufunc.py", "file_name": "gufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 58, "end_line": 143, "span_ids": ["_validate_normalize_axes"], "tokens": 784}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _validate_normalize_axes(axes, axis, keepdims, input_coredimss, output_coredimss):\n \"\"\"\n Validates logic of `axes`/`axis`/`keepdims` arguments and normalize them.\n Refer to [1]_ for details\n\n Arguments\n ---------\n axes: List of tuples\n axis: int\n keepdims: bool\n input_coredimss: List of Tuple of dims\n output_coredimss: List of Tuple of dims\n\n Returns\n -------\n input_axes: List of tuple of int\n output_axes: List of tuple of int\n\n References\n ----------\n .. 
[1] https://docs.scipy.org/doc/numpy/reference/ufuncs.html#optional-keyword-arguments\n    \"\"\"\n    nin = len(input_coredimss)\n    nout = 1 if not isinstance(output_coredimss, list) else len(output_coredimss)\n\n    if axes is not None and axis is not None:\n        raise ValueError(\n            \"Only one of `axis` or `axes` keyword arguments should be given\"\n        )\n    if axes and not isinstance(axes, list):\n        raise ValueError(\"`axes` has to be of type list\")\n\n    output_coredimss = output_coredimss if nout > 1 else [output_coredimss]\n    filtered_core_dims = list(filter(len, input_coredimss))\n    nr_outputs_with_coredims = len([True for x in output_coredimss if len(x) > 0])\n\n    if keepdims:\n        if nr_outputs_with_coredims > 0:\n            raise ValueError(\"`keepdims` can only be used for scalar outputs\")\n        output_coredimss = len(output_coredimss) * [filtered_core_dims[0]]\n\n    core_dims = input_coredimss + output_coredimss\n    if axis is not None:\n        if not isinstance(axis, int):\n            raise ValueError(\"`axis` argument has to be an integer value\")\n        if filtered_core_dims:\n            cd0 = filtered_core_dims[0]\n            if len(cd0) != 1:\n                raise ValueError(\n                    \"`axis` can only be used if one core dimension is present\"\n                )\n            for cd in filtered_core_dims:\n                if cd0 != cd:\n                    raise ValueError(\n                        \"To use `axis`, all core dimensions have to be equal\"\n                    )\n\n    # Expand defaults or axis\n    if axes is None:\n        if axis is not None:\n            axes = [(axis,) if cd else tuple() for cd in core_dims]\n        else:\n            axes = [tuple(range(-len(icd), 0)) for icd in core_dims]\n    elif not isinstance(axes, list):\n        raise ValueError(\"`axes` argument has to be a list\")\n    axes = [(a,) if isinstance(a, int) else a for a in axes]\n\n    if (\n        (nr_outputs_with_coredims == 0)\n        and (nin != len(axes))\n        and (nin + nout != len(axes))\n    ) or ((nr_outputs_with_coredims > 0) and (nin + nout != len(axes))):\n        raise ValueError(\n            \"The number of `axes` entries is not equal to the number of input and output arguments\"\n        )\n\n    # Treat outputs\n    output_axes = axes[nin:]\n    output_axes = (\n        output_axes\n        if output_axes\n        else [tuple(range(-len(ocd), 0)) for ocd in output_coredimss]\n    )\n    input_axes = axes[:nin]\n\n    # Assert we have as many axes as output core dimensions\n    # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py__validate_normalize_axes.for_idx_iax_icd_in_en__validate_normalize_axes.return.input_axes_output_axes": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py__validate_normalize_axes.for_idx_iax_icd_in_en__validate_normalize_axes.return.input_axes_output_axes", "embedding": null, "metadata": {"file_path": "dask/array/gufunc.py", "file_name": "gufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 144, "end_line": 172, "span_ids": ["_validate_normalize_axes"], "tokens": 291}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _validate_normalize_axes(axes, axis, keepdims, input_coredimss, output_coredimss):\n    # ... 
other code\n    for idx, (iax, icd) in enumerate(zip(input_axes, input_coredimss)):\n        if len(iax) != len(icd):\n            raise ValueError(\n                \"The number of `axes` entries for argument #{} is not equal to \"\n                \"the number of respective input core dimensions in signature\".format(\n                    idx\n                )\n            )\n    if not keepdims:\n        for idx, (oax, ocd) in enumerate(zip(output_axes, output_coredimss)):\n            if len(oax) != len(ocd):\n                raise ValueError(\n                    \"The number of `axes` entries for argument #{} is not equal to \"\n                    \"the number of respective output core dimensions in signature\".format(\n                        idx\n                    )\n                )\n    else:\n        if input_coredimss:\n            icd0 = input_coredimss[0]\n            for icd in input_coredimss:\n                if icd0 != icd:\n                    raise ValueError(\n                        \"To use `keepdims`, all core dimensions have to be equal\"\n                    )\n            iax0 = input_axes[0]\n            output_axes = [iax0 for _ in output_coredimss]\n\n    return input_axes, output_axes", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_apply_gufunc_apply_gufunc._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_apply_gufunc_apply_gufunc._", "embedding": null, "metadata": {"file_path": "dask/array/gufunc.py", "file_name": "gufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 175, "end_line": 277, "span_ids": ["apply_gufunc"], "tokens": 1259}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def apply_gufunc(func, signature, *args, **kwargs):\n    \"\"\"\n    Apply a generalized ufunc or similar Python function to arrays.\n\n    ``signature`` determines if the function consumes or produces core\n    dimensions. The remaining dimensions in given input arrays (``*args``)\n    are considered loop dimensions and are required to broadcast\n    naturally against each other.\n\n    In other terms, this function is like ``np.vectorize``, but for\n    the blocks of dask arrays. If the function itself should also\n    be vectorized, use ``vectorize=True`` for convenience.\n\n    Parameters\n    ----------\n    func : callable\n        Function to call like ``func(*args, **kwargs)`` on input arrays\n        (``*args``) that returns an array or tuple of arrays. If multiple\n        arguments with non-matching dimensions are supplied, this function is\n        expected to vectorize (broadcast) over axes of positional arguments in\n        the style of NumPy universal functions [1]_ (if this is not the case,\n        set ``vectorize=True``). 
If this function returns multiple outputs,\n ``output_core_dims`` has to be set as well.\n signature: string\n Specifies what core dimensions are consumed and produced by ``func``.\n According to the specification of numpy.gufunc signature [2]_\n *args : numeric\n Input arrays or scalars to the callable function.\n axes: List of tuples, optional, keyword only\n A list of tuples with indices of axes a generalized ufunc should operate on.\n For instance, for a signature of ``\"(i,j),(j,k)->(i,k)\"`` appropriate for\n matrix multiplication, the base elements are two-dimensional matrices\n and these are taken to be stored in the two last axes of each argument. The\n corresponding axes keyword would be ``[(-2, -1), (-2, -1), (-2, -1)]``.\n For simplicity, for generalized ufuncs that operate on 1-dimensional arrays\n (vectors), a single integer is accepted instead of a single-element tuple,\n and for generalized ufuncs for which all outputs are scalars, the output\n tuples can be omitted.\n axis: int, optional, keyword only\n A single axis over which a generalized ufunc should operate. This is a short-cut\n for ufuncs that operate over a single, shared core dimension, equivalent to passing\n in axes with entries of (axis,) for each single-core-dimension argument and ``()`` for\n all others. For instance, for a signature ``\"(i),(i)->()\"``, it is equivalent to passing\n in ``axes=[(axis,), (axis,), ()]``.\n keepdims: bool, optional, keyword only\n If this is set to True, axes which are reduced over will be left in the result as\n a dimension with size one, so that the result will broadcast correctly against the\n inputs. This option can only be used for generalized ufuncs that operate on inputs\n that all have the same number of core dimensions and with outputs that have no core\n dimensions , i.e., with signatures like ``\"(i),(i)->()\"`` or ``\"(m,m)->()\"``.\n If used, the location of the dimensions in the output can be controlled with axes\n and axis.\n output_dtypes : Optional, dtype or list of dtypes, keyword only\n Valid numpy dtype specification or list thereof.\n If not given, a call of ``func`` with a small set of data\n is performed in order to try to automatically determine the\n output dtypes.\n output_sizes : dict, optional, keyword only\n Optional mapping from dimension names to sizes for outputs. Only used if\n new core dimensions (not found on inputs) appear on outputs.\n vectorize: bool, keyword only\n If set to ``True``, ``np.vectorize`` is applied to ``func`` for\n convenience. Defaults to ``False``.\n allow_rechunk: Optional, bool, keyword only\n Allows rechunking, otherwise chunk sizes need to match and core\n dimensions are to consist only of one chunk.\n Warning: enabling this can increase memory usage significantly.\n Defaults to ``False``.\n meta: Optional, tuple, keyword only\n tuple of empty ndarrays describing the shape and dtype of the output of the gufunc.\n Defaults to ``None``.\n **kwargs : dict\n Extra keyword arguments to pass to `func`\n\n Returns\n -------\n Single dask.array.Array or tuple of dask.array.Array\n\n Examples\n --------\n >>> import dask.array as da\n >>> import numpy as np\n >>> def stats(x):\n ... return np.mean(x, axis=-1), np.std(x, axis=-1)\n >>> a = da.random.normal(size=(10,20,30), chunks=(5, 10, 30))\n >>> mean, std = da.apply_gufunc(stats, \"(i)->(),()\", a)\n >>> mean.compute().shape\n (10, 20)\n\n\n >>> def outer_product(x, y):\n ... 
return np.einsum(\"i,j->ij\", x, y)\n >>> a = da.random.normal(size=( 20,30), chunks=(10, 30))\n >>> b = da.random.normal(size=(10, 1,40), chunks=(5, 1, 40))\n >>> c = da.apply_gufunc(outer_product, \"(i),(j)->(i,j)\", a, b, vectorize=True)\n >>> c.compute().shape\n (10, 20, 30, 40)\n\n References\n ----------\n .. [1] https://docs.scipy.org/doc/numpy/reference/ufuncs.html\n .. [2] https://docs.scipy.org/doc/numpy/reference/c-api/generalized-ufuncs.html\n \"\"\"\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_apply_gufunc.axes_apply_gufunc.max_loopdims.max_num_loopdims_if_num_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_apply_gufunc.axes_apply_gufunc.max_loopdims.max_num_loopdims_if_num_", "embedding": null, "metadata": {"file_path": "dask/array/gufunc.py", "file_name": "gufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 278, "end_line": 363, "span_ids": ["apply_gufunc"], "tokens": 803}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def apply_gufunc(func, signature, *args, **kwargs):\n axes = kwargs.pop(\"axes\", None)\n axis = kwargs.pop(\"axis\", None)\n keepdims = kwargs.pop(\"keepdims\", False)\n output_dtypes = kwargs.pop(\"output_dtypes\", None)\n output_sizes = kwargs.pop(\"output_sizes\", None)\n vectorize = kwargs.pop(\"vectorize\", None)\n allow_rechunk = kwargs.pop(\"allow_rechunk\", False)\n meta = kwargs.pop(\"meta\", None)\n\n # Input processing:\n ## Signature\n if not isinstance(signature, str):\n raise TypeError(\"`signature` has to be of type string\")\n input_coredimss, output_coredimss = _parse_gufunc_signature(signature)\n\n ## Determine nout: nout = None for functions of one direct return; nout = int for return tuples\n nout = None if not isinstance(output_coredimss, list) else len(output_coredimss)\n\n ## Determine and handle output_dtypes\n if output_dtypes is None:\n if vectorize:\n tempfunc = np.vectorize(func, signature=signature)\n else:\n tempfunc = func\n output_dtypes = apply_infer_dtype(\n tempfunc, args, kwargs, \"apply_gufunc\", \"output_dtypes\", nout\n )\n\n if isinstance(output_dtypes, (tuple, list)):\n if nout is None:\n if len(output_dtypes) > 1:\n raise ValueError(\n (\n \"Must specify single dtype or list of one dtype \"\n \"for `output_dtypes` for function with one output\"\n )\n )\n otypes = output_dtypes\n output_dtypes = output_dtypes[0]\n else:\n otypes = output_dtypes\n else:\n if nout is not None:\n raise ValueError(\n \"Must specify tuple of dtypes for `output_dtypes` for function with multiple outputs\"\n )\n otypes = [output_dtypes]\n\n ## Vectorize function, if required\n if vectorize:\n func = np.vectorize(func, signature=signature, otypes=otypes)\n\n ## Miscellaneous\n if output_sizes is None:\n output_sizes = {}\n\n ## Axes\n input_axes, output_axes = _validate_normalize_axes(\n axes, axis, keepdims, input_coredimss, output_coredimss\n )\n\n # Main code:\n ## Cast 
all input arrays to dask\n    args = [asarray(a) for a in args]\n\n    if len(input_coredimss) != len(args):\n        raise ValueError(\n            \"According to `signature`, `func` requires %d arguments, but %d were given\"\n            % (len(input_coredimss), len(args))\n        )\n\n    ## Axes: transpose input arguments\n    transposed_args = []\n    for arg, iax, input_coredims in zip(args, input_axes, input_coredimss):\n        shape = arg.shape\n        iax = tuple(a if a < 0 else a - len(shape) for a in iax)\n        tidc = tuple(i for i in range(-len(shape) + 0, 0) if i not in iax) + iax\n        transposed_arg = arg.transpose(tidc)\n        transposed_args.append(transposed_arg)\n    args = transposed_args\n\n    ## Assess input args for loop dims\n    input_shapes = [a.shape for a in args]\n    input_chunkss = [a.chunks for a in args]\n    num_loopdims = [len(s) - len(cd) for s, cd in zip(input_shapes, input_coredimss)]\n    max_loopdims = max(num_loopdims) if num_loopdims else None\n    # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_apply_gufunc.core_input_shapes_apply_gufunc._Modifying_blockwise_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_apply_gufunc.core_input_shapes_apply_gufunc._Modifying_blockwise_", "embedding": null, "metadata": {"file_path": "dask/array/gufunc.py", "file_name": "gufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 364, "end_line": 423, "span_ids": ["apply_gufunc"], "tokens": 646}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def apply_gufunc(func, signature, *args, **kwargs):\n    # ... 
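The transpose step above rotates the user-supplied `axes` into the trailing positions before `blockwise` runs. A hedged end-to-end sketch with explicit `axes` for a matmul-style signature (shapes and chunks are illustrative assumptions):

```python
import numpy as np
import dask.array as da

a = da.random.normal(size=(4, 5, 6), chunks=(2, 5, 6))
b = da.random.normal(size=(4, 6, 7), chunks=(2, 6, 7))

# Core dimensions live in the last two axes of each argument and the output.
c = da.apply_gufunc(
    np.matmul,
    "(m,n),(n,p)->(m,p)",
    a,
    b,
    axes=[(-2, -1), (-2, -1), (-2, -1)],
    output_dtypes=float,
)
assert c.compute().shape == (4, 5, 7)
```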
other code\n core_input_shapes = [\n dict(zip(icd, s[n:]))\n for s, n, icd in zip(input_shapes, num_loopdims, input_coredimss)\n ]\n core_shapes = merge(*core_input_shapes)\n core_shapes.update(output_sizes)\n\n loop_input_dimss = [\n tuple(\"__loopdim%d__\" % d for d in range(max_loopdims - n, max_loopdims))\n for n in num_loopdims\n ]\n input_dimss = [l + c for l, c in zip(loop_input_dimss, input_coredimss)]\n\n loop_output_dims = max(loop_input_dimss, key=len) if loop_input_dimss else tuple()\n\n ## Assess input args for same size and chunk sizes\n ### Collect sizes and chunksizes of all dims in all arrays\n dimsizess = {}\n chunksizess = {}\n for dims, shape, chunksizes in zip(input_dimss, input_shapes, input_chunkss):\n for dim, size, chunksize in zip(dims, shape, chunksizes):\n dimsizes = dimsizess.get(dim, [])\n dimsizes.append(size)\n dimsizess[dim] = dimsizes\n chunksizes_ = chunksizess.get(dim, [])\n chunksizes_.append(chunksize)\n chunksizess[dim] = chunksizes_\n ### Assert correct partitioning, for case:\n for dim, sizes in dimsizess.items():\n #### Check that the arrays have same length for same dimensions or dimension `1`\n if set(sizes).union({1}) != {1, max(sizes)}:\n raise ValueError(\n \"Dimension `'{}'` with different lengths in arrays\".format(dim)\n )\n if not allow_rechunk:\n chunksizes = chunksizess[dim]\n #### Check if core dimensions consist of only one chunk\n if (dim in core_shapes) and (chunksizes[0][0] < core_shapes[dim]):\n raise ValueError(\n \"Core dimension `'{}'` consists of multiple chunks. To fix, rechunk into a single \\\nchunk along this dimension or set `allow_rechunk=True`, but beware that this may increase memory usage \\\nsignificantly.\".format(\n dim\n )\n )\n #### Check if loop dimensions consist of same chunksizes, when they have sizes > 1\n relevant_chunksizes = list(\n unique(c for s, c in zip(sizes, chunksizes) if s > 1)\n )\n if len(relevant_chunksizes) > 1:\n raise ValueError(\n \"Dimension `'{}'` with different chunksize present\".format(dim)\n )\n\n ## Apply function - use blockwise here\n arginds = list(concat(zip(args, input_dimss)))\n\n ### Use existing `blockwise` but only with loopdims to enforce\n ### concatenation for coredims that appear also at the output\n ### Modifying `blockwise` could improve things here.\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_apply_gufunc.if_meta_is_not_None__apply_gufunc._Undo_from_above": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_apply_gufunc.if_meta_is_not_None__apply_gufunc._Undo_from_above", "embedding": null, "metadata": {"file_path": "dask/array/gufunc.py", "file_name": "gufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 424, "end_line": 503, "span_ids": ["apply_gufunc"], "tokens": 745}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def apply_gufunc(func, signature, *args, **kwargs):\n # ... 
other code\n if meta is not None:\n tmp = blockwise(\n func, loop_output_dims, *arginds, concatenate=True, meta=meta, **kwargs\n )\n else:\n try:\n tmp = blockwise( # First try to compute meta\n func, loop_output_dims, *arginds, concatenate=True, **kwargs\n )\n except ValueError:\n # If computing meta doesn't work, provide it explicitly based on\n # provided dtypes\n sample = arginds[0]._meta\n if isinstance(output_dtypes, tuple):\n meta = tuple(\n meta_from_array(sample, dtype=odt)\n for ocd, odt in zip(output_coredimss, output_dtypes)\n )\n else:\n meta = tuple(\n meta_from_array(sample, dtype=odt)\n for ocd, odt in zip((output_coredimss,), (output_dtypes,))\n )\n tmp = blockwise(\n func, loop_output_dims, *arginds, concatenate=True, meta=meta, **kwargs\n )\n\n if isinstance(tmp._meta, tuple):\n metas = tmp._meta\n else:\n metas = (tmp._meta,)\n\n ## Prepare output shapes\n loop_output_shape = tmp.shape\n loop_output_chunks = tmp.chunks\n keys = list(flatten(tmp.__dask_keys__()))\n name, token = keys[0][0].split(\"-\")\n\n ### *) Treat direct output\n if nout is None:\n output_coredimss = [output_coredimss]\n output_dtypes = [output_dtypes]\n\n ## Split output\n leaf_arrs = []\n for i, (ocd, oax, meta) in enumerate(zip(output_coredimss, output_axes, metas)):\n core_output_shape = tuple(core_shapes[d] for d in ocd)\n core_chunkinds = len(ocd) * (0,)\n output_shape = loop_output_shape + core_output_shape\n output_chunks = loop_output_chunks + core_output_shape\n leaf_name = \"%s_%d-%s\" % (name, i, token)\n leaf_dsk = {\n (leaf_name,)\n + key[1:]\n + core_chunkinds: ((getitem, key, i) if nout else key)\n for key in keys\n }\n graph = HighLevelGraph.from_collections(leaf_name, leaf_dsk, dependencies=[tmp])\n meta = meta_from_array(meta, len(output_shape))\n leaf_arr = Array(\n graph, leaf_name, chunks=output_chunks, shape=output_shape, meta=meta\n )\n\n ### Axes:\n if keepdims:\n slices = len(leaf_arr.shape) * (slice(None),) + len(oax) * (np.newaxis,)\n leaf_arr = leaf_arr[slices]\n\n tidcs = [None] * len(leaf_arr.shape)\n for i, oa in zip(range(-len(oax), 0), oax):\n tidcs[oa] = i\n j = 0\n for i in range(len(tidcs)):\n if tidcs[i] is None:\n tidcs[i] = j\n j += 1\n leaf_arr = leaf_arr.transpose(tidcs)\n leaf_arrs.append(leaf_arr)\n\n return (*leaf_arrs,) if nout else leaf_arrs[0] # Undo *) from above", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_gufunc_gufunc._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_gufunc_gufunc._", "embedding": null, "metadata": {"file_path": "dask/array/gufunc.py", "file_name": "gufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 506, "end_line": 593, "span_ids": ["gufunc"], "tokens": 1114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class gufunc(object):\n \"\"\"\n Binds `pyfunc` into ``dask.array.apply_gufunc`` when called.\n\n Parameters\n ----------\n pyfunc : callable\n Function to call like 
``func(*args, **kwargs)`` on input arrays\n (``*args``) that returns an array or tuple of arrays. If multiple\n arguments with non-matching dimensions are supplied, this function is\n expected to vectorize (broadcast) over axes of positional arguments in\n the style of NumPy universal functions [1]_ (if this is not the case,\n set ``vectorize=True``). If this function returns multiple outputs,\n ``output_core_dims`` has to be set as well.\n signature : String, keyword only\n Specifies what core dimensions are consumed and produced by ``func``.\n According to the specification of numpy.gufunc signature [2]_\n axes: List of tuples, optional, keyword only\n A list of tuples with indices of axes a generalized ufunc should operate on.\n For instance, for a signature of ``\"(i,j),(j,k)->(i,k)\"`` appropriate for\n matrix multiplication, the base elements are two-dimensional matrices\n and these are taken to be stored in the two last axes of each argument. The\n corresponding axes keyword would be ``[(-2, -1), (-2, -1), (-2, -1)]``.\n For simplicity, for generalized ufuncs that operate on 1-dimensional arrays\n (vectors), a single integer is accepted instead of a single-element tuple,\n and for generalized ufuncs for which all outputs are scalars, the output\n tuples can be omitted.\n axis: int, optional, keyword only\n A single axis over which a generalized ufunc should operate. This is a short-cut\n for ufuncs that operate over a single, shared core dimension, equivalent to passing\n in axes with entries of (axis,) for each single-core-dimension argument and ``()`` for\n all others. For instance, for a signature ``\"(i),(i)->()\"``, it is equivalent to passing\n in ``axes=[(axis,), (axis,), ()]``.\n keepdims: bool, optional, keyword only\n If this is set to True, axes which are reduced over will be left in the result as\n a dimension with size one, so that the result will broadcast correctly against the\n inputs. This option can only be used for generalized ufuncs that operate on inputs\n that all have the same number of core dimensions and with outputs that have no core\n dimensions , i.e., with signatures like ``\"(i),(i)->()\"`` or ``\"(m,m)->()\"``.\n If used, the location of the dimensions in the output can be controlled with axes\n and axis.\n output_dtypes : Optional, dtype or list of dtypes, keyword only\n Valid numpy dtype specification or list thereof.\n If not given, a call of ``func`` with a small set of data\n is performed in order to try to automatically determine the\n output dtypes.\n output_sizes : dict, optional, keyword only\n Optional mapping from dimension names to sizes for outputs. Only used if\n new core dimensions (not found on inputs) appear on outputs.\n vectorize: bool, keyword only\n If set to ``True``, ``np.vectorize`` is applied to ``func`` for\n convenience. Defaults to ``False``.\n allow_rechunk: Optional, bool, keyword only\n Allows rechunking, otherwise chunk sizes need to match and core\n dimensions are to consist only of one chunk.\n Warning: enabling this can increase memory usage significantly.\n Defaults to ``False``.\n\n Returns\n -------\n Wrapped function\n\n Examples\n --------\n >>> import dask.array as da\n >>> import numpy as np\n >>> a = da.random.normal(size=(10,20,30), chunks=(5, 10, 30))\n >>> def stats(x):\n ... 
return np.mean(x, axis=-1), np.std(x, axis=-1)\n >>> gustats = da.gufunc(stats, signature=\"(i)->(),()\", output_dtypes=(float, float))\n >>> mean, std = gustats(a)\n >>> mean.compute().shape\n (10, 20)\n\n >>> a = da.random.normal(size=( 20,30), chunks=(10, 30))\n >>> b = da.random.normal(size=(10, 1,40), chunks=(5, 1, 40))\n >>> def outer_product(x, y):\n ... return np.einsum(\"i,j->ij\", x, y)\n >>> guouter_product = da.gufunc(outer_product, signature=\"(i),(j)->(i,j)\", output_dtypes=float, vectorize=True)\n >>> c = guouter_product(a, b)\n >>> c.compute().shape\n (10, 20, 30, 40)\n\n References\n ----------\n .. [1] https://docs.scipy.org/doc/numpy/reference/ufuncs.html\n .. [2] https://docs.scipy.org/doc/numpy/reference/c-api/generalized-ufuncs.html\n \"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_gufunc.__init___gufunc.__call__.return.apply_gufunc_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_gufunc.__init___gufunc.__call__.return.apply_gufunc_", "embedding": null, "metadata": {"file_path": "dask/array/gufunc.py", "file_name": "gufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 595, "end_line": 641, "span_ids": ["gufunc.__call__", "gufunc.__init__"], "tokens": 370}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class gufunc(object):\n\n def __init__(self, pyfunc, **kwargs):\n self.pyfunc = pyfunc\n self.signature = kwargs.pop(\"signature\", None)\n self.vectorize = kwargs.pop(\"vectorize\", False)\n self.axes = kwargs.pop(\"axes\", None)\n self.axis = kwargs.pop(\"axis\", None)\n self.keepdims = kwargs.pop(\"keepdims\", False)\n self.output_sizes = kwargs.pop(\"output_sizes\", None)\n self.output_dtypes = kwargs.pop(\"output_dtypes\", None)\n self.allow_rechunk = kwargs.pop(\"allow_rechunk\", False)\n if kwargs:\n raise TypeError(\"Unsupported keyword argument(s) provided\")\n\n self.__doc__ = \"\"\"\n Bound ``dask.array.gufunc``\n func: ``{func}``\n signature: ``'{signature}'``\n\n Parameters\n ----------\n *args : numpy/dask arrays or scalars\n Arrays to which to apply to ``func``. 
Core dimensions as specified in\n ``signature`` need to come last.\n **kwargs : dict\n Extra keyword arguments to pass to ``func``\n\n Returns\n -------\n Single dask.array.Array or tuple of dask.array.Array\n \"\"\".format(\n func=str(self.pyfunc), signature=self.signature\n )\n\n def __call__(self, *args, **kwargs):\n return apply_gufunc(\n self.pyfunc,\n self.signature,\n *args,\n vectorize=self.vectorize,\n axes=self.axes,\n axis=self.axis,\n keepdims=self.keepdims,\n output_sizes=self.output_sizes,\n output_dtypes=self.output_dtypes,\n allow_rechunk=self.allow_rechunk or kwargs.pop(\"allow_rechunk\", False),\n **kwargs,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_as_gufunc_as_gufunc._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_as_gufunc_as_gufunc._", "embedding": null, "metadata": {"file_path": "dask/array/gufunc.py", "file_name": "gufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 644, "end_line": 726, "span_ids": ["as_gufunc"], "tokens": 1031}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def as_gufunc(signature=None, **kwargs):\n \"\"\"\n Decorator for ``dask.array.gufunc``.\n\n Parameters\n ----------\n signature : String\n Specifies what core dimensions are consumed and produced by ``func``.\n According to the specification of numpy.gufunc signature [2]_\n axes: List of tuples, optional, keyword only\n A list of tuples with indices of axes a generalized ufunc should operate on.\n For instance, for a signature of ``\"(i,j),(j,k)->(i,k)\"`` appropriate for\n matrix multiplication, the base elements are two-dimensional matrices\n and these are taken to be stored in the two last axes of each argument. The\n corresponding axes keyword would be ``[(-2, -1), (-2, -1), (-2, -1)]``.\n For simplicity, for generalized ufuncs that operate on 1-dimensional arrays\n (vectors), a single integer is accepted instead of a single-element tuple,\n and for generalized ufuncs for which all outputs are scalars, the output\n tuples can be omitted.\n axis: int, optional, keyword only\n A single axis over which a generalized ufunc should operate. This is a short-cut\n for ufuncs that operate over a single, shared core dimension, equivalent to passing\n in axes with entries of (axis,) for each single-core-dimension argument and ``()`` for\n all others. For instance, for a signature ``\"(i),(i)->()\"``, it is equivalent to passing\n in ``axes=[(axis,), (axis,), ()]``.\n keepdims: bool, optional, keyword only\n If this is set to True, axes which are reduced over will be left in the result as\n a dimension with size one, so that the result will broadcast correctly against the\n inputs. 
This option can only be used for generalized ufuncs that operate on inputs\n that all have the same number of core dimensions and with outputs that have no core\n dimensions , i.e., with signatures like ``\"(i),(i)->()\"`` or ``\"(m,m)->()\"``.\n If used, the location of the dimensions in the output can be controlled with axes\n and axis.\n output_dtypes : Optional, dtype or list of dtypes, keyword only\n Valid numpy dtype specification or list thereof.\n If not given, a call of ``func`` with a small set of data\n is performed in order to try to automatically determine the\n output dtypes.\n output_sizes : dict, optional, keyword only\n Optional mapping from dimension names to sizes for outputs. Only used if\n new core dimensions (not found on inputs) appear on outputs.\n vectorize: bool, keyword only\n If set to ``True``, ``np.vectorize`` is applied to ``func`` for\n convenience. Defaults to ``False``.\n allow_rechunk: Optional, bool, keyword only\n Allows rechunking, otherwise chunk sizes need to match and core\n dimensions are to consist only of one chunk.\n Warning: enabling this can increase memory usage significantly.\n Defaults to ``False``.\n meta: Optional, tuple, keyword only\n tuple of empty ndarrays describing the shape and dtype of the output of the gufunc.\n Defaults to ``None``.\n\n Returns\n -------\n Decorator for `pyfunc` that itself returns a `gufunc`.\n\n Examples\n --------\n >>> import dask.array as da\n >>> import numpy as np\n >>> a = da.random.normal(size=(10,20,30), chunks=(5, 10, 30))\n >>> @da.as_gufunc(\"(i)->(),()\", output_dtypes=(float, float))\n ... def stats(x):\n ... return np.mean(x, axis=-1), np.std(x, axis=-1)\n >>> mean, std = stats(a)\n >>> mean.compute().shape\n (10, 20)\n\n >>> a = da.random.normal(size=( 20,30), chunks=(10, 30))\n >>> b = da.random.normal(size=(10, 1,40), chunks=(5, 1, 40))\n >>> @da.as_gufunc(\"(i),(j)->(i,j)\", output_dtypes=float, vectorize=True)\n ... def outer_product(x, y):\n ... return np.einsum(\"i,j->ij\", x, y)\n >>> c = outer_product(a, b)\n >>> c.compute().shape\n (10, 20, 30, 40)\n\n References\n ----------\n .. [1] https://docs.scipy.org/doc/numpy/reference/ufuncs.html\n .. [2] https://docs.scipy.org/doc/numpy/reference/c-api/generalized-ufuncs.html\n \"\"\"\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_as_gufunc._allowedkeys_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_as_gufunc._allowedkeys_", "embedding": null, "metadata": {"file_path": "dask/array/gufunc.py", "file_name": "gufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 727, "end_line": 759, "span_ids": ["as_gufunc"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def as_gufunc(signature=None, **kwargs):\n _allowedkeys = {\n \"vectorize\",\n \"axes\",\n \"axis\",\n \"keepdims\",\n \"output_sizes\",\n \"output_dtypes\",\n \"allow_rechunk\",\n \"meta\",\n }\n if set(_allowedkeys).issubset(kwargs.keys()):\n raise TypeError(\"Unsupported keyword argument(s) provided\")\n\n def _as_gufunc(pyfunc):\n return gufunc(pyfunc, signature=signature, **kwargs)\n\n _as_gufunc.__doc__ = \"\"\"\n Decorator to make ``dask.array.gufunc``\n signature: ``'{signature}'``\n\n Parameters\n ----------\n pyfunc : callable\n Function matching signature ``'{signature}'``.\n\n Returns\n -------\n ``dask.array.gufunc``\n \"\"\".format(\n signature=signature\n )\n return _as_gufunc", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/image.py_from_glob_import_glob_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/image.py_from_glob_import_glob_", "embedding": null, "metadata": {"file_path": "dask/array/image.py", "file_name": "image.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 70, "span_ids": ["add_leading_dimension", "imports", "imread"], "tokens": 468}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from glob import glob\nimport os\n\ntry:\n from skimage.io import imread as sk_imread\nexcept (AttributeError, ImportError):\n pass\n\nfrom .core import Array\nfrom ..base import tokenize\n\n\ndef add_leading_dimension(x):\n return x[None, ...]\n\n\ndef imread(filename, imread=None, preprocess=None):\n \"\"\"Read a stack of images into a dask array\n\n Parameters\n ----------\n\n filename: string\n A globstring like 'myfile.*.png'\n imread: function (optional)\n Optionally provide custom imread function.\n Function should expect a filename and produce a numpy array.\n Defaults to ``skimage.io.imread``.\n preprocess: function (optional)\n Optionally provide custom function to preprocess the image.\n Function 
should expect a numpy array for a single image.\n\n Examples\n --------\n\n >>> from dask.array.image import imread\n >>> im = imread('2015-*-*.png') # doctest: +SKIP\n >>> im.shape # doctest: +SKIP\n (365, 1000, 1000, 3)\n\n Returns\n -------\n\n Dask array of all images stacked along the first dimension. All images\n will be treated as individual chunks\n \"\"\"\n imread = imread or sk_imread\n filenames = sorted(glob(filename))\n if not filenames:\n raise ValueError(\"No files found under name %s\" % filename)\n\n name = \"imread-%s\" % tokenize(filenames, map(os.path.getmtime, filenames))\n\n sample = imread(filenames[0])\n if preprocess:\n sample = preprocess(sample)\n\n keys = [(name, i) + (0,) * len(sample.shape) for i in range(len(filenames))]\n if preprocess:\n values = [\n (add_leading_dimension, (preprocess, (imread, fn))) for fn in filenames\n ]\n else:\n values = [(add_leading_dimension, (imread, fn)) for fn in filenames]\n dsk = dict(zip(keys, values))\n\n chunks = ((1,) * len(filenames),) + tuple((d,) for d in sample.shape)\n\n return Array(dsk, name, chunks, sample.dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_operator__nanmin.return.k_1_if_np_isnan_k_0_else": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_operator__nanmin.return.k_1_if_np_isnan_k_0_else", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 33, "span_ids": ["_nanmin", "imports", "_cumsum_part", "_cumsum_blocks"], "tokens": 216}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import operator\nfrom numbers import Number\n\nimport numpy as np\nimport tlz as toolz\n\nfrom ..base import tokenize, wait\nfrom ..delayed import delayed\nfrom ..blockwise import blockwise\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..utils import derived_from, apply\nfrom .core import dotmany, Array, concatenate, from_delayed\nfrom .creation import eye\nfrom .random import RandomState\nfrom .utils import meta_from_array, svd_flip, ones_like_safe\n\n\ndef _cumsum_blocks(it):\n total = 0\n for x in it:\n total_previous = total\n total += x\n yield (total_previous, total)\n\n\ndef _cumsum_part(last, new):\n return (last[1], last[1] + new)\n\n\ndef _nanmin(m, n):\n k_0 = min([m, n])\n k_1 = m if np.isnan(n) else n\n return k_1 if np.isnan(k_0) else k_0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py__wrapped_qr__wrapped_qr.if_a_shape_0_0_.else_.return.np_linalg_qr_a_": {"__data__": {"id_": 
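A hedged usage sketch for ``imread``; the ``frames/*.png`` pattern and files are hypothetical, and scikit-image must be installed unless a custom ``imread`` callable is supplied:

```python
from dask.array.image import imread

# One chunk per file, stacked along a new leading axis.
stack = imread("frames/*.png")

# Optionally transform each image before stacking, e.g. downsample 2x.
halved = imread("frames/*.png", preprocess=lambda img: img[::2, ::2])

print(stack.chunks[0][:3])  # (1, 1, 1): each file is its own chunk on axis 0
```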
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py__wrapped_qr__wrapped_qr.if_a_shape_0_0_.else_.return.np_linalg_qr_a_", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 36, "end_line": 48, "span_ids": ["_wrapped_qr"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _wrapped_qr(a):\n \"\"\"\n A wrapper for np.linalg.qr that handles arrays with 0 rows\n\n Notes: Created for tsqr so as to manage cases with uncertain\n array dimensions. In particular, the case where arrays have\n (uncertain) chunks with 0 rows.\n \"\"\"\n # workaround may be removed when numpy stops rejecting edge cases\n if a.shape[0] == 0:\n return np.zeros((0, 0)), np.zeros((0, a.shape[1]))\n else:\n return np.linalg.qr(a)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_tsqr_tsqr.layers.data___dask_graph___lay": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_tsqr_tsqr.layers.data___dask_graph___lay", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 51, "end_line": 129, "span_ids": ["tsqr"], "tokens": 756}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def tsqr(data, compute_svd=False, _max_vchunk_size=None):\n \"\"\"Direct Tall-and-Skinny QR algorithm\n\n As presented in:\n\n A. Benson, D. Gleich, and J. Demmel.\n Direct QR factorizations for tall-and-skinny matrices in\n MapReduce architectures.\n IEEE International Conference on Big Data, 2013.\n https://arxiv.org/abs/1301.1071\n\n This algorithm is used to compute both the QR decomposition and the\n Singular Value Decomposition. It requires that the input array have a\n single column of blocks, each of which fit in memory.\n\n Parameters\n ----------\n data: Array\n compute_svd: bool\n Whether to compute the SVD rather than the QR decomposition\n _max_vchunk_size: Integer\n Used internally in recursion to set the maximum row dimension\n of chunks in subsequent recursive calls.\n\n Notes\n -----\n With ``k`` blocks of size ``(m, n)``, this algorithm has memory use that\n scales as ``k * n * n``.\n\n The implementation here is the recursive variant due to the ultimate\n need for one \"single core\" QR decomposition. 
In the non-recursive version\n of the algorithm, given ``k`` blocks, after ``k`` ``m * n`` QR\n decompositions, there will be a \"single core\" QR decomposition that will\n have to work with a ``(k * n, n)`` matrix.\n\n Here, recursion is applied as necessary to ensure that ``k * n`` is not\n larger than ``m`` (if ``m / n >= 2``). In particular, this is done\n to ensure that single core computations do not have to work on blocks\n larger than ``(m, n)``.\n\n Where blocks are irregular, the above logic is applied with the \"height\" of\n the \"tallest\" block used in place of ``m``.\n\n Consider use of the ``rechunk`` method to control this behavior.\n Taller blocks will reduce overall memory use (assuming that many of them\n still fit in memory at once).\n\n See Also\n --------\n dask.array.linalg.qr\n Powered by this algorithm\n dask.array.linalg.svd\n Powered by this algorithm\n dask.array.linalg.sfqr\n Variant for short-and-fat arrays\n \"\"\"\n nr, nc = len(data.chunks[0]), len(data.chunks[1])\n cr_max, cc = max(data.chunks[0]), data.chunks[1][0]\n\n if not (data.ndim == 2 and nc == 1): # Is a matrix # Only one column block\n raise ValueError(\n \"Input must have the following properties:\\n\"\n \" 1. Have two dimensions\\n\"\n \" 2. Have only one column of blocks\\n\\n\"\n \"Note: This function (tsqr) supports QR decomposition in the case of\\n\"\n \"tall-and-skinny matrices (single column chunk/block; see qr)\\n\"\n \"Current shape: {},\\nCurrent chunksize: {}\".format(\n data.shape, data.chunksize\n )\n )\n\n token = \"-\" + tokenize(data, compute_svd)\n\n m, n = data.shape\n numblocks = (nr, 1)\n\n qq, rr = np.linalg.qr(np.ones(shape=(1, 1), dtype=data.dtype))\n\n layers = data.__dask_graph__().layers.copy()\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_tsqr.dependencies_tsqr.can_distribute.chunks_well_defined_and_i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_tsqr.dependencies_tsqr.can_distribute.chunks_well_defined_and_i", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 130, "end_line": 174, "span_ids": ["tsqr"], "tokens": 525}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def tsqr(data, compute_svd=False, _max_vchunk_size=None):\n # ... 
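A sketch of the single-column-of-blocks requirement (shapes illustrative; only the task graph is built here, nothing is computed):

```python
import dask.array as da
from dask.array.linalg import tsqr

# 10 row-blocks, exactly one column-block: the tall-and-skinny layout.
x = da.random.normal(size=(10000, 20), chunks=(1000, 20))
q, r = tsqr(x)
print(q.shape, r.shape)  # (10000, 20) (20, 20)
```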
other code\n dependencies = data.__dask_graph__().dependencies.copy()\n\n # Block qr\n name_qr_st1 = \"qr\" + token\n dsk_qr_st1 = blockwise(\n _wrapped_qr,\n name_qr_st1,\n \"ij\",\n data.name,\n \"ij\",\n numblocks={data.name: numblocks},\n )\n layers[name_qr_st1] = dsk_qr_st1\n dependencies[name_qr_st1] = set(data.__dask_layers__())\n\n # Block qr[0]\n name_q_st1 = \"getitem\" + token + \"-q1\"\n dsk_q_st1 = dict(\n ((name_q_st1, i, 0), (operator.getitem, (name_qr_st1, i, 0), 0))\n for i in range(numblocks[0])\n )\n layers[name_q_st1] = dsk_q_st1\n dependencies[name_q_st1] = {name_qr_st1}\n\n # Block qr[1]\n name_r_st1 = \"getitem\" + token + \"-r1\"\n dsk_r_st1 = dict(\n ((name_r_st1, i, 0), (operator.getitem, (name_qr_st1, i, 0), 1))\n for i in range(numblocks[0])\n )\n layers[name_r_st1] = dsk_r_st1\n dependencies[name_r_st1] = {name_qr_st1}\n\n # Next step is to obtain a QR decomposition for the stacked R factors, so either:\n # - gather R factors into a single core and do a QR decomposition\n # - recurse with tsqr (if single core computation too large and a-priori \"meaningful\n # reduction\" possible, meaning that chunks have to be well defined)\n\n single_core_compute_m = nr * cc\n chunks_well_defined = not any(np.isnan(c) for cs in data.chunks for c in cs)\n prospective_blocks = np.ceil(single_core_compute_m / cr_max)\n meaningful_reduction_possible = (\n cr_max if _max_vchunk_size is None else _max_vchunk_size\n ) >= 2 * cc\n can_distribute = chunks_well_defined and int(prospective_blocks) > 1\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_tsqr.if_chunks_well_defined_an_tsqr.if_chunks_well_defined_an.dependencies_name_q_st3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_tsqr.if_chunks_well_defined_an_tsqr.if_chunks_well_defined_an.dependencies_name_q_st3_", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 176, "end_line": 273, "span_ids": ["tsqr"], "tokens": 921}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def tsqr(data, compute_svd=False, _max_vchunk_size=None):\n # ... 
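The ``(start, stop)`` offsets used later to slice Q back apart come from ``_cumsum_blocks``, defined earlier in this module. Its behavior in isolation:

```python
def _cumsum_blocks(it):
    # Turn per-block row counts into contiguous (start, stop) offsets.
    total = 0
    for x in it:
        total_previous = total
        total += x
        yield (total_previous, total)

print(list(_cumsum_blocks([3, 5, 2])))  # [(0, 3), (3, 8), (8, 10)]
```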
other code\n\n if chunks_well_defined and meaningful_reduction_possible and can_distribute:\n # stack chunks into blocks and recurse using tsqr\n\n # Prepare to stack chunks into blocks (from block qr[1])\n all_blocks = []\n curr_block = []\n curr_block_sz = 0\n for idx, a_m in enumerate(data.chunks[0]):\n m_q = a_m\n n_q = min(m_q, cc)\n m_r = n_q\n # n_r = cc\n if curr_block_sz + m_r > cr_max:\n all_blocks.append(curr_block)\n curr_block = []\n curr_block_sz = 0\n curr_block.append((idx, m_r))\n curr_block_sz += m_r\n if len(curr_block) > 0:\n all_blocks.append(curr_block)\n\n # R_stacked\n name_r_stacked = \"stack\" + token + \"-r1\"\n dsk_r_stacked = dict(\n (\n (name_r_stacked, i, 0),\n (\n np.vstack,\n (tuple, [(name_r_st1, idx, 0) for idx, _ in sub_block_info]),\n ),\n )\n for i, sub_block_info in enumerate(all_blocks)\n )\n layers[name_r_stacked] = dsk_r_stacked\n dependencies[name_r_stacked] = {name_r_st1}\n\n # retrieve R_stacked for recursion with tsqr\n vchunks_rstacked = tuple(\n [sum(map(lambda x: x[1], sub_block_info)) for sub_block_info in all_blocks]\n )\n graph = HighLevelGraph(layers, dependencies)\n # dsk.dependencies[name_r_stacked] = {data.name}\n r_stacked_meta = meta_from_array(\n data, len((sum(vchunks_rstacked), n)), dtype=rr.dtype\n )\n r_stacked = Array(\n graph,\n name_r_stacked,\n shape=(sum(vchunks_rstacked), n),\n chunks=(vchunks_rstacked, n),\n meta=r_stacked_meta,\n )\n\n # recurse\n q_inner, r_inner = tsqr(r_stacked, _max_vchunk_size=cr_max)\n layers = toolz.merge(q_inner.dask.layers, r_inner.dask.layers)\n dependencies = toolz.merge(q_inner.dask.dependencies, r_inner.dask.dependencies)\n\n # Q_inner: \"unstack\"\n name_q_st2 = \"getitem\" + token + \"-q2\"\n dsk_q_st2 = dict(\n (\n (name_q_st2, j, 0),\n (\n operator.getitem,\n (q_inner.name, i, 0),\n ((slice(e[0], e[1])), (slice(0, n))),\n ),\n )\n for i, sub_block_info in enumerate(all_blocks)\n for j, e in zip(\n [x[0] for x in sub_block_info],\n _cumsum_blocks([x[1] for x in sub_block_info]),\n )\n )\n layers[name_q_st2] = dsk_q_st2\n dependencies[name_q_st2] = set(q_inner.__dask_layers__())\n\n # R: R_inner\n name_r_st2 = \"r-inner\" + token\n dsk_r_st2 = {(name_r_st2, 0, 0): (r_inner.name, 0, 0)}\n layers[name_r_st2] = dsk_r_st2\n dependencies[name_r_st2] = set(r_inner.__dask_layers__())\n\n # Q: Block qr[0] (*) Q_inner\n name_q_st3 = \"dot\" + token + \"-q3\"\n dsk_q_st3 = blockwise(\n np.dot,\n name_q_st3,\n \"ij\",\n name_q_st1,\n \"ij\",\n name_q_st2,\n \"ij\",\n numblocks={name_q_st1: numblocks, name_q_st2: numblocks},\n )\n layers[name_q_st3] = dsk_q_st3\n dependencies[name_q_st3] = {name_q_st1, name_q_st2}\n # ... other code\n # ... 
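The grouping loop at the top of this branch, extracted as a standalone sketch: each stage-1 R factor contributes ``min(chunk_rows, cc)`` rows, and consecutive factors are packed into blocks whose stacked height stays within ``cr_max`` (all values here illustrative):

```python
cc, cr_max = 4, 10
chunk_rows = [6, 6, 6, 6]          # hypothetical data.chunks[0]

all_blocks, curr_block, curr_sz = [], [], 0
for idx, a_m in enumerate(chunk_rows):
    m_r = min(a_m, cc)             # rows contributed by this R factor
    if curr_sz + m_r > cr_max:     # flush before exceeding the cap
        all_blocks.append(curr_block)
        curr_block, curr_sz = [], 0
    curr_block.append((idx, m_r))
    curr_sz += m_r
if curr_block:
    all_blocks.append(curr_block)

print(all_blocks)  # [[(0, 4), (1, 4)], [(2, 4), (3, 4)]]
```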
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_tsqr.if_chunks_well_defined_an.else__tsqr.if_chunks_well_defined_an.else_.dependencies_name_r_st2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_tsqr.if_chunks_well_defined_an.else__tsqr.if_chunks_well_defined_an.else_.dependencies_name_r_st2_", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 274, "end_line": 391, "span_ids": ["tsqr"], "tokens": 1380}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def tsqr(data, compute_svd=False, _max_vchunk_size=None):\n\n if chunks_well_defined and meaningful_reduction_possible and can_distribute:\n # stack chunks into blocks and recurse using tsqr\n\n # Prepare to stack chunks into blocks (from block qr[1])\n # ... other code\n else:\n # Do single core computation\n\n # Stacking for in-core QR computation\n to_stack = [(name_r_st1, i, 0) for i in range(numblocks[0])]\n name_r_st1_stacked = \"stack\" + token + \"-r1\"\n dsk_r_st1_stacked = {(name_r_st1_stacked, 0, 0): (np.vstack, (tuple, to_stack))}\n layers[name_r_st1_stacked] = dsk_r_st1_stacked\n dependencies[name_r_st1_stacked] = {name_r_st1}\n\n # In-core QR computation\n name_qr_st2 = \"qr\" + token + \"-qr2\"\n dsk_qr_st2 = blockwise(\n np.linalg.qr,\n name_qr_st2,\n \"ij\",\n name_r_st1_stacked,\n \"ij\",\n numblocks={name_r_st1_stacked: (1, 1)},\n )\n layers[name_qr_st2] = dsk_qr_st2\n dependencies[name_qr_st2] = {name_r_st1_stacked}\n\n # In-core qr[0]\n name_q_st2_aux = \"getitem\" + token + \"-q2-aux\"\n dsk_q_st2_aux = {\n (name_q_st2_aux, 0, 0): (operator.getitem, (name_qr_st2, 0, 0), 0)\n }\n layers[name_q_st2_aux] = dsk_q_st2_aux\n dependencies[name_q_st2_aux] = {name_qr_st2}\n\n chucks_are_all_known = not any(np.isnan(c) for cs in data.chunks for c in cs)\n if chucks_are_all_known:\n # when chunks are all known...\n # obtain slices on q from in-core compute (e.g.: (slice(10, 20), slice(0, 5)))\n q2_block_sizes = [min(e, n) for e in data.chunks[0]]\n block_slices = [\n (slice(e[0], e[1]), slice(0, n)) for e in _cumsum_blocks(q2_block_sizes)\n ]\n dsk_q_blockslices = {}\n deps = set()\n else:\n # when chunks are not already known...\n\n # request shape information: vertical chunk sizes & column dimension (n)\n name_q2bs = \"shape\" + token + \"-q2\"\n dsk_q2_shapes = {\n (name_q2bs, i): (min, (getattr, (data.name, i, 0), \"shape\"))\n for i in range(numblocks[0])\n }\n name_n = \"getitem\" + token + \"-n\"\n dsk_n = {\n name_n: (operator.getitem, (getattr, (data.name, 0, 0), \"shape\"), 1)\n }\n\n # cumulative sums (start, end)\n name_q2cs = \"cumsum\" + token + \"-q2\"\n dsk_q2_cumsum = {(name_q2cs, 0): [0, (name_q2bs, 0)]}\n\n for i in range(1, numblocks[0]):\n dsk_q2_cumsum[(name_q2cs, i)] = (\n _cumsum_part,\n (name_q2cs, i - 1),\n (name_q2bs, i),\n )\n\n # obtain slices on q from in-core 
compute (e.g.: (slice(10, 20), slice(0, 5)))\n name_blockslice = \"slice\" + token + \"-q\"\n dsk_block_slices = {\n (name_blockslice, i): (\n tuple,\n [(apply, slice, (name_q2cs, i)), (slice, 0, name_n)],\n )\n for i in range(numblocks[0])\n }\n\n dsk_q_blockslices = toolz.merge(\n dsk_n, dsk_q2_shapes, dsk_q2_cumsum, dsk_block_slices\n )\n\n deps = {data.name}\n block_slices = [(name_blockslice, i) for i in range(numblocks[0])]\n\n layers[\"q-blocksizes\" + token] = dsk_q_blockslices\n dependencies[\"q-blocksizes\" + token] = deps\n\n # In-core qr[0] unstacking\n name_q_st2 = \"getitem\" + token + \"-q2\"\n dsk_q_st2 = dict(\n ((name_q_st2, i, 0), (operator.getitem, (name_q_st2_aux, 0, 0), b))\n for i, b in enumerate(block_slices)\n )\n layers[name_q_st2] = dsk_q_st2\n if chucks_are_all_known:\n dependencies[name_q_st2] = {name_q_st2_aux}\n else:\n dependencies[name_q_st2] = {name_q_st2_aux, \"q-blocksizes\" + token}\n\n # Q: Block qr[0] (*) In-core qr[0]\n name_q_st3 = \"dot\" + token + \"-q3\"\n dsk_q_st3 = blockwise(\n np.dot,\n name_q_st3,\n \"ij\",\n name_q_st1,\n \"ij\",\n name_q_st2,\n \"ij\",\n numblocks={name_q_st1: numblocks, name_q_st2: numblocks},\n )\n layers[name_q_st3] = dsk_q_st3\n dependencies[name_q_st3] = {name_q_st1, name_q_st2}\n\n # R: In-core qr[1]\n name_r_st2 = \"getitem\" + token + \"-r2\"\n dsk_r_st2 = {(name_r_st2, 0, 0): (operator.getitem, (name_qr_st2, 0, 0), 1)}\n layers[name_r_st2] = dsk_r_st2\n dependencies[name_r_st2] = {name_qr_st2}\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_tsqr.if_not_compute_svd__tsqr.if_not_compute_svd_.else_.return.u_s_vh": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_tsqr.if_not_compute_svd__tsqr.if_not_compute_svd_.else_.return.u_s_vh", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 393, "end_line": 510, "span_ids": ["tsqr"], "tokens": 1327}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def tsqr(data, compute_svd=False, _max_vchunk_size=None):\n # ... 
other code\n\n if not compute_svd:\n is_unknown_m = np.isnan(data.shape[0]) or any(\n np.isnan(c) for c in data.chunks[0]\n )\n is_unknown_n = np.isnan(data.shape[1]) or any(\n np.isnan(c) for c in data.chunks[1]\n )\n\n if is_unknown_m and is_unknown_n:\n # assumption: m >= n\n q_shape = data.shape\n q_chunks = (data.chunks[0], (np.nan,))\n r_shape = (np.nan, np.nan)\n r_chunks = ((np.nan,), (np.nan,))\n elif is_unknown_m and not is_unknown_n:\n # assumption: m >= n\n q_shape = data.shape\n q_chunks = (data.chunks[0], (n,))\n r_shape = (n, n)\n r_chunks = (n, n)\n elif not is_unknown_m and is_unknown_n:\n # assumption: m >= n\n q_shape = data.shape\n q_chunks = (data.chunks[0], (np.nan,))\n r_shape = (np.nan, np.nan)\n r_chunks = ((np.nan,), (np.nan,))\n else:\n q_shape = (\n data.shape\n if data.shape[0] >= data.shape[1]\n else (data.shape[0], data.shape[0])\n )\n q_chunks = (\n data.chunks\n if data.shape[0] >= data.shape[1]\n else (data.chunks[0], data.chunks[0])\n )\n r_shape = (n, n) if data.shape[0] >= data.shape[1] else data.shape\n r_chunks = r_shape\n\n # dsk.dependencies[name_q_st3] = {data.name}\n # dsk.dependencies[name_r_st2] = {data.name}\n graph = HighLevelGraph(layers, dependencies)\n q_meta = meta_from_array(data, len(q_shape), qq.dtype)\n r_meta = meta_from_array(data, len(r_shape), rr.dtype)\n q = Array(graph, name_q_st3, shape=q_shape, chunks=q_chunks, meta=q_meta)\n r = Array(graph, name_r_st2, shape=r_shape, chunks=r_chunks, meta=r_meta)\n return q, r\n else:\n # In-core SVD computation\n name_svd_st2 = \"svd\" + token + \"-2\"\n dsk_svd_st2 = blockwise(\n np.linalg.svd,\n name_svd_st2,\n \"ij\",\n name_r_st2,\n \"ij\",\n numblocks={name_r_st2: (1, 1)},\n )\n # svd[0]\n name_u_st2 = \"getitem\" + token + \"-u2\"\n dsk_u_st2 = {(name_u_st2, 0, 0): (operator.getitem, (name_svd_st2, 0, 0), 0)}\n # svd[1]\n name_s_st2 = \"getitem\" + token + \"-s2\"\n dsk_s_st2 = {(name_s_st2, 0): (operator.getitem, (name_svd_st2, 0, 0), 1)}\n # svd[2]\n name_v_st2 = \"getitem\" + token + \"-v2\"\n dsk_v_st2 = {(name_v_st2, 0, 0): (operator.getitem, (name_svd_st2, 0, 0), 2)}\n # Q * U\n name_u_st4 = \"getitem\" + token + \"-u4\"\n dsk_u_st4 = blockwise(\n dotmany,\n name_u_st4,\n \"ij\",\n name_q_st3,\n \"ik\",\n name_u_st2,\n \"kj\",\n numblocks={name_q_st3: numblocks, name_u_st2: (1, 1)},\n )\n\n layers[name_svd_st2] = dsk_svd_st2\n dependencies[name_svd_st2] = {name_r_st2}\n layers[name_u_st2] = dsk_u_st2\n dependencies[name_u_st2] = {name_svd_st2}\n layers[name_u_st4] = dsk_u_st4\n dependencies[name_u_st4] = {name_q_st3, name_u_st2}\n layers[name_s_st2] = dsk_s_st2\n dependencies[name_s_st2] = {name_svd_st2}\n layers[name_v_st2] = dsk_v_st2\n dependencies[name_v_st2] = {name_svd_st2}\n\n uu, ss, vvh = np.linalg.svd(np.ones(shape=(1, 1), dtype=data.dtype))\n\n k = _nanmin(m, n) # avoid RuntimeWarning with np.nanmin([m, n])\n\n m_u = m\n n_u = int(k) if not np.isnan(k) else k\n n_s = n_u\n m_vh = n_u\n n_vh = n\n d_vh = max(m_vh, n_vh) # full matrix returned: but basically n\n graph = HighLevelGraph(layers, dependencies)\n u_meta = meta_from_array(data, len((m_u, n_u)), uu.dtype)\n s_meta = meta_from_array(data, len((n_s,)), ss.dtype)\n vh_meta = meta_from_array(data, len((d_vh, d_vh)), vvh.dtype)\n u = Array(\n graph,\n name_u_st4,\n shape=(m_u, n_u),\n chunks=(data.chunks[0], (n_u,)),\n meta=u_meta,\n )\n s = Array(graph, name_s_st2, shape=(n_s,), chunks=((n_s,),), meta=s_meta)\n vh = Array(\n graph, name_v_st2, shape=(d_vh, d_vh), chunks=((n,), (n,)), meta=vh_meta\n )\n return u, s, 
vh", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_sfqr_sfqr.name_R_1.prefix_R_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_sfqr_sfqr.name_R_1.prefix_R_1_", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 513, "end_line": 585, "span_ids": ["sfqr"], "tokens": 748}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def sfqr(data, name=None):\n \"\"\"Direct Short-and-Fat QR\n\n Currently, this is a quick hack for non-tall-and-skinny matrices which\n are one chunk tall and (unless they are one chunk wide) have chunks\n that are wider than they are tall\n\n Q [R_1 R_2 ...] = [A_1 A_2 ...]\n\n it computes the factorization Q R_1 = A_1, then computes the other\n R_k's in parallel.\n\n Parameters\n ----------\n data: Array\n\n See Also\n --------\n dask.array.linalg.qr\n Main user API that uses this function\n dask.array.linalg.tsqr\n Variant for tall-and-skinny case\n \"\"\"\n nr, nc = len(data.chunks[0]), len(data.chunks[1])\n cr, cc = data.chunks[0][0], data.chunks[1][0]\n\n if not (\n (data.ndim == 2)\n and (nr == 1) # Is a matrix\n and ( # Has exactly one block row\n (cr <= cc)\n or (nc == 1) # Chunking dimension on rows is at least that on cols or...\n )\n ): # ... only one block col\n raise ValueError(\n \"Input must have the following properties:\\n\"\n \" 1. Have two dimensions\\n\"\n \" 2. Have only one row of blocks\\n\"\n \" 3. Either one column of blocks or (first) chunk size on cols\\n\"\n \" is at most that on rows (e.g.: for a 5x20 matrix,\\n\"\n \" chunks=((5), (8,4,8)) is fine, but chunks=((5), (4,8,8)) is not;\\n\"\n \" still, prefer something simple like chunks=(5,10) or chunks=5)\\n\\n\"\n \"Note: This function (sfqr) supports QR decomposition in the case\\n\"\n \"of short-and-fat matrices (single row chunk/block; see qr)\"\n )\n\n prefix = name or \"sfqr-\" + tokenize(data)\n prefix += \"_\"\n\n m, n = data.shape\n\n qq, rr = np.linalg.qr(np.ones(shape=(1, 1), dtype=data.dtype))\n\n layers = data.__dask_graph__().layers.copy()\n dependencies = data.__dask_graph__().dependencies.copy()\n\n # data = A = [A_1 A_rest]\n name_A_1 = prefix + \"A_1\"\n name_A_rest = prefix + \"A_rest\"\n layers[name_A_1] = {(name_A_1, 0, 0): (data.name, 0, 0)}\n dependencies[name_A_1] = set(data.__dask_layers__())\n layers[name_A_rest] = {\n (name_A_rest, 0, idx): (data.name, 0, 1 + idx) for idx in range(nc - 1)\n }\n if len(layers[name_A_rest]) > 0:\n dependencies[name_A_rest] = set(data.__dask_layers__())\n else:\n dependencies[name_A_rest] = set()\n\n # Q R_1 = A_1\n name_Q_R1 = prefix + \"Q_R_1\"\n name_Q = prefix + \"Q\"\n name_R_1 = prefix + \"R_1\"\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_sfqr.layers_name_Q_R1_nam_sfqr.return.Q_R": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_sfqr.layers_name_Q_R1_nam_sfqr.return.Q_R", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 586, "end_line": 618, "span_ids": ["sfqr"], "tokens": 418}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def sfqr(data, name=None):\n # ... other code\n layers[name_Q_R1] = {(name_Q_R1, 0, 0): (np.linalg.qr, (name_A_1, 0, 0))}\n dependencies[name_Q_R1] = {name_A_1}\n\n layers[name_Q] = {(name_Q, 0, 0): (operator.getitem, (name_Q_R1, 0, 0), 0)}\n dependencies[name_Q] = {name_Q_R1}\n\n layers[name_R_1] = {(name_R_1, 0, 0): (operator.getitem, (name_Q_R1, 0, 0), 1)}\n dependencies[name_R_1] = {name_Q_R1}\n\n graph = HighLevelGraph(layers, dependencies)\n\n Q_meta = meta_from_array(data, len((m, min(m, n))), dtype=qq.dtype)\n R_1_meta = meta_from_array(data, len((min(m, n), cc)), dtype=rr.dtype)\n Q = Array(graph, name_Q, shape=(m, min(m, n)), chunks=(m, min(m, n)), meta=Q_meta)\n R_1 = Array(graph, name_R_1, shape=(min(m, n), cc), chunks=(cr, cc), meta=R_1_meta)\n\n # R = [R_1 Q'A_rest]\n Rs = [R_1]\n\n if nc > 1:\n A_rest_meta = meta_from_array(data, len((min(m, n), n - cc)), dtype=rr.dtype)\n A_rest = Array(\n graph,\n name_A_rest,\n shape=(min(m, n), n - cc),\n chunks=(cr, data.chunks[1][1:]),\n meta=A_rest_meta,\n )\n Rs.append(Q.T.dot(A_rest))\n\n R = concatenate(Rs, axis=1)\n\n return Q, R", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_compression_level_compression_level.return.min_max_min_subspace_size": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_compression_level_compression_level.return.min_max_min_subspace_size", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 621, "end_line": 637, "span_ids": ["compression_level"], "tokens": 172}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def compression_level(n, q, oversampling=10, min_subspace_size=20):\n \"\"\"Compression level to use in svd_compressed\n\n Given the size ``n`` of a space, compress that that to one of size\n ``q`` plus 
oversampling.\n\n The oversampling allows for greater flexibility in finding an\n appropriate subspace; a low value is often enough (10 is already a\n very conservative choice; it can be further reduced).\n ``q + oversampling`` should not be larger than ``n``. In this\n specific implementation, ``q + oversampling`` is at least\n ``min_subspace_size``.\n\n >>> compression_level(100, 10)\n 20\n \"\"\"\n return min(max(min_subspace_size, q + oversampling), n)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_compression_matrix_compression_matrix.return.q_T": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_compression_matrix_compression_matrix.return.q_T", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 640, "end_line": 692, "span_ids": ["compression_matrix"], "tokens": 447}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def compression_matrix(data, q, n_power_iter=0, seed=None, compute=False):\n \"\"\"Randomly sample matrix to find most active subspace\n\n The compression matrix returned by this algorithm can be used to\n compute both the QR decomposition and the Singular Value\n Decomposition.\n\n Parameters\n ----------\n data: Array\n q: int\n Size of the desired subspace (the actual size will be bigger,\n because of oversampling, see ``da.linalg.compression_level``)\n n_power_iter: int\n Number of power iterations, useful when the singular values of\n the input matrix decay very slowly.\n compute : bool\n Whether or not to compute data at each use.\n Recomputing the input while performing several passes reduces memory\n pressure, but means that we have to compute the input multiple times.\n This is a good choice if the data is larger than memory and cheap to\n recreate.\n\n References\n ----------\n N. Halko, P. G. Martinsson, and J. A. Tropp.\n Finding structure with randomness: Probabilistic algorithms for\n constructing approximate matrix decompositions.\n SIAM Rev., Survey and Review section, Vol. 53, num. 2,\n pp. 
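Behavior of ``compression_level`` at the boundaries, restating the one-liner for illustration: the result is clamped to ``n`` from above and to ``min_subspace_size`` from below.

```python
def compression_level(n, q, oversampling=10, min_subspace_size=20):
    return min(max(min_subspace_size, q + oversampling), n)

print(compression_level(100, 10))  # 20  (min_subspace_size wins)
print(compression_level(100, 50))  # 60  (q + oversampling)
print(compression_level(100, 95))  # 100 (clamped to n)
```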
217-288, June 2011\n https://arxiv.org/abs/0909.4061\n \"\"\"\n m, n = data.shape\n comp_level = compression_level(min(m, n), q)\n if isinstance(seed, RandomState):\n state = seed\n else:\n state = RandomState(seed)\n omega = state.standard_normal(\n size=(n, comp_level), chunks=(data.chunks[1], (comp_level,))\n )\n mat_h = data.dot(omega)\n for j in range(n_power_iter):\n if compute:\n mat_h = mat_h.persist()\n wait(mat_h)\n tmp = data.T.dot(mat_h)\n if compute:\n tmp = tmp.persist()\n wait(tmp)\n mat_h = data.dot(tmp)\n q, _ = tsqr(mat_h)\n return q.T", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_svd_compressed_svd_compressed.return.u_s_v": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_svd_compressed_svd_compressed.return.u_s_v", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 695, "end_line": 758, "span_ids": ["svd_compressed"], "tokens": 561}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def svd_compressed(a, k, n_power_iter=0, seed=None, compute=False, coerce_signs=True):\n \"\"\"Randomly compressed rank-k thin Singular Value Decomposition.\n\n This computes the approximate singular value decomposition of a large\n array. This algorithm is generally faster than the normal algorithm\n but does not provide exact results. One can balance between\n performance and accuracy with input parameters (see below).\n\n Parameters\n ----------\n a: Array\n Input array\n k: int\n Rank of the desired thin SVD decomposition.\n n_power_iter: int\n Number of power iterations, useful when the singular values\n decay slowly. Error decreases exponentially as n_power_iter\n increases. In practice, set n_power_iter <= 4.\n compute : bool\n Whether or not to compute data at each use.\n Recomputing the input while performing several passes reduces memory\n pressure, but means that we have to compute the input multiple times.\n This is a good choice if the data is larger than memory and cheap to\n recreate.\n coerce_signs : bool\n Whether or not to apply sign coercion to singular vectors in\n order to maintain deterministic results, by default True.\n\n\n Examples\n --------\n >>> u, s, vt = svd_compressed(x, 20) # doctest: +SKIP\n\n Returns\n -------\n u: Array, unitary / orthogonal\n s: Array, singular values in decreasing order (largest first)\n v: Array, unitary / orthogonal\n\n References\n ----------\n N. Halko, P. G. Martinsson, and J. A. Tropp.\n Finding structure with randomness: Probabilistic algorithms for\n constructing approximate matrix decompositions.\n SIAM Rev., Survey and Review section, Vol. 53, num. 2,\n pp. 
217-288, June 2011\n https://arxiv.org/abs/0909.4061\n \"\"\"\n comp = compression_matrix(\n a, k, n_power_iter=n_power_iter, seed=seed, compute=compute\n )\n if compute:\n comp = comp.persist()\n wait(comp)\n a_compressed = comp.dot(a)\n v, s, u = tsqr(a_compressed.T, compute_svd=True)\n u = comp.T.dot(u)\n v = v.T\n u = u[:, :k]\n s = s[:k]\n v = v[:k, :]\n if coerce_signs:\n u, v = svd_flip(u, v)\n return u, s, v", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_qr_qr.if_len_a_chunks_1_1_.else_.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_qr_qr.if_len_a_chunks_1_1_.else_.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 761, "end_line": 797, "span_ids": ["qr"], "tokens": 311}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def qr(a):\n \"\"\"\n Compute the qr factorization of a matrix.\n\n Parameters\n ----------\n a : Array\n\n Returns\n -------\n q: Array, orthonormal\n r: Array, upper-triangular\n\n Examples\n --------\n >>> q, r = da.linalg.qr(x) # doctest: +SKIP\n\n See Also\n --------\n numpy.linalg.qr: Equivalent NumPy Operation\n dask.array.linalg.tsqr: Implementation for tall-and-skinny arrays\n dask.array.linalg.sfqr: Implementation for short-and-fat arrays\n \"\"\"\n\n if len(a.chunks[1]) == 1 and len(a.chunks[0]) > 1:\n return tsqr(a)\n elif len(a.chunks[0]) == 1:\n return sfqr(a)\n else:\n raise NotImplementedError(\n \"qr currently supports only tall-and-skinny (single column chunk/block; see tsqr)\\n\"\n \"and short-and-fat (single row chunk/block; see sfqr) matrices\\n\\n\"\n \"Consider use of the rechunk method. 
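A hedged usage sketch for ``svd_compressed`` (sizes and rank illustrative): unlike ``svd``, it accepts arrays chunked in both dimensions.

```python
import dask.array as da

x = da.random.normal(size=(2000, 2000), chunks=(500, 500))
u, s, v = da.linalg.svd_compressed(x, k=20, n_power_iter=2)
print(u.shape, s.shape, v.shape)  # (2000, 20) (20,) (20, 2000)
```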
For example,\\n\\n\"\n \"x.rechunk({0: -1, 1: 'auto'}) or x.rechunk({0: 'auto', 1: -1})\\n\\n\"\n \"which rechunk one shorter axis to a single chunk, while allowing\\n\"\n \"the other axis to automatically grow/shrink appropriately.\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_svd_svd._Single_chunk_case": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_svd_svd._Single_chunk_case", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 800, "end_line": 860, "span_ids": ["svd"], "tokens": 533}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def svd(a, coerce_signs=True):\n \"\"\"\n Compute the singular value decomposition of a matrix.\n\n Parameters\n ----------\n a : (M, N) Array\n coerce_signs : bool\n Whether or not to apply sign coercion to singular vectors in\n order to maintain deterministic results, by default True.\n\n Examples\n --------\n\n >>> u, s, v = da.linalg.svd(x) # doctest: +SKIP\n\n Returns\n -------\n\n u : (M, K) Array, unitary / orthogonal\n Left-singular vectors of `a` (in columns) with shape (M, K)\n where K = min(M, N).\n s : (K,) Array, singular values in decreasing order (largest first)\n Singular values of `a`.\n v : (K, N) Array, unitary / orthogonal\n Right-singular vectors of `a` (in rows) with shape (K, N)\n where K = min(M, N).\n\n Warnings\n --------\n\n SVD is only supported for arrays with chunking in one dimension.\n This requires that all inputs either contain a single column\n of chunks (tall-and-skinny) or a single row of chunks (short-and-fat).\n For arrays with chunking in both dimensions, see da.linalg.svd_compressed.\n\n See Also\n --------\n\n np.linalg.svd : Equivalent NumPy Operation\n da.linalg.svd_compressed : Randomized SVD for fully chunked arrays\n dask.array.linalg.tsqr : QR factorization for tall-and-skinny arrays\n dask.array.utils.svd_flip : Sign normalization for singular vectors\n \"\"\"\n nb = a.numblocks\n if a.ndim != 2:\n raise ValueError(\n \"Array must be 2D.\\n\"\n \"Input shape: {}\\n\"\n \"Input ndim: {}\\n\".format(a.shape, a.ndim)\n )\n if nb[0] > 1 and nb[1] > 1:\n raise NotImplementedError(\n \"Array must be chunked in one dimension only. \"\n \"This function (svd) only supports tall-and-skinny or short-and-fat \"\n \"matrices (see da.linalg.svd_compressed for SVD on fully chunked arrays).\\n\"\n \"Input shape: {}\\n\"\n \"Input numblocks: {}\\n\".format(a.shape, nb)\n )\n\n # Single-chunk case\n # ... 
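The dispatch rule in practice (shapes illustrative; only graphs are built, nothing is computed):

```python
import dask.array as da

x = da.random.normal(size=(100000, 50), chunks=(10000, 50))
q, r = da.linalg.qr(x)   # one column of blocks -> tsqr path
print(q.shape, r.shape)  # (100000, 50) (50, 50)

y = da.random.normal(size=(50, 100000), chunks=(50, 10000))
q, r = da.linalg.qr(y)   # one row of wide blocks -> sfqr path
print(q.shape, r.shape)  # (50, 50) (50, 100000)
```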
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_svd.if_nb_0_nb_1_1___solve_triangular_lower.return.scipy_linalg_solve_triang": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_svd.if_nb_0_nb_1_1___solve_triangular_lower.return.scipy_linalg_solve_triang", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 861, "end_line": 896, "span_ids": ["svd", "_solve_triangular_lower"], "tokens": 399}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def svd(a, coerce_signs=True):\n # ... other code\n if nb[0] == nb[1] == 1:\n m, n = a.shape\n k = min(a.shape)\n mu, ms, mv = np.linalg.svd(\n ones_like_safe(a._meta, shape=(1, 1), dtype=a._meta.dtype)\n )\n u, s, v = delayed(np.linalg.svd, nout=3)(a, full_matrices=False)\n u = from_delayed(u, shape=(m, k), meta=mu)\n s = from_delayed(s, shape=(k,), meta=ms)\n v = from_delayed(v, shape=(k, n), meta=mv)\n # Multi-chunk cases\n else:\n # Tall-and-skinny case\n if nb[0] > nb[1]:\n u, s, v = tsqr(a, compute_svd=True)\n truncate = a.shape[0] < a.shape[1]\n # Short-and-fat case\n else:\n vt, s, ut = tsqr(a.T, compute_svd=True)\n u, s, v = ut.T, s, vt.T\n truncate = a.shape[0] > a.shape[1]\n # Only when necessary, remove extra singular vectors if array\n # has shape that contradicts chunking, e.g. 
the array is a\n # column of chunks but still has more columns than rows overall\n if truncate:\n k = min(a.shape)\n u, v = u[:, :k], v[:k, :]\n if coerce_signs:\n u, v = svd_flip(u, v)\n return u, s, v\n\n\ndef _solve_triangular_lower(a, b):\n import scipy.linalg\n\n return scipy.linalg.solve_triangular(a, b, lower=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_lu_lu.dsk._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_lu_lu.dsk._", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 897, "end_line": 946, "span_ids": ["lu"], "tokens": 367}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def lu(a):\n \"\"\"\n Compute the lu decomposition of a matrix.\n\n Examples\n --------\n\n >>> p, l, u = da.linalg.lu(x) # doctest: +SKIP\n\n Returns\n -------\n\n p: Array, permutation matrix\n l: Array, lower triangular matrix with unit diagonal.\n u: Array, upper triangular matrix\n \"\"\"\n\n import scipy.linalg\n\n if a.ndim != 2:\n raise ValueError(\"Dimension must be 2 to perform lu decomposition\")\n\n xdim, ydim = a.shape\n if xdim != ydim:\n raise ValueError(\"Input must be a square matrix to perform lu decomposition\")\n if not len(set(a.chunks[0] + a.chunks[1])) == 1:\n msg = (\n \"All chunks must be a square matrix to perform lu decomposition. \"\n \"Use .rechunk method to change the size of chunks.\"\n )\n raise ValueError(msg)\n\n vdim = len(a.chunks[0])\n hdim = len(a.chunks[1])\n\n token = tokenize(a)\n name_lu = \"lu-lu-\" + token\n\n name_p = \"lu-p-\" + token\n name_l = \"lu-l-\" + token\n name_u = \"lu-u-\" + token\n\n # for internal calculation\n name_p_inv = \"lu-p-inv-\" + token\n name_l_permuted = \"lu-l-permute-\" + token\n name_u_transposed = \"lu-u-transpose-\" + token\n name_plu_dot = \"lu-plu-dot-\" + token\n name_lu_dot = \"lu-lu-dot-\" + token\n\n dsk = {}\n # ... 
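The truncation branch above in action: a single column of blocks that is nevertheless wider than it is tall, so ``u`` and ``v`` are cut back to ``k = min(m, n)`` (sizes illustrative):

```python
import dask.array as da

x = da.random.normal(size=(10, 100), chunks=(5, 100))  # column of blocks, but m < n
u, s, v = da.linalg.svd(x)
print(u.shape, s.shape, v.shape)  # (10, 10) (10,) (10, 100)
```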
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_lu.for_i_in_range_min_vdim__lu.for_i_in_range_min_vdim_.for_k_in_range_i_1_vdi.dsk_name_lu_k_i_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_lu.for_i_in_range_min_vdim__lu.for_i_in_range_min_vdim_.for_k_in_range_i_1_vdi.dsk_name_lu_k_i_", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 947, "end_line": 989, "span_ids": ["lu"], "tokens": 487}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def lu(a):\n # ... other code\n for i in range(min(vdim, hdim)):\n target = (a.name, i, i)\n if i > 0:\n prevs = []\n for p in range(i):\n prev = name_plu_dot, i, p, p, i\n dsk[prev] = (np.dot, (name_l_permuted, i, p), (name_u, p, i))\n prevs.append(prev)\n target = (operator.sub, target, (sum, prevs))\n # diagonal block\n dsk[name_lu, i, i] = (scipy.linalg.lu, target)\n\n # sweep to horizontal\n for j in range(i + 1, hdim):\n target = (np.dot, (name_p_inv, i, i), (a.name, i, j))\n if i > 0:\n prevs = []\n for p in range(i):\n prev = name_lu_dot, i, p, p, j\n dsk[prev] = (np.dot, (name_l, i, p), (name_u, p, j))\n prevs.append(prev)\n target = (operator.sub, target, (sum, prevs))\n dsk[name_lu, i, j] = (_solve_triangular_lower, (name_l, i, i), target)\n\n # sweep to vertical\n for k in range(i + 1, vdim):\n target = (a.name, k, i)\n if i > 0:\n prevs = []\n for p in range(i):\n prev = name_plu_dot, k, p, p, i\n dsk[prev] = (np.dot, (name_l_permuted, k, p), (name_u, p, i))\n prevs.append(prev)\n target = (operator.sub, target, (sum, prevs))\n # solving x.dot(u) = target is equal to u.T.dot(x.T) = target.T\n dsk[name_lu, k, i] = (\n np.transpose,\n (\n _solve_triangular_lower,\n (name_u_transposed, i, i),\n (np.transpose, target),\n ),\n )\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_lu.None_4_lu.return.p_l_u": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_lu.None_4_lu.return.p_l_u", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 991, "end_line": 1031, "span_ids": ["lu"], "tokens": 692}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def lu(a):\n # ... 
other code\n\n for i in range(min(vdim, hdim)):\n for j in range(min(vdim, hdim)):\n if i == j:\n dsk[name_p, i, j] = (operator.getitem, (name_lu, i, j), 0)\n dsk[name_l, i, j] = (operator.getitem, (name_lu, i, j), 1)\n dsk[name_u, i, j] = (operator.getitem, (name_lu, i, j), 2)\n\n # permuted l is required to be propagated to i > j blocks\n dsk[name_l_permuted, i, j] = (np.dot, (name_p, i, j), (name_l, i, j))\n dsk[name_u_transposed, i, j] = (np.transpose, (name_u, i, j))\n # transposed permutation matrix is equal to its inverse\n dsk[name_p_inv, i, j] = (np.transpose, (name_p, i, j))\n elif i > j:\n dsk[name_p, i, j] = (np.zeros, (a.chunks[0][i], a.chunks[1][j]))\n # calculations are performed using permuted l,\n # thus the result should be reverted by inverted (=transposed) p\n # to have the same row order as diagonal blocks\n dsk[name_l, i, j] = (np.dot, (name_p_inv, i, i), (name_lu, i, j))\n dsk[name_u, i, j] = (np.zeros, (a.chunks[0][i], a.chunks[1][j]))\n dsk[name_l_permuted, i, j] = (name_lu, i, j)\n else:\n dsk[name_p, i, j] = (np.zeros, (a.chunks[0][i], a.chunks[1][j]))\n dsk[name_l, i, j] = (np.zeros, (a.chunks[0][i], a.chunks[1][j]))\n dsk[name_u, i, j] = (name_lu, i, j)\n # l_permuted is not referred in upper triangulars\n\n pp, ll, uu = scipy.linalg.lu(np.ones(shape=(1, 1), dtype=a.dtype))\n pp_meta = meta_from_array(a, dtype=pp.dtype)\n ll_meta = meta_from_array(a, dtype=ll.dtype)\n uu_meta = meta_from_array(a, dtype=uu.dtype)\n\n graph = HighLevelGraph.from_collections(name_p, dsk, dependencies=[a])\n p = Array(graph, name_p, shape=a.shape, chunks=a.chunks, meta=pp_meta)\n\n graph = HighLevelGraph.from_collections(name_l, dsk, dependencies=[a])\n l = Array(graph, name_l, shape=a.shape, chunks=a.chunks, meta=ll_meta)\n\n graph = HighLevelGraph.from_collections(name_u, dsk, dependencies=[a])\n u = Array(graph, name_u, shape=a.shape, chunks=a.chunks, meta=uu_meta)\n\n return p, l, u", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_solve_triangular_solve_triangular.return.Array_graph_name_shape_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_solve_triangular_solve_triangular.return.Array_graph_name_shape_", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1034, "end_line": 1126, "span_ids": ["solve_triangular"], "tokens": 841}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def solve_triangular(a, b, lower=False):\n \"\"\"\n Solve the equation `a x = b` for `x`, assuming a is a triangular matrix.\n\n Parameters\n ----------\n a : (M, M) array_like\n A triangular matrix\n b : (M,) or (M, N) array_like\n Right-hand side matrix in `a x = b`\n lower : bool, optional\n Use only data contained in the lower triangle of `a`.\n Default is to use upper triangle.\n\n Returns\n -------\n x : (M,) or (M, N) array\n Solution to the system `a x = b`. 
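A round-trip sketch for ``lu`` (requires SciPy; sizes illustrative). The input must be square with square chunks of one common size, and ``p @ l @ u`` reproduces it:

```python
import dask.array as da
import numpy as np

x = da.random.normal(size=(8, 8), chunks=(4, 4))
p, l, u = da.linalg.lu(x)
print(np.allclose(p.dot(l).dot(u).compute(), x.compute()))  # True
```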
Shape of return matches `b`.\n \"\"\"\n\n import scipy.linalg\n\n if a.ndim != 2:\n raise ValueError(\"a must be 2 dimensional\")\n if b.ndim <= 2:\n if a.shape[1] != b.shape[0]:\n raise ValueError(\"a.shape[1] and b.shape[0] must be equal\")\n if a.chunks[1] != b.chunks[0]:\n msg = (\n \"a.chunks[1] and b.chunks[0] must be equal. \"\n \"Use .rechunk method to change the size of chunks.\"\n )\n raise ValueError(msg)\n else:\n raise ValueError(\"b must be 1 or 2 dimensional\")\n\n vchunks = len(a.chunks[1])\n hchunks = 1 if b.ndim == 1 else len(b.chunks[1])\n token = tokenize(a, b, lower)\n name = \"solve-triangular-\" + token\n\n # for internal calculation\n # (name, i, j, k, l) corresponds to a_ij.dot(b_kl)\n name_mdot = \"solve-tri-dot-\" + token\n\n def _b_init(i, j):\n if b.ndim == 1:\n return b.name, i\n else:\n return b.name, i, j\n\n def _key(i, j):\n if b.ndim == 1:\n return name, i\n else:\n return name, i, j\n\n dsk = {}\n if lower:\n for i in range(vchunks):\n for j in range(hchunks):\n target = _b_init(i, j)\n if i > 0:\n prevs = []\n for k in range(i):\n prev = name_mdot, i, k, k, j\n dsk[prev] = (np.dot, (a.name, i, k), _key(k, j))\n prevs.append(prev)\n target = (operator.sub, target, (sum, prevs))\n dsk[_key(i, j)] = (_solve_triangular_lower, (a.name, i, i), target)\n else:\n for i in range(vchunks):\n for j in range(hchunks):\n target = _b_init(i, j)\n if i < vchunks - 1:\n prevs = []\n for k in range(i + 1, vchunks):\n prev = name_mdot, i, k, k, j\n dsk[prev] = (np.dot, (a.name, i, k), _key(k, j))\n prevs.append(prev)\n target = (operator.sub, target, (sum, prevs))\n dsk[_key(i, j)] = (\n scipy.linalg.solve_triangular,\n (a.name, i, i),\n target,\n )\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[a, b])\n res = _solve_triangular_lower(\n np.array([[1, 0], [1, 2]], dtype=a.dtype), np.array([0, 1], dtype=b.dtype)\n )\n meta = meta_from_array(a, b.ndim, dtype=res.dtype)\n return Array(graph, name, shape=b.shape, chunks=b.chunks, meta=meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_solve_solve.return.solve_triangular_u_uy_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_solve_solve.return.solve_triangular_u_uy_", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1129, "end_line": 1157, "span_ids": ["solve"], "tokens": 240}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def solve(a, b, sym_pos=False):\n \"\"\"\n Solve the equation ``a x = b`` for ``x``. By default, use LU\n decomposition and forward / backward substitutions. When ``sym_pos`` is\n ``True``, use Cholesky decomposition.\n\n Parameters\n ----------\n a : (M, M) array_like\n A square matrix.\n b : (M,) or (M, N) array_like\n Right-hand side matrix in ``a x = b``.\n sym_pos : bool\n Assume a is symmetric and positive definite. 
If ``True``, use Cholesky\n decomposition.\n\n Returns\n -------\n x : (M,) or (M, N) Array\n Solution to the system ``a x = b``. Shape of the return matches the\n shape of `b`.\n \"\"\"\n if sym_pos:\n l, u = _cholesky(a)\n else:\n p, l, u = lu(a)\n b = p.T.dot(b)\n uy = solve_triangular(l, b, lower=True)\n return solve_triangular(u, uy)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_inv__cholesky_lower.return.scipy_linalg_cholesky_a_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_inv__cholesky_lower.return.scipy_linalg_cholesky_a_", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1160, "end_line": 1181, "span_ids": ["_cholesky_lower", "inv"], "tokens": 111}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def inv(a):\n \"\"\"\n Compute the inverse of a matrix with LU decomposition and\n forward / backward substitutions.\n\n Parameters\n ----------\n a : array_like\n Square matrix to be inverted.\n\n Returns\n -------\n ainv : Array\n Inverse of the matrix `a`.\n \"\"\"\n return solve(a, eye(a.shape[0], chunks=a.chunks[0][0]))\n\n\ndef _cholesky_lower(a):\n import scipy.linalg\n\n return scipy.linalg.cholesky(a, lower=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_cholesky_cholesky.if_lower_.else_.return.u": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_cholesky_cholesky.if_lower_.else_.return.u", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1184, "end_line": 1207, "span_ids": ["cholesky"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def cholesky(a, lower=False):\n \"\"\"\n Returns the Cholesky decomposition, :math:`A = L L^*` or\n :math:`A = U^* U` of a Hermitian positive-definite matrix A.\n\n Parameters\n ----------\n a : (M, M) array_like\n Matrix to be decomposed\n lower : bool, optional\n Whether to compute the upper or lower triangular Cholesky\n factorization. 
Default is upper-triangular.\n\n Returns\n -------\n c : (M, M) Array\n Upper- or lower-triangular Cholesky factor of `a`.\n \"\"\"\n\n l, u = _cholesky(a)\n if lower:\n return l\n else:\n return u", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py__cholesky__cholesky.return.lower_upper": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py__cholesky__cholesky.return.lower_upper", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1210, "end_line": 1285, "span_ids": ["_cholesky"], "tokens": 855}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _cholesky(a):\n \"\"\"\n Private function to perform Cholesky decomposition, which returns both\n lower and upper triangulars.\n \"\"\"\n import scipy.linalg\n\n if a.ndim != 2:\n raise ValueError(\"Dimension must be 2 to perform cholesky decomposition\")\n\n xdim, ydim = a.shape\n if xdim != ydim:\n raise ValueError(\n \"Input must be a square matrix to perform cholesky decomposition\"\n )\n if not len(set(a.chunks[0] + a.chunks[1])) == 1:\n msg = (\n \"All chunks must be a square matrix to perform cholesky decomposition. 
\"\n \"Use .rechunk method to change the size of chunks.\"\n )\n raise ValueError(msg)\n\n vdim = len(a.chunks[0])\n hdim = len(a.chunks[1])\n\n token = tokenize(a)\n name = \"cholesky-\" + token\n\n # (name_lt_dot, i, j, k, l) corresponds to l_ij.dot(l_kl.T)\n name_lt_dot = \"cholesky-lt-dot-\" + token\n # because transposed results are needed for calculation,\n # we can build graph for upper triangular simultaneously\n name_upper = \"cholesky-upper-\" + token\n\n # calculates lower triangulars because subscriptions get simpler\n dsk = {}\n for i in range(vdim):\n for j in range(hdim):\n if i < j:\n dsk[name, i, j] = (np.zeros, (a.chunks[0][i], a.chunks[1][j]))\n dsk[name_upper, j, i] = (name, i, j)\n elif i == j:\n target = (a.name, i, j)\n if i > 0:\n prevs = []\n for p in range(i):\n prev = name_lt_dot, i, p, i, p\n dsk[prev] = (np.dot, (name, i, p), (name_upper, p, i))\n prevs.append(prev)\n target = (operator.sub, target, (sum, prevs))\n dsk[name, i, i] = (_cholesky_lower, target)\n dsk[name_upper, i, i] = (np.transpose, (name, i, i))\n else:\n # solving x.dot(L11.T) = (A21 - L20.dot(L10.T)) is equal to\n # L11.dot(x.T) = A21.T - L10.dot(L20.T)\n # L11.dot(x.T) = A12 - L10.dot(L02)\n target = (a.name, j, i)\n if j > 0:\n prevs = []\n for p in range(j):\n prev = name_lt_dot, j, p, i, p\n dsk[prev] = (np.dot, (name, j, p), (name_upper, p, i))\n prevs.append(prev)\n target = (operator.sub, target, (sum, prevs))\n dsk[name_upper, j, i] = (_solve_triangular_lower, (name, j, j), target)\n dsk[name, i, j] = (np.transpose, (name_upper, j, i))\n\n graph_upper = HighLevelGraph.from_collections(name_upper, dsk, dependencies=[a])\n graph_lower = HighLevelGraph.from_collections(name, dsk, dependencies=[a])\n cho = scipy.linalg.cholesky(np.array([[1, 2], [2, 5]], dtype=a.dtype))\n meta = meta_from_array(a, dtype=cho.dtype)\n\n lower = Array(graph_lower, name, shape=a.shape, chunks=a.chunks, meta=meta)\n # do not use .T, because part of transposed blocks are already calculated\n upper = Array(graph_upper, name_upper, shape=a.shape, chunks=a.chunks, meta=meta)\n return lower, upper", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py__sort_decreasing_lstsq.return.x_residuals_rank_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py__sort_decreasing_lstsq.return.x_residuals_rank_s", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1290, "end_line": 1366, "span_ids": ["lstsq", "_sort_decreasing"], "tokens": 761}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _sort_decreasing(x):\n x[::-1].sort()\n return x\n\n\ndef lstsq(a, b):\n \"\"\"\n Return the least-squares solution to a linear matrix equation using\n QR decomposition.\n\n Solves the equation `a x = b` by computing a vector `x` that\n minimizes the Euclidean 2-norm `|| b - a x ||^2`. 
The equation may\n be under-, well-, or over- determined (i.e., the number of\n linearly independent rows of `a` can be less than, equal to, or\n greater than its number of linearly independent columns). If `a`\n is square and of full rank, then `x` (but for round-off error) is\n the \"exact\" solution of the equation.\n\n Parameters\n ----------\n a : (M, N) array_like\n \"Coefficient\" matrix.\n b : {(M,), (M, K)} array_like\n Ordinate or \"dependent variable\" values. If `b` is two-dimensional,\n the least-squares solution is calculated for each of the `K` columns\n of `b`.\n\n Returns\n -------\n x : {(N,), (N, K)} Array\n Least-squares solution. If `b` is two-dimensional,\n the solutions are in the `K` columns of `x`.\n residuals : {(1,), (K,)} Array\n Sums of residuals; squared Euclidean 2-norm for each column in\n ``b - a*x``.\n If `b` is 1-dimensional, this is a (1,) shape array.\n Otherwise the shape is (K,).\n rank : Array\n Rank of matrix `a`.\n s : (min(M, N),) Array\n Singular values of `a`.\n \"\"\"\n q, r = qr(a)\n x = solve_triangular(r, q.T.dot(b))\n residuals = b - a.dot(x)\n residuals = (residuals ** 2).sum(axis=0, keepdims=b.ndim == 1)\n\n token = tokenize(a, b)\n\n # r must be a triangular with single block\n\n # rank\n rname = \"lstsq-rank-\" + token\n rdsk = {(rname,): (np.linalg.matrix_rank, (r.name, 0, 0))}\n graph = HighLevelGraph.from_collections(rname, rdsk, dependencies=[r])\n # rank must be an integer\n rank = Array(graph, rname, shape=(), chunks=(), dtype=int)\n\n # singular\n sname = \"lstsq-singular-\" + token\n rt = r.T\n sdsk = {\n (sname, 0): (\n _sort_decreasing,\n (np.sqrt, (np.linalg.eigvals, (np.dot, (rt.name, 0, 0), (r.name, 0, 0)))),\n )\n }\n graph = HighLevelGraph.from_collections(sname, sdsk, dependencies=[rt, r])\n _, _, _, ss = np.linalg.lstsq(\n np.array([[1, 0], [1, 2]], dtype=a.dtype),\n np.array([0, 1], dtype=b.dtype),\n rcond=-1,\n )\n meta = meta_from_array(r, 1, dtype=ss.dtype)\n s = Array(graph, sname, shape=(r.shape[0],), chunks=r.shape[0], meta=meta)\n\n return x, residuals, rank, s", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_norm_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_norm_", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1363, "end_line": 1440, "span_ids": ["norm"], "tokens": 763}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np.linalg)\ndef norm(x, ord=None, axis=None, keepdims=False):\n if axis is None:\n axis = tuple(range(x.ndim))\n elif isinstance(axis, Number):\n axis = (int(axis),)\n else:\n axis = tuple(axis)\n\n if len(axis) > 2:\n raise ValueError(\"Improper number of dimensions to norm.\")\n\n if ord == \"fro\":\n ord = None\n if len(axis) == 1:\n raise ValueError(\"Invalid norm order for vectors.\")\n\n # Coerce to double precision.\n r = 
x.astype(np.promote_types(x.dtype, float))\n\n if ord is None:\n r = (abs(r) ** 2).sum(axis=axis, keepdims=keepdims) ** 0.5\n elif ord == \"nuc\":\n if len(axis) == 1:\n raise ValueError(\"Invalid norm order for vectors.\")\n if x.ndim > 2:\n raise NotImplementedError(\"SVD based norm not implemented for ndim > 2\")\n\n r = svd(x)[1][None].sum(keepdims=keepdims)\n elif ord == np.inf:\n r = abs(r)\n if len(axis) == 1:\n r = r.max(axis=axis, keepdims=keepdims)\n else:\n r = r.sum(axis=axis[1], keepdims=True).max(axis=axis[0], keepdims=True)\n if keepdims is False:\n r = r.squeeze(axis=axis)\n elif ord == -np.inf:\n r = abs(r)\n if len(axis) == 1:\n r = r.min(axis=axis, keepdims=keepdims)\n else:\n r = r.sum(axis=axis[1], keepdims=True).min(axis=axis[0], keepdims=True)\n if keepdims is False:\n r = r.squeeze(axis=axis)\n elif ord == 0:\n if len(axis) == 2:\n raise ValueError(\"Invalid norm order for matrices.\")\n\n r = (r != 0).astype(r.dtype).sum(axis=axis, keepdims=keepdims)\n elif ord == 1:\n r = abs(r)\n if len(axis) == 1:\n r = r.sum(axis=axis, keepdims=keepdims)\n else:\n r = r.sum(axis=axis[0], keepdims=True).max(axis=axis[1], keepdims=True)\n if keepdims is False:\n r = r.squeeze(axis=axis)\n elif len(axis) == 2 and ord == -1:\n r = abs(r).sum(axis=axis[0], keepdims=True).min(axis=axis[1], keepdims=True)\n if keepdims is False:\n r = r.squeeze(axis=axis)\n elif len(axis) == 2 and ord == 2:\n if x.ndim > 2:\n raise NotImplementedError(\"SVD based norm not implemented for ndim > 2\")\n r = svd(x)[1][None].max(keepdims=keepdims)\n elif len(axis) == 2 and ord == -2:\n if x.ndim > 2:\n raise NotImplementedError(\"SVD based norm not implemented for ndim > 2\")\n r = svd(x)[1][None].min(keepdims=keepdims)\n else:\n if len(axis) == 2:\n raise ValueError(\"Invalid norm order for matrices.\")\n\n r = (abs(r) ** ord).sum(axis=axis, keepdims=keepdims) ** (1.0 / ord)\n\n return r", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py_from_functools_import_wra_normalize_masked_array.return._data_mask_fill_value_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py_from_functools_import_wra_normalize_masked_array.return._data_mask_fill_value_", "embedding": null, "metadata": {"file_path": "dask/array/ma.py", "file_name": "ma.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 22, "span_ids": ["imports", "normalize_masked_array"], "tokens": 114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from functools import wraps\n\nimport numpy as np\n\nfrom ..base import normalize_token\nfrom .core import (\n concatenate_lookup,\n tensordot_lookup,\n map_blocks,\n asanyarray,\n blockwise,\n)\nfrom .routines import _average\nfrom ..utils import derived_from\n\n\n@normalize_token.register(np.ma.masked_array)\ndef normalize_masked_array(x):\n data = normalize_token(x.data)\n mask = normalize_token(x.mask)\n fill_value = normalize_token(x.fill_value)\n return (data, mask, 
fill_value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py__concatenate__concatenate.return.out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py__concatenate__concatenate.return.out", "embedding": null, "metadata": {"file_path": "dask/array/ma.py", "file_name": "ma.py", "file_type": "text/x-python", "category": "implementation", "start_line": 25, "end_line": 38, "span_ids": ["_concatenate"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@concatenate_lookup.register(np.ma.masked_array)\ndef _concatenate(arrays, axis=0):\n out = np.ma.concatenate(arrays, axis=axis)\n fill_values = [i.fill_value for i in arrays if hasattr(i, \"fill_value\")]\n if any(isinstance(f, np.ndarray) for f in fill_values):\n raise ValueError(\n \"Dask doesn't support masked array's with non-scalar `fill_value`s\"\n )\n if fill_values:\n # If all the fill_values are the same copy over the fill value\n fill_values = np.unique(fill_values)\n if len(fill_values) == 1:\n out.fill_value = fill_values[0]\n return out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py__tensordot__tensordot.return.res_reshape_olda_oldb_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py__tensordot__tensordot.return.res_reshape_olda_oldb_", "embedding": null, "metadata": {"file_path": "dask/array/ma.py", "file_name": "ma.py", "file_type": "text/x-python", "category": "implementation", "start_line": 41, "end_line": 106, "span_ids": ["_tensordot"], "tokens": 549}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@tensordot_lookup.register(np.ma.masked_array)\ndef _tensordot(a, b, axes=2):\n # Much of this is stolen from numpy/core/numeric.py::tensordot\n try:\n iter(axes)\n except TypeError:\n axes_a = list(range(-axes, 0))\n axes_b = list(range(0, axes))\n else:\n axes_a, axes_b = axes\n try:\n na = len(axes_a)\n axes_a = list(axes_a)\n except TypeError:\n axes_a = [axes_a]\n na = 1\n try:\n nb = len(axes_b)\n axes_b = list(axes_b)\n except TypeError:\n axes_b = [axes_b]\n nb = 1\n\n # a, b = asarray(a), asarray(b) # <--- modified\n as_ = a.shape\n nda = a.ndim\n bs = b.shape\n ndb = b.ndim\n equal = True\n if na != nb:\n equal = False\n else:\n for k in range(na):\n if as_[axes_a[k]] != bs[axes_b[k]]:\n equal = False\n break\n if axes_a[k] < 0:\n axes_a[k] += nda\n if axes_b[k] < 0:\n axes_b[k] += ndb\n if not equal:\n raise 
ValueError(\"shape-mismatch for sum\")\n\n # Move the axes to sum over to the end of \"a\"\n # and to the front of \"b\"\n notin = [k for k in range(nda) if k not in axes_a]\n newaxes_a = notin + axes_a\n N2 = 1\n for axis in axes_a:\n N2 *= as_[axis]\n newshape_a = (-1, N2)\n olda = [as_[axis] for axis in notin]\n\n notin = [k for k in range(ndb) if k not in axes_b]\n newaxes_b = axes_b + notin\n N2 = 1\n for axis in axes_b:\n N2 *= bs[axis]\n newshape_b = (N2, -1)\n oldb = [bs[axis] for axis in notin]\n\n at = a.transpose(newaxes_a).reshape(newshape_a)\n bt = b.transpose(newaxes_b).reshape(newshape_b)\n res = np.ma.dot(at, bt)\n return res.reshape(olda + oldb)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py_filled_masked_outside.return.x_map_blocks_np_ma_masked": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py_filled_masked_outside.return.x_map_blocks_np_ma_masked", "embedding": null, "metadata": {"file_path": "dask/array/ma.py", "file_name": "ma.py", "file_type": "text/x-python", "category": "implementation", "start_line": 109, "end_line": 158, "span_ids": ["impl", "masked_equal", "_wrap_masked", "masked_inside", "masked_outside", "filled", "masked_invalid"], "tokens": 402}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np.ma)\ndef filled(a, fill_value=None):\n a = asanyarray(a)\n return a.map_blocks(np.ma.filled, fill_value=fill_value)\n\n\ndef _wrap_masked(f):\n @wraps(f)\n def _(a, value):\n a = asanyarray(a)\n value = asanyarray(value)\n ainds = tuple(range(a.ndim))[::-1]\n vinds = tuple(range(value.ndim))[::-1]\n oinds = max(ainds, vinds, key=len)\n return blockwise(f, oinds, a, ainds, value, vinds, dtype=a.dtype)\n\n return _\n\n\nmasked_greater = _wrap_masked(np.ma.masked_greater)\nmasked_greater_equal = _wrap_masked(np.ma.masked_greater_equal)\nmasked_less = _wrap_masked(np.ma.masked_less)\nmasked_less_equal = _wrap_masked(np.ma.masked_less_equal)\nmasked_not_equal = _wrap_masked(np.ma.masked_not_equal)\n\n\n@derived_from(np.ma)\ndef masked_equal(a, value):\n a = asanyarray(a)\n if getattr(value, \"shape\", ()):\n raise ValueError(\"da.ma.masked_equal doesn't support array `value`s\")\n inds = tuple(range(a.ndim))\n return blockwise(np.ma.masked_equal, inds, a, inds, value, (), dtype=a.dtype)\n\n\n@derived_from(np.ma)\ndef masked_invalid(a):\n return asanyarray(a).map_blocks(np.ma.masked_invalid)\n\n\n@derived_from(np.ma)\ndef masked_inside(x, v1, v2):\n x = asanyarray(x)\n return x.map_blocks(np.ma.masked_inside, v1, v2)\n\n\n@derived_from(np.ma)\ndef masked_outside(x, v1, v2):\n x = asanyarray(x)\n return x.map_blocks(np.ma.masked_outside, v1, v2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py_masked_where_masked_where.return.blockwise_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py_masked_where_masked_where.return.blockwise_", "embedding": null, "metadata": {"file_path": "dask/array/ma.py", "file_name": "ma.py", "file_type": "text/x-python", "category": "implementation", "start_line": 161, "end_line": 175, "span_ids": ["masked_where"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np.ma)\ndef masked_where(condition, a):\n cshape = getattr(condition, \"shape\", ())\n if cshape and cshape != a.shape:\n raise IndexError(\n \"Inconsistant shape between the condition and the \"\n \"input (got %s and %s)\" % (cshape, a.shape)\n )\n condition = asanyarray(condition)\n a = asanyarray(a)\n ainds = tuple(range(a.ndim))\n cinds = tuple(range(condition.ndim))\n return blockwise(\n np.ma.masked_where, ainds, condition, cinds, a, ainds, dtype=a.dtype\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py_masked_values__masked_array.return.np_ma_masked_array_data_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py_masked_values__masked_array.return.np_ma_masked_array_data_", "embedding": null, "metadata": {"file_path": "dask/array/ma.py", "file_name": "ma.py", "file_type": "text/x-python", "category": "implementation", "start_line": 178, "end_line": 208, "span_ids": ["getmaskarray", "_masked_array", "masked_values", "getdata", "fix_invalid"], "tokens": 245}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np.ma)\ndef masked_values(x, value, rtol=1e-05, atol=1e-08, shrink=True):\n x = asanyarray(x)\n if getattr(value, \"shape\", ()):\n raise ValueError(\"da.ma.masked_values doesn't support array `value`s\")\n return map_blocks(\n np.ma.masked_values, x, value, rtol=rtol, atol=atol, shrink=shrink\n )\n\n\n@derived_from(np.ma)\ndef fix_invalid(a, fill_value=None):\n a = asanyarray(a)\n return a.map_blocks(np.ma.fix_invalid, fill_value=fill_value)\n\n\n@derived_from(np.ma)\ndef getdata(a):\n a = asanyarray(a)\n return a.map_blocks(np.ma.getdata)\n\n\n@derived_from(np.ma)\ndef getmaskarray(a):\n a = asanyarray(a)\n return a.map_blocks(np.ma.getmaskarray)\n\n\ndef _masked_array(data, mask=np.ma.nomask, **kwargs):\n dtype = kwargs.pop(\"masked_dtype\", None)\n return np.ma.masked_array(data, mask=mask, dtype=dtype, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": 
"TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py_masked_array_masked_array.return.blockwise__masked_array_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py_masked_array_masked_array.return.blockwise__masked_array_", "embedding": null, "metadata": {"file_path": "dask/array/ma.py", "file_name": "ma.py", "file_type": "text/x-python", "category": "implementation", "start_line": 211, "end_line": 238, "span_ids": ["masked_array"], "tokens": 230}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np.ma)\ndef masked_array(data, mask=np.ma.nomask, fill_value=None, **kwargs):\n data = asanyarray(data)\n inds = tuple(range(data.ndim))\n arginds = [inds, data, inds]\n\n if getattr(fill_value, \"shape\", ()):\n raise ValueError(\"non-scalar fill_value not supported\")\n kwargs[\"fill_value\"] = fill_value\n\n if mask is not np.ma.nomask:\n mask = asanyarray(mask)\n if mask.size == 1:\n mask = mask.reshape((1,) * data.ndim)\n elif data.shape != mask.shape:\n raise np.ma.MaskError(\n \"Mask and data not compatible: data shape \"\n \"is %s, and mask shape is \"\n \"%s.\" % (repr(data.shape), repr(mask.shape))\n )\n arginds.extend([mask, inds])\n\n if \"dtype\" in kwargs:\n kwargs[\"masked_dtype\"] = kwargs[\"dtype\"]\n else:\n kwargs[\"dtype\"] = data.dtype\n\n return blockwise(_masked_array, *arginds, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py__set_fill_value_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py__set_fill_value_", "embedding": null, "metadata": {"file_path": "dask/array/ma.py", "file_name": "ma.py", "file_type": "text/x-python", "category": "implementation", "start_line": 241, "end_line": 262, "span_ids": ["_set_fill_value", "average", "set_fill_value"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _set_fill_value(x, fill_value):\n if isinstance(x, np.ma.masked_array):\n x = x.copy()\n np.ma.set_fill_value(x, fill_value=fill_value)\n return x\n\n\n@derived_from(np.ma)\ndef set_fill_value(a, fill_value):\n a = asanyarray(a)\n if getattr(fill_value, \"shape\", ()):\n raise ValueError(\"da.ma.set_fill_value doesn't support array `value`s\")\n fill_value = np.ma.core._check_fill_value(fill_value, a.dtype)\n res = a.map_blocks(_set_fill_value, fill_value)\n a.dask = res.dask\n a.name = res.name\n\n\n@derived_from(np.ma)\ndef average(a, axis=None, weights=None, returned=False):\n return _average(a, axis, weights, returned, is_masked=True)", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py_from_distutils_version_im_try_.except_TypeError_.ma_divide.np_ma_core__DomainedBinar": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py_from_distutils_version_im_try_.except_TypeError_.ma_divide.np_ma_core__DomainedBinar", "embedding": null, "metadata": {"file_path": "dask/array/numpy_compat.py", "file_name": "numpy_compat.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 48, "span_ids": ["imports"], "tokens": 434}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from distutils.version import LooseVersion\n\nimport numpy as np\nimport warnings\n\nfrom ..utils import derived_from\n\n_numpy_115 = LooseVersion(np.__version__) >= \"1.15.0\"\n_numpy_116 = LooseVersion(np.__version__) >= \"1.16.0\"\n_numpy_117 = LooseVersion(np.__version__) >= \"1.17.0\"\n_numpy_118 = LooseVersion(np.__version__) >= \"1.18.0\"\n_numpy_120 = LooseVersion(np.__version__) >= \"1.20.0\"\n\n\n# Taken from scikit-learn:\n# https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/fixes.py#L84\ntry:\n with warnings.catch_warnings():\n if (\n not np.allclose(\n np.divide(0.4, 1, casting=\"unsafe\"),\n np.divide(0.4, 1, casting=\"unsafe\", dtype=float),\n )\n or not np.allclose(np.divide(1, 0.5, dtype=\"i8\"), 2)\n or not np.allclose(np.divide(0.4, 1), 0.4)\n ):\n raise TypeError(\n \"Divide not working with dtype: \"\n \"https://github.com/numpy/numpy/issues/3484\"\n )\n divide = np.divide\n ma_divide = np.ma.divide\n\nexcept TypeError:\n # Divide with dtype doesn't work on Python 3\n def divide(x1, x2, out=None, dtype=None):\n \"\"\"Implementation of numpy.divide that works with dtype kwarg.\n\n Temporary compatibility fix for a bug in numpy's version. 
See\n https://github.com/numpy/numpy/issues/3484 for the relevant issue.\"\"\"\n x = np.divide(x1, x2, out)\n if dtype is not None:\n x = x.astype(dtype)\n return x\n\n ma_divide = np.ma.core._DomainedBinaryOperation(\n divide, np.ma.core._DomainSafeDivide(), 0, 1\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py_if_LooseVersion_np___vers_if_LooseVersion_np___vers.take_along_axis.return.arr__make_along_axis_idx_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py_if_LooseVersion_np___vers_if_LooseVersion_np___vers.take_along_axis.return.arr__make_along_axis_idx_", "embedding": null, "metadata": {"file_path": "dask/array/numpy_compat.py", "file_name": "numpy_compat.py", "file_type": "text/x-python", "category": "implementation", "start_line": 51, "end_line": 175, "span_ids": ["imports"], "tokens": 1270}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "if LooseVersion(np.__version__) < \"1.15.0\":\n # These functions were added in numpy 1.15.0. For previous versions they\n # are duplicated here\n\n def _make_along_axis_idx(arr_shape, indices, axis):\n # compute dimensions to iterate over\n if not np.issubdtype(indices.dtype, np.integer):\n raise IndexError(\"`indices` must be an integer array\")\n if len(arr_shape) != indices.ndim:\n raise ValueError(\n \"`indices` and `arr` must have the same number of dimensions\"\n )\n shape_ones = (1,) * indices.ndim\n dest_dims = list(range(axis)) + [None] + list(range(axis + 1, indices.ndim))\n\n # build a fancy index, consisting of orthogonal aranges, with the\n # requested index inserted at the right location\n fancy_index = []\n for dim, n in zip(dest_dims, arr_shape):\n if dim is None:\n fancy_index.append(indices)\n else:\n ind_shape = shape_ones[:dim] + (-1,) + shape_ones[dim + 1 :]\n fancy_index.append(np.arange(n).reshape(ind_shape))\n\n return tuple(fancy_index)\n\n def take_along_axis(arr, indices, axis):\n \"\"\"\n Take values from the input array by matching 1d index and data slices.\n This iterates over matching 1d slices oriented along the specified axis in\n the index and data arrays, and uses the former to look up values in the\n latter. These slices can be different lengths.\n Functions returning an index along an axis, like `argsort` and\n `argpartition`, produce suitable indices for this function.\n .. versionadded:: 1.15.0\n Parameters\n ----------\n arr: ndarray (Ni..., M, Nk...)\n Source array\n indices: ndarray (Ni..., J, Nk...)\n Indices to take along each 1d slice of `arr`. This must match the\n dimension of arr, but dimensions Ni and Nj only need to broadcast\n against `arr`.\n axis: int\n The axis to take 1d slices along. 
If axis is None, the input array is\n treated as if it had first been flattened to 1d, for consistency with\n `sort` and `argsort`.\n Returns\n -------\n out: ndarray (Ni..., J, Nk...)\n The indexed result.\n Notes\n -----\n This is equivalent to (but faster than) the following use of `ndindex` and\n `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices::\n Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:]\n J = indices.shape[axis] # Need not equal M\n out = np.empty(Nk + (J,) + Nk)\n for ii in ndindex(Ni):\n for kk in ndindex(Nk):\n a_1d = a [ii + s_[:,] + kk]\n indices_1d = indices[ii + s_[:,] + kk]\n out_1d = out [ii + s_[:,] + kk]\n for j in range(J):\n out_1d[j] = a_1d[indices_1d[j]]\n Equivalently, eliminating the inner loop, the last two lines would be::\n out_1d[:] = a_1d[indices_1d]\n See Also\n --------\n take : Take along an axis, using the same indices for every 1d slice\n put_along_axis :\n Put values into the destination array by matching 1d index and data slices\n Examples\n --------\n For this sample array\n >>> a = np.array([[10, 30, 20], [60, 40, 50]])\n\n We can sort either by using sort directly, or argsort and this function\n >>> np.sort(a, axis=1)\n array([[10, 20, 30],\n [40, 50, 60]])\n >>> ai = np.argsort(a, axis=1); ai\n array([[0, 2, 1],\n [1, 2, 0]])\n >>> take_along_axis(a, ai, axis=1)\n array([[10, 20, 30],\n [40, 50, 60]])\n\n The same works for max and min, if you expand the dimensions:\n >>> np.expand_dims(np.max(a, axis=1), axis=1)\n array([[30],\n [60]])\n >>> ai = np.expand_dims(np.argmax(a, axis=1), axis=1)\n >>> ai\n array([[1],\n [0]])\n >>> take_along_axis(a, ai, axis=1)\n array([[30],\n [60]])\n\n If we want to get the max and min at the same time,\n we can stack the indices first:\n >>> ai_min = np.expand_dims(np.argmin(a, axis=1), axis=1)\n >>> ai_max = np.expand_dims(np.argmax(a, axis=1), axis=1)\n >>> ai = np.concatenate([ai_min, ai_max], axis=1)\n >>> ai\n array([[0, 1],\n [1, 0]])\n >>> take_along_axis(a, ai, axis=1)\n array([[10, 30],\n [40, 60]])\n \"\"\"\n # normalize inputs\n if axis is None:\n arr = arr.flat\n arr_shape = (len(arr),) # flatiter has no .shape\n axis = 0\n else:\n if axis < 0:\n axis = arr.ndim + axis\n arr_shape = arr.shape\n\n # use the fancy index\n return arr[_make_along_axis_idx(arr_shape, indices, axis)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py__make_sliced_dtype_np_ge_16__make_sliced_dtype_np_ge_16.return.np_dtype_new_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py__make_sliced_dtype_np_ge_16__make_sliced_dtype_np_ge_16.return.np_dtype_new_", "embedding": null, "metadata": {"file_path": "dask/array/numpy_compat.py", "file_name": "numpy_compat.py", "file_type": "text/x-python", "category": "implementation", "start_line": 178, "end_line": 192, "span_ids": ["_make_sliced_dtype_np_ge_16"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def 
_make_sliced_dtype_np_ge_16(dtype, index):\n # This was briefly added in 1.14.0\n # https://github.com/numpy/numpy/pull/6053, NumPy >= 1.14\n # which was then reverted in 1.14.1 with\n # https://github.com/numpy/numpy/pull/10411\n # And then was finally released with\n # https://github.com/numpy/numpy/pull/12447\n # in version 1.16.0\n new = {\n \"names\": index,\n \"formats\": [dtype.fields[name][0] for name in index],\n \"offsets\": [dtype.fields[name][1] for name in index],\n \"itemsize\": dtype.itemsize,\n }\n return np.dtype(new)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py__make_sliced_dtype_np_lt_14_None_1.else_._make_sliced_dtype._make_sliced_dtype_np_lt_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py__make_sliced_dtype_np_lt_14_None_1.else_._make_sliced_dtype._make_sliced_dtype_np_lt_", "embedding": null, "metadata": {"file_path": "dask/array/numpy_compat.py", "file_name": "numpy_compat.py", "file_type": "text/x-python", "category": "implementation", "start_line": 195, "end_line": 206, "span_ids": ["impl:23", "_make_sliced_dtype_np_lt_14"], "tokens": 116}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _make_sliced_dtype_np_lt_14(dtype, index):\n # For numpy < 1.14\n dt = np.dtype([(name, dtype[name]) for name in index])\n return dt\n\n\nif LooseVersion(np.__version__) >= LooseVersion(\"1.16.0\") or LooseVersion(\n np.__version__\n) == LooseVersion(\"1.14.0\"):\n _make_sliced_dtype = _make_sliced_dtype_np_ge_16\nelse:\n _make_sliced_dtype = _make_sliced_dtype_np_lt_14", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py__Recurser__Recurser.map_reduce.return.f_x_kwargs_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py__Recurser__Recurser.map_reduce.return.f_x_kwargs_", "embedding": null, "metadata": {"file_path": "dask/array/numpy_compat.py", "file_name": "numpy_compat.py", "file_type": "text/x-python", "category": "implementation", "start_line": 209, "end_line": 269, "span_ids": ["_Recurser.__init__", "_Recurser", "_Recurser.map_reduce"], "tokens": 413}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Recurser(object):\n \"\"\"\n Utility class for recursing over nested iterables\n \"\"\"\n\n # This was copied almost verbatim from numpy.core.shape_base._Recurser\n\n def __init__(self, recurse_if):\n self.recurse_if = 
recurse_if\n\n def map_reduce(\n self,\n x,\n f_map=lambda x, **kwargs: x,\n f_reduce=lambda x, **kwargs: x,\n f_kwargs=lambda **kwargs: kwargs,\n **kwargs\n ):\n \"\"\"\n Iterate over the nested list, applying:\n * ``f_map`` (T -> U) to items\n * ``f_reduce`` (Iterable[U] -> U) to mapped items\n\n For instance, ``map_reduce([[1, 2], 3, 4])`` is::\n\n f_reduce([\n f_reduce([\n f_map(1),\n f_map(2)\n ]),\n f_map(3),\n f_map(4)\n ]])\n\n\n State can be passed down through the calls with `f_kwargs`,\n to iterables of mapped items. When kwargs are passed, as in\n ``map_reduce([[1, 2], 3, 4], **kw)``, this becomes::\n\n kw1 = f_kwargs(**kw)\n kw2 = f_kwargs(**kw1)\n f_reduce([\n f_reduce([\n f_map(1), **kw2)\n f_map(2, **kw2)\n ], **kw1),\n f_map(3, **kw1),\n f_map(4, **kw1)\n ]], **kw)\n \"\"\"\n\n def f(x, **kwargs):\n if not self.recurse_if(x):\n return f_map(x, **kwargs)\n else:\n next_kwargs = f_kwargs(**kwargs)\n return f_reduce((f(xi, **next_kwargs) for xi in x), **kwargs)\n\n return f(x, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py__Recurser.walk_if__numpy_116_.else_._unravel_index_keyword._dims_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py__Recurser.walk_if__numpy_116_.else_._unravel_index_keyword._dims_", "embedding": null, "metadata": {"file_path": "dask/array/numpy_compat.py", "file_name": "numpy_compat.py", "file_type": "text/x-python", "category": "implementation", "start_line": 271, "end_line": 294, "span_ids": ["impl:29", "_Recurser.walk"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Recurser(object):\n\n def walk(self, x, index=()):\n \"\"\"\n Iterate over x, yielding (index, value, entering), where\n\n * ``index``: a tuple of indices up to this point\n * ``value``: equal to ``x[index[0]][...][index[-1]]``. On the first iteration, is\n ``x`` itself\n * ``entering``: bool. 
The result of ``recurse_if(value)``\n \"\"\"\n do_recurse = self.recurse_if(x)\n yield index, x, do_recurse\n\n if not do_recurse:\n return\n for i, xi in enumerate(x):\n # yield from ...\n for v in self.walk(xi, index + (i,)):\n yield v\n\n\nif _numpy_116:\n _unravel_index_keyword = \"shape\"\nelse:\n _unravel_index_keyword = \"dims\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py__Implementation_taken_di_moveaxis.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py__Implementation_taken_di_moveaxis.return.result", "embedding": null, "metadata": {"file_path": "dask/array/numpy_compat.py", "file_name": "numpy_compat.py", "file_type": "text/x-python", "category": "implementation", "start_line": 297, "end_line": 317, "span_ids": ["impl:29", "moveaxis"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# Implementation taken directly from numpy:\n# https://github.com/numpy/numpy/blob/d9b1e32cb8ef90d6b4a47853241db2a28146a57d/numpy/core/numeric.py#L1336-L1405\n@derived_from(np)\ndef moveaxis(a, source, destination):\n source = np.core.numeric.normalize_axis_tuple(source, a.ndim, \"source\")\n destination = np.core.numeric.normalize_axis_tuple(\n destination, a.ndim, \"destination\"\n )\n if len(source) != len(destination):\n raise ValueError(\n \"`source` and `destination` arguments must have \"\n \"the same number of elements\"\n )\n\n order = [n for n in range(a.ndim) if n not in source]\n\n for dest, src in sorted(zip(destination, source)):\n order.insert(dest, src)\n\n result = a.transpose(order)\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py__Implementation_adapted__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py__Implementation_adapted__", "embedding": null, "metadata": {"file_path": "dask/array/numpy_compat.py", "file_name": "numpy_compat.py", "file_type": "text/x-python", "category": "implementation", "start_line": 320, "end_line": 339, "span_ids": ["rollaxis", "moveaxis"], "tokens": 199}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# Implementation adapted directly from numpy:\n# https://github.com/numpy/numpy/blob/v1.17.0/numpy/core/numeric.py#L1107-L1204\ndef rollaxis(a, axis, start=0):\n n = a.ndim\n axis = np.core.numeric.normalize_axis_index(axis, n)\n if start < 0:\n start += n\n msg = 
\"'%s' arg requires %d <= %s < %d, but %d was passed in\"\n if not (0 <= start < n + 1):\n raise ValueError(msg % (\"start\", -n, \"start\", n + 1, start))\n if axis < start:\n # it's been removed\n start -= 1\n if axis == start:\n return a[...]\n axes = list(range(0, n))\n axes.remove(axis)\n axes.insert(start, axis)\n return a.transpose(axes)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_from_itertools_import_zip_GETNOREMOVE._getter_getter_nofancy_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_from_itertools_import_zip_GETNOREMOVE._getter_getter_nofancy_", "embedding": null, "metadata": {"file_path": "dask/array/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 21, "span_ids": ["imports"], "tokens": 176}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from itertools import zip_longest\nfrom operator import getitem\n\nimport numpy as np\n\nfrom .core import getter, getter_nofancy, getter_inline\nfrom .. import config\nfrom ..blockwise import optimize_blockwise, fuse_roots\nfrom ..core import flatten, reverse_dict\nfrom ..optimization import fuse, inline_functions\nfrom ..utils import ensure_dict\nfrom ..highlevelgraph import HighLevelGraph\n\nfrom numbers import Integral\n\n# All get* functions the optimizations know about\nGETTERS = (getter, getter_nofancy, getter_inline, getitem)\n# These get* functions aren't ever completely removed from the graph,\n# even if the index should be a no-op by numpy semantics. 
Some array-like's\n# don't completely follow semantics, making indexing always necessary.\nGETNOREMOVE = (getter, getter_nofancy)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_hold_keys_hold_keys.return.hold_keys": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_hold_keys_hold_keys.return.hold_keys", "embedding": null, "metadata": {"file_path": "dask/array/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 73, "end_line": 108, "span_ids": ["hold_keys"], "tokens": 337}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def hold_keys(dsk, dependencies):\n \"\"\"Find keys to avoid fusion\n\n We don't want to fuse data present in the graph because it is easier to\n serialize as a raw value.\n\n We don't want to fuse chains after getitem/GETTERS because we want to\n move around only small pieces of data, rather than the underlying arrays.\n \"\"\"\n dependents = reverse_dict(dependencies)\n data = {k for k, v in dsk.items() if type(v) not in (tuple, str)}\n\n hold_keys = list(data)\n for dat in data:\n deps = dependents[dat]\n for dep in deps:\n task = dsk[dep]\n # If the task is a get* function, we walk up the chain, and stop\n # when there's either more than one dependent, or the dependent is\n # no longer a get* function or an alias. 
We then add the final\n # key to the list of keys not to fuse.\n if type(task) is tuple and task and task[0] in GETTERS:\n try:\n while len(dependents[dep]) == 1:\n new_dep = next(iter(dependents[dep]))\n new_task = dsk[new_dep]\n # If the task is a get* or an alias, continue up the\n # linear chain\n if new_task[0] in GETTERS or new_task in dsk:\n dep = new_dep\n else:\n break\n except (IndexError, TypeError):\n pass\n hold_keys.append(dep)\n return hold_keys", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_optimize_slices_optimize_slices.return.dsk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_optimize_slices_optimize_slices.return.dsk", "embedding": null, "metadata": {"file_path": "dask/array/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 111, "end_line": 195, "span_ids": ["optimize_slices"], "tokens": 758}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def optimize_slices(dsk):\n \"\"\"Optimize slices\n\n 1. Fuse repeated slices, like x[5:][2:6] -> x[7:11]\n 2. Remove full slices, like x[:] -> x\n\n See also:\n fuse_slice_dict\n \"\"\"\n fancy_ind_types = (list, np.ndarray)\n dsk = dsk.copy()\n for k, v in dsk.items():\n if type(v) is tuple and v[0] in GETTERS and len(v) in (3, 5):\n if len(v) == 3:\n get, a, a_index = v\n # getter defaults to asarray=True, getitem is semantically False\n a_asarray = get is not getitem\n a_lock = None\n else:\n get, a, a_index, a_asarray, a_lock = v\n while type(a) is tuple and a[0] in GETTERS and len(a) in (3, 5):\n if len(a) == 3:\n f2, b, b_index = a\n b_asarray = f2 is not getitem\n b_lock = None\n else:\n f2, b, b_index, b_asarray, b_lock = a\n\n if a_lock and a_lock is not b_lock:\n break\n if (type(a_index) is tuple) != (type(b_index) is tuple):\n break\n if type(a_index) is tuple:\n indices = b_index + a_index\n if len(a_index) != len(b_index) and any(i is None for i in indices):\n break\n if f2 is getter_nofancy and any(\n isinstance(i, fancy_ind_types) for i in indices\n ):\n break\n elif f2 is getter_nofancy and (\n type(a_index) in fancy_ind_types or type(b_index) in fancy_ind_types\n ):\n break\n try:\n c_index = fuse_slice(b_index, a_index)\n # rely on fact that nested gets never decrease in\n # strictness e.g. 
`(getter_nofancy, (getter, ...))` never\n # happens\n get = getter if f2 is getter_inline else f2\n except NotImplementedError:\n break\n a, a_index, a_lock = b, c_index, b_lock\n a_asarray |= b_asarray\n\n # Skip the get call if not from from_array and nothing to do\n if get not in GETNOREMOVE and (\n (\n type(a_index) is slice\n and not a_index.start\n and a_index.stop is None\n and a_index.step is None\n )\n or (\n type(a_index) is tuple\n and all(\n type(s) is slice\n and not s.start\n and s.stop is None\n and s.step is None\n for s in a_index\n )\n )\n ):\n dsk[k] = a\n elif get is getitem or (a_asarray and not a_lock):\n # default settings are fine, drop the extra parameters. Since we\n # always fall back to inner `get` functions, `get is getitem`\n # can only occur if all gets are getitem, meaning all\n # parameters must be getitem defaults.\n dsk[k] = (get, a, a_index)\n else:\n dsk[k] = (get, a, a_index, a_asarray, a_lock)\n\n return dsk", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_normalize_slice_check_for_nonfusible_fancy_indexing.for_f_n_in_zip_longest_f.if_type_f_is_not_list_an.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_normalize_slice_check_for_nonfusible_fancy_indexing.for_f_n_in_zip_longest_f.if_type_f_is_not_list_an.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/array/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 198, "end_line": 227, "span_ids": ["check_for_nonfusible_fancy_indexing", "normalize_slice"], "tokens": 301}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def normalize_slice(s):\n \"\"\"Replace Nones in slices with integers\n\n >>> normalize_slice(slice(None, None, None))\n slice(0, None, 1)\n \"\"\"\n start, stop, step = s.start, s.stop, s.step\n if start is None:\n start = 0\n if step is None:\n step = 1\n if start < 0 or step < 0 or stop is not None and stop < 0:\n raise NotImplementedError()\n return slice(start, stop, step)\n\n\ndef check_for_nonfusible_fancy_indexing(fancy, normal):\n # Check for fancy indexing and normal indexing, where the fancy\n # indexed dimensions != normal indexed dimensions with integers. 
E.g.:\n # disallow things like:\n # x[:, [1, 2], :][0, :, :] -> x[0, [1, 2], :] or\n # x[0, :, :][:, [1, 2], :] -> x[0, [1, 2], :]\n for f, n in zip_longest(fancy, normal, fillvalue=slice(None)):\n if type(f) is not list and isinstance(n, Integral):\n raise NotImplementedError(\n \"Can't handle normal indexing with \"\n \"integers and fancy indexing if the \"\n \"integers and fancy indices don't \"\n \"align with the same dimensions.\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_fuse_slice_fuse_slice._and_newaxes": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_fuse_slice_fuse_slice._and_newaxes", "embedding": null, "metadata": {"file_path": "dask/array/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 230, "end_line": 298, "span_ids": ["fuse_slice"], "tokens": 581}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def fuse_slice(a, b):\n \"\"\"Fuse stacked slices together\n\n Fuse a pair of repeated slices into a single slice:\n\n >>> fuse_slice(slice(1000, 2000), slice(10, 15))\n slice(1010, 1015, None)\n\n This also works for tuples of slices\n\n >>> fuse_slice((slice(100, 200), slice(100, 200, 10)),\n ... (slice(10, 15), [5, 2]))\n (slice(110, 115, None), [150, 120])\n\n And a variety of other interesting cases\n\n >>> fuse_slice(slice(1000, 2000), 10) # integers\n 1010\n\n >>> fuse_slice(slice(1000, 2000, 5), slice(10, 20, 2))\n slice(1050, 1100, 10)\n\n >>> fuse_slice(slice(1000, 2000, 5), [1, 2, 3]) # lists\n [1005, 1010, 1015]\n\n >>> fuse_slice(None, slice(None, None)) # doctest: +SKIP\n None\n \"\"\"\n # None only works if the second side is a full slice\n if a is None and isinstance(b, slice) and b == slice(None, None):\n return None\n\n # Replace None with 0 and 1 in start and step\n if isinstance(a, slice):\n a = normalize_slice(a)\n if isinstance(b, slice):\n b = normalize_slice(b)\n\n if isinstance(a, slice) and isinstance(b, Integral):\n if b < 0:\n raise NotImplementedError()\n return a.start + b * a.step\n\n if isinstance(a, slice) and isinstance(b, slice):\n start = a.start + a.step * b.start\n if b.stop is not None:\n stop = a.start + a.step * b.stop\n else:\n stop = None\n if a.stop is not None:\n if stop is not None:\n stop = min(a.stop, stop)\n else:\n stop = a.stop\n step = a.step * b.step\n if step == 1:\n step = None\n return slice(start, stop, step)\n\n if isinstance(b, list):\n return [fuse_slice(a, bb) for bb in b]\n if isinstance(a, list) and isinstance(b, (Integral, slice)):\n return a[b]\n\n if isinstance(a, tuple) and not isinstance(b, tuple):\n b = (b,)\n\n # If given two tuples, walk through both, being mindful of uneven sizes\n # and newaxes\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_fuse_slice.None_8_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_fuse_slice.None_8_", "embedding": null, "metadata": {"file_path": "dask/array/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 299, "end_line": 328, "span_ids": ["fuse_slice"], "tokens": 280}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def fuse_slice(a, b):\n # ... other code\n if isinstance(a, tuple) and isinstance(b, tuple):\n\n # Check for non-fusible cases with fancy-indexing\n a_has_lists = any(isinstance(item, list) for item in a)\n b_has_lists = any(isinstance(item, list) for item in b)\n if a_has_lists and b_has_lists:\n raise NotImplementedError(\"Can't handle multiple list indexing\")\n elif a_has_lists:\n check_for_nonfusible_fancy_indexing(a, b)\n elif b_has_lists:\n check_for_nonfusible_fancy_indexing(b, a)\n\n j = 0\n result = list()\n for i in range(len(a)):\n # axis ceased to exist or we're out of b\n if isinstance(a[i], Integral) or j == len(b):\n result.append(a[i])\n continue\n while b[j] is None: # insert any Nones on the rhs\n result.append(None)\n j += 1\n result.append(fuse_slice(a[i], b[j])) # Common case\n j += 1\n while j < len(b): # anything leftover on the right?\n result.append(b[j])\n j += 1\n return tuple(result)\n raise NotImplementedError()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_expand_key_expand_key.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_expand_key_expand_key.return.result", "embedding": null, "metadata": {"file_path": "dask/array/overlap.py", "file_name": "overlap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 63, "end_line": 115, "span_ids": ["expand_key"], "tokens": 604}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def expand_key(k, dims, name=None, axes=None):\n \"\"\"Get all neighboring keys around center\n\n Parameters\n ----------\n k: tuple\n They key around which to generate new keys\n dims: Sequence[int]\n The number of chunks in each dimension\n name: Option[str]\n The name to include in the output keys, or none to include no name\n axes: Dict[int, int]\n The axes active in the expansion. 
We don't expand on non-active axes\n\n Examples\n --------\n >>> expand_key(('x', 2, 3), dims=[5, 5], name='y', axes={0: 1, 1: 1}) # doctest: +NORMALIZE_WHITESPACE\n [[('y', 1.1, 2.1), ('y', 1.1, 3), ('y', 1.1, 3.9)],\n [('y', 2, 2.1), ('y', 2, 3), ('y', 2, 3.9)],\n [('y', 2.9, 2.1), ('y', 2.9, 3), ('y', 2.9, 3.9)]]\n\n >>> expand_key(('x', 0, 4), dims=[5, 5], name='y', axes={0: 1, 1: 1}) # doctest: +NORMALIZE_WHITESPACE\n [[('y', 0, 3.1), ('y', 0, 4)],\n [('y', 0.9, 3.1), ('y', 0.9, 4)]]\n \"\"\"\n\n def inds(i, ind):\n rv = []\n if ind - 0.9 > 0:\n rv.append(ind - 0.9)\n rv.append(ind)\n if ind + 0.9 < dims[i] - 1:\n rv.append(ind + 0.9)\n return rv\n\n shape = []\n for i, ind in enumerate(k[1:]):\n num = 1\n if ind > 0:\n num += 1\n if ind < dims[i] - 1:\n num += 1\n shape.append(num)\n\n args = [\n inds(i, ind) if any((axes.get(i, 0),)) else [ind] for i, ind in enumerate(k[1:])\n ]\n if name is not None:\n args = [[name]] + args\n seq = list(product(*args))\n shape2 = [d if any((axes.get(i, 0),)) else 1 for i, d in enumerate(shape)]\n result = reshapelist(shape2, seq)\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_overlap_internal_overlap_internal.return.Array_graph_name_chunks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_overlap_internal_overlap_internal.return.Array_graph_name_chunks", "embedding": null, "metadata": {"file_path": "dask/array/overlap.py", "file_name": "overlap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 118, "end_line": 178, "span_ids": ["overlap_internal"], "tokens": 495}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def overlap_internal(x, axes):\n \"\"\"Share boundaries between neighboring blocks\n\n Parameters\n ----------\n\n x: da.Array\n A dask array\n axes: dict\n The size of the shared boundary per axis\n\n The axes input informs how many cells to overlap between neighboring blocks\n {0: 2, 2: 5} means share two cells in 0 axis, 5 cells in 2 axis\n \"\"\"\n dims = list(map(len, x.chunks))\n expand_key2 = partial(expand_key, dims=dims, axes=axes)\n\n # Make keys for each of the surrounding sub-arrays\n interior_keys = pipe(\n x.__dask_keys__(), flatten, map(expand_key2), map(flatten), concat, list\n )\n\n name = \"overlap-\" + tokenize(x, axes)\n getitem_name = \"getitem-\" + tokenize(x, axes)\n interior_slices = {}\n overlap_blocks = {}\n for k in interior_keys:\n frac_slice = fractional_slice((x.name,) + k, axes)\n if (x.name,) + k != frac_slice:\n interior_slices[(getitem_name,) + k] = frac_slice\n else:\n interior_slices[(getitem_name,) + k] = (x.name,) + k\n overlap_blocks[(name,) + k] = (\n concatenate3,\n (concrete, expand_key2((None,) + k, name=getitem_name)),\n )\n\n chunks = []\n for i, bds in enumerate(x.chunks):\n depth = axes.get(i, 0)\n if isinstance(depth, tuple):\n left_depth = depth[0]\n right_depth = depth[1]\n else:\n left_depth = depth\n right_depth = depth\n\n if 
len(bds) == 1:\n chunks.append(bds)\n else:\n left = [bds[0] + right_depth]\n right = [bds[-1] + left_depth]\n mid = []\n for bd in bds[1:-1]:\n mid.append(bd + left_depth + right_depth)\n chunks.append(left + mid + right)\n\n dsk = merge(interior_slices, overlap_blocks)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x])\n\n return Array(graph, name, chunks, meta=x)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_trim_overlap_trim_internal.return.map_blocks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_trim_overlap_trim_internal.return.map_blocks_", "embedding": null, "metadata": {"file_path": "dask/array/overlap.py", "file_name": "overlap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 181, "end_line": 242, "span_ids": ["trim_internal", "trim_overlap"], "tokens": 409}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def trim_overlap(x, depth, boundary=None):\n \"\"\"Trim sides from each block.\n\n This couples well with the ``map_overlap`` operation which may leave\n excess data on each block.\n\n See also\n --------\n dask.array.overlap.map_overlap\n\n \"\"\"\n\n # parameter to be passed to trim_internal\n axes = coerce_depth(x.ndim, depth)\n boundary2 = coerce_boundary(x.ndim, boundary)\n return trim_internal(x, axes=axes, boundary=boundary2)\n\n\ndef trim_internal(x, axes, boundary=None):\n \"\"\"Trim sides from each block\n\n This couples well with the overlap operation, which may leave excess data on\n each block\n\n See also\n --------\n dask.array.chunk.trim\n dask.array.map_blocks\n \"\"\"\n boundary = coerce_boundary(x.ndim, boundary)\n\n olist = []\n for i, bd in enumerate(x.chunks):\n bdy = boundary.get(i, \"none\")\n overlap = axes.get(i, 0)\n ilist = []\n for j, d in enumerate(bd):\n if bdy != \"none\":\n if isinstance(overlap, tuple):\n d = d - sum(overlap)\n else:\n d = d - overlap * 2\n\n else:\n if isinstance(overlap, tuple):\n d = d - overlap[0] if j != 0 else d\n d = d - overlap[1] if j != len(bd) - 1 else d\n else:\n d = d - overlap if j != 0 else d\n d = d - overlap if j != len(bd) - 1 else d\n\n ilist.append(d)\n olist.append(tuple(ilist))\n chunks = tuple(olist)\n\n return map_blocks(\n partial(_trim, axes=axes, boundary=boundary),\n x,\n chunks=chunks,\n dtype=x.dtype,\n meta=x._meta,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py__trim__trim.return.x_ind_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py__trim__trim.return.x_ind_", "embedding": null, "metadata": {"file_path": "dask/array/overlap.py", "file_name": "overlap.py", "file_type": "text/x-python", 
"category": "implementation", "start_line": 245, "end_line": 278, "span_ids": ["_trim"], "tokens": 302}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _trim(x, axes, boundary, block_info):\n \"\"\"Similar to dask.array.chunk.trim but requires one to specificy the\n boundary condition.\n\n ``axes``, and ``boundary`` are assumed to have been coerced.\n\n \"\"\"\n axes = [axes.get(i, 0) for i in range(x.ndim)]\n axes_front = (ax[0] if isinstance(ax, tuple) else ax for ax in axes)\n axes_back = (\n -ax[1]\n if isinstance(ax, tuple) and ax[1]\n else -ax\n if isinstance(ax, Integral) and ax\n else None\n for ax in axes\n )\n\n trim_front = (\n 0 if (chunk_location == 0 and boundary.get(i, \"none\") == \"none\") else ax\n for i, (chunk_location, ax) in enumerate(\n zip(block_info[0][\"chunk-location\"], axes_front)\n )\n )\n trim_back = (\n None\n if (chunk_location == chunks - 1 and boundary.get(i, \"none\") == \"none\")\n else ax\n for i, (chunks, chunk_location, ax) in enumerate(\n zip(block_info[0][\"num-chunks\"], block_info[0][\"chunk-location\"], axes_back)\n )\n )\n ind = tuple(slice(front, back) for front, back in zip(trim_front, trim_back))\n return x[ind]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_periodic_periodic.return.concatenate_r_x_l_ax": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_periodic_periodic.return.concatenate_r_x_l_ax", "embedding": null, "metadata": {"file_path": "dask/array/overlap.py", "file_name": "overlap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 281, "end_line": 302, "span_ids": ["periodic"], "tokens": 178}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def periodic(x, axis, depth):\n \"\"\"Copy a slice of an array around to its other side\n\n Useful to create periodic boundary conditions for overlap\n \"\"\"\n\n left = (\n (slice(None, None, None),) * axis\n + (slice(0, depth),)\n + (slice(None, None, None),) * (x.ndim - axis - 1)\n )\n right = (\n (slice(None, None, None),) * axis\n + (slice(-depth, None),)\n + (slice(None, None, None),) * (x.ndim - axis - 1)\n )\n l = x[left]\n r = x[right]\n\n l, r = _remove_overlap_boundaries(l, r, axis, depth)\n\n return concatenate([r, x, l], axis=axis)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_reflect_reflect.return.concatenate_l_x_r_ax": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_reflect_reflect.return.concatenate_l_x_r_ax", "embedding": null, "metadata": {"file_path": "dask/array/overlap.py", "file_name": "overlap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 305, "end_line": 332, "span_ids": ["reflect"], "tokens": 248}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def reflect(x, axis, depth):\n \"\"\"Reflect boundaries of array on the same side\n\n This is the converse of ``periodic``\n \"\"\"\n if depth == 1:\n left = (\n (slice(None, None, None),) * axis\n + (slice(0, 1),)\n + (slice(None, None, None),) * (x.ndim - axis - 1)\n )\n else:\n left = (\n (slice(None, None, None),) * axis\n + (slice(depth - 1, None, -1),)\n + (slice(None, None, None),) * (x.ndim - axis - 1)\n )\n right = (\n (slice(None, None, None),) * axis\n + (slice(-1, -depth - 1, -1),)\n + (slice(None, None, None),) * (x.ndim - axis - 1)\n )\n l = x[left]\n r = x[right]\n\n l, r = _remove_overlap_boundaries(l, r, axis, depth)\n\n return concatenate([l, x, r], axis=axis)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_nearest_nearest.return.concatenate_l_x_r_ax": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_nearest_nearest.return.concatenate_l_x_r_ax", "embedding": null, "metadata": {"file_path": "dask/array/overlap.py", "file_name": "overlap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 335, "end_line": 357, "span_ids": ["nearest"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def nearest(x, axis, depth):\n \"\"\"Each reflect each boundary value outwards\n\n This mimics what the skimage.filters.gaussian_filter(... 
mode=\"nearest\")\n does.\n \"\"\"\n left = (\n (slice(None, None, None),) * axis\n + (slice(0, 1),)\n + (slice(None, None, None),) * (x.ndim - axis - 1)\n )\n right = (\n (slice(None, None, None),) * axis\n + (slice(-1, -2, -1),)\n + (slice(None, None, None),) * (x.ndim - axis - 1)\n )\n\n l = concatenate([x[left]] * depth, axis=axis)\n r = concatenate([x[right]] * depth, axis=axis)\n\n l, r = _remove_overlap_boundaries(l, r, axis, depth)\n\n return concatenate([l, x, r], axis=axis)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_constant__remove_overlap_boundaries.return.l_r": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_constant__remove_overlap_boundaries.return.l_r", "embedding": null, "metadata": {"file_path": "dask/array/overlap.py", "file_name": "overlap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 361, "end_line": 385, "span_ids": ["constant", "_remove_overlap_boundaries"], "tokens": 163}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def constant(x, axis, depth, value):\n \"\"\" Add constant slice to either side of array \"\"\"\n chunks = list(x.chunks)\n chunks[axis] = (depth,)\n\n c = full_like(\n x,\n value,\n shape=tuple(map(sum, chunks)),\n chunks=tuple(chunks),\n dtype=x.dtype,\n )\n\n return concatenate([c, x, c], axis=axis)\n\n\ndef _remove_overlap_boundaries(l, r, axis, depth):\n lchunks = list(l.chunks)\n lchunks[axis] = (depth,)\n rchunks = list(r.chunks)\n rchunks[axis] = (depth,)\n\n l = l.rechunk(tuple(lchunks))\n r = r.rechunk(tuple(rchunks))\n return l, r", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_boundaries_boundaries.return.x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_boundaries_boundaries.return.x", "embedding": null, "metadata": {"file_path": "dask/array/overlap.py", "file_name": "overlap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 392, "end_line": 422, "span_ids": ["boundaries"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def boundaries(x, depth=None, kind=None):\n \"\"\"Add boundary conditions to an array before overlaping\n\n See Also\n --------\n periodic\n constant\n \"\"\"\n if not isinstance(kind, dict):\n kind = dict((i, kind) for i in range(x.ndim))\n if not isinstance(depth, dict):\n depth = dict((i, depth) for i in 
range(x.ndim))\n\n for i in range(x.ndim):\n d = depth.get(i, 0)\n if d == 0:\n continue\n\n this_kind = kind.get(i, \"none\")\n if this_kind == \"none\":\n continue\n elif this_kind == \"periodic\":\n x = periodic(x, i, d)\n elif this_kind == \"reflect\":\n x = reflect(x, i, d)\n elif this_kind == \"nearest\":\n x = nearest(x, i, d)\n elif i in kind:\n x = constant(x, i, d, kind[i])\n\n return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_overlap_overlap._Share_boundaries_betwe": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_overlap_overlap._Share_boundaries_betwe", "embedding": null, "metadata": {"file_path": "dask/array/overlap.py", "file_name": "overlap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 425, "end_line": 476, "span_ids": ["overlap"], "tokens": 1038}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def overlap(x, depth, boundary):\n \"\"\"Share boundaries between neighboring blocks\n\n Parameters\n ----------\n\n x: da.Array\n A dask array\n depth: dict\n The size of the shared boundary per axis\n boundary: dict\n The boundary condition on each axis. Options are 'reflect', 'periodic',\n 'nearest', 'none', or an array value. Such a value will fill the\n boundary with that value.\n\n The depth input informs how many cells to overlap between neighboring\n blocks ``{0: 2, 2: 5}`` means share two cells in 0 axis, 5 cells in 2 axis.\n Axes missing from this input will not be overlapped.\n\n Examples\n --------\n >>> import numpy as np\n >>> import dask.array as da\n\n >>> x = np.arange(64).reshape((8, 8))\n >>> d = da.from_array(x, chunks=(4, 4))\n >>> d.chunks\n ((4, 4), (4, 4))\n\n >>> g = da.overlap.overlap(d, depth={0: 2, 1: 1},\n ... boundary={0: 100, 1: 'reflect'})\n >>> g.chunks\n ((8, 8), (6, 6))\n\n >>> np.array(g)\n array([[100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100],\n [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100],\n [ 0, 0, 1, 2, 3, 4, 3, 4, 5, 6, 7, 7],\n [ 8, 8, 9, 10, 11, 12, 11, 12, 13, 14, 15, 15],\n [ 16, 16, 17, 18, 19, 20, 19, 20, 21, 22, 23, 23],\n [ 24, 24, 25, 26, 27, 28, 27, 28, 29, 30, 31, 31],\n [ 32, 32, 33, 34, 35, 36, 35, 36, 37, 38, 39, 39],\n [ 40, 40, 41, 42, 43, 44, 43, 44, 45, 46, 47, 47],\n [ 16, 16, 17, 18, 19, 20, 19, 20, 21, 22, 23, 23],\n [ 24, 24, 25, 26, 27, 28, 27, 28, 29, 30, 31, 31],\n [ 32, 32, 33, 34, 35, 36, 35, 36, 37, 38, 39, 39],\n [ 40, 40, 41, 42, 43, 44, 43, 44, 45, 46, 47, 47],\n [ 48, 48, 49, 50, 51, 52, 51, 52, 53, 54, 55, 55],\n [ 56, 56, 57, 58, 59, 60, 59, 60, 61, 62, 63, 63],\n [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100],\n [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100]])\n \"\"\"\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_overlap.depth2_overlap.return.x4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_overlap.depth2_overlap.return.x4", "embedding": null, "metadata": {"file_path": "dask/array/overlap.py", "file_name": "overlap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 477, "end_line": 498, "span_ids": ["overlap"], "tokens": 245}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def overlap(x, depth, boundary):\n depth2 = coerce_depth(x.ndim, depth)\n boundary2 = coerce_boundary(x.ndim, boundary)\n\n # is depth larger than chunk size?\n depth_values = [depth2.get(i, 0) for i in range(x.ndim)]\n for d, c in zip(depth_values, x.chunks):\n maxd = max(d) if isinstance(d, tuple) else d\n if maxd > min(c):\n raise ValueError(\n \"The overlapping depth %d is larger than your\\n\"\n \"smallest chunk size %d. Rechunk your array\\n\"\n \"with a larger chunk size or a chunk size that\\n\"\n \"more evenly divides the shape of your array.\" % (d, min(c))\n )\n x2 = boundaries(x, depth2, boundary2)\n x3 = overlap_internal(x2, depth2)\n trim = dict(\n (k, v * 2 if boundary2.get(k, \"none\") != \"none\" else 0)\n for k, v in depth2.items()\n )\n x4 = chunk.trim(x3, trim)\n return x4", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_map_overlap._Look_for_invocation_usi_map_overlap.if_trim_.else_.return.x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_map_overlap._Look_for_invocation_usi_map_overlap.if_trim_.else_.return.x", "embedding": null, "metadata": {"file_path": "dask/array/overlap.py", "file_name": "overlap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 685, "end_line": 756, "span_ids": ["map_overlap"], "tokens": 789}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def map_overlap(\n func, *args, depth=None, boundary=None, trim=True, align_arrays=True, **kwargs\n):\n # Look for invocation using deprecated single-array signature\n # map_overlap(x, func, depth, boundary=None, trim=True, **kwargs)\n if isinstance(func, Array) and callable(args[0]):\n warnings.warn(\n \"The use of map_overlap(array, func, **kwargs) is deprecated since dask 2.17.0 \"\n \"and will be an error in a future release. 
To silence this warning, use the syntax \"\n \"map_overlap(func, array0,[ array1, ...,] **kwargs) instead.\",\n FutureWarning,\n )\n sig = [\"func\", \"depth\", \"boundary\", \"trim\"]\n depth = get(sig.index(\"depth\"), args, depth)\n boundary = get(sig.index(\"boundary\"), args, boundary)\n trim = get(sig.index(\"trim\"), args, trim)\n func, args = args[0], [func]\n\n if not callable(func):\n raise TypeError(\n \"First argument must be callable function, not {}\\n\"\n \"Usage: da.map_overlap(function, x)\\n\"\n \" or: da.map_overlap(function, x, y, z)\".format(type(func).__name__)\n )\n if not all(isinstance(x, Array) for x in args):\n raise TypeError(\n \"All variadic arguments must be arrays, not {}\\n\"\n \"Usage: da.map_overlap(function, x)\\n\"\n \" or: da.map_overlap(function, x, y, z)\".format(\n [type(x).__name__ for x in args]\n )\n )\n\n # Coerce depth and boundary arguments to lists of individual\n # specifications for each array argument\n def coerce(xs, arg, fn):\n if not isinstance(arg, list):\n arg = [arg] * len(xs)\n return [fn(x.ndim, a) for x, a in zip(xs, arg)]\n\n depth = coerce(args, depth, coerce_depth)\n boundary = coerce(args, boundary, coerce_boundary)\n\n # Align chunks in each array to a common size\n if align_arrays:\n # Reverse unification order to allow block broadcasting\n inds = [list(reversed(range(x.ndim))) for x in args]\n _, args = unify_chunks(*list(concat(zip(args, inds))), warn=False)\n\n for i, x in enumerate(args):\n for j in range(x.ndim):\n if isinstance(depth[i][j], tuple) and boundary[i][j] != \"none\":\n raise NotImplementedError(\n \"Asymmetric overlap is currently only implemented \"\n \"for boundary='none', however boundary for dimension \"\n \"{} in array argument {} is {}\".format(j, i, boundary[i][j])\n )\n\n def assert_int_chunksize(xs):\n assert all(type(c) is int for x in xs for cc in x.chunks for c in cc)\n\n assert_int_chunksize(args)\n if not trim and \"chunks\" not in kwargs:\n kwargs[\"chunks\"] = args[0].chunks\n args = [overlap(x, depth=d, boundary=b) for x, d, b in zip(args, depth, boundary)]\n assert_int_chunksize(args)\n x = map_blocks(func, *args, **kwargs)\n assert_int_chunksize([x])\n if trim:\n # Find index of array argument with maximum rank and break ties by choosing first provided\n i = sorted(enumerate(args), key=lambda v: (v[1].ndim, -v[0]))[-1][0]\n # Trim using depth/boundary setting for array of highest rank\n return trim_internal(x, depth[i], boundary[i])\n else:\n return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_coerce_depth_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_coerce_depth_", "embedding": null, "metadata": {"file_path": "dask/array/overlap.py", "file_name": "overlap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 729, "end_line": 757, "span_ids": ["coerce_boundary", "coerce_depth"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": 
"def coerce_depth(ndim, depth):\n default = 0\n if depth is None:\n depth = default\n if isinstance(depth, Integral):\n depth = (depth,) * ndim\n if isinstance(depth, tuple):\n depth = dict(zip(range(ndim), depth))\n if isinstance(depth, dict):\n for i in range(ndim):\n if i not in depth:\n depth[i] = 0\n return depth\n\n\ndef coerce_boundary(ndim, boundary):\n default = \"reflect\"\n if boundary is None:\n boundary = default\n if not isinstance(boundary, (tuple, dict)):\n boundary = (boundary,) * ndim\n if isinstance(boundary, tuple):\n boundary = dict(zip(range(ndim), boundary))\n if isinstance(boundary, dict):\n for i in range(ndim):\n if i not in boundary:\n boundary[i] = default\n return boundary", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/percentile.py_from_collections_abc_impo__percentiles_from_tdigest.return.np_array_t_quantile_qs_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/percentile.py_from_collections_abc_impo__percentiles_from_tdigest.return.np_array_t_quantile_qs_", "embedding": null, "metadata": {"file_path": "dask/array/percentile.py", "file_name": "percentile.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 51, "span_ids": ["_percentile", "imports", "_tdigest_chunk", "_percentiles_from_tdigest"], "tokens": 316}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from collections.abc import Iterator\nfrom functools import wraps\nfrom numbers import Number\n\nimport numpy as np\nfrom tlz import merge, merge_sorted\n\nfrom .core import Array\nfrom ..base import tokenize\nfrom ..highlevelgraph import HighLevelGraph\n\n\n@wraps(np.percentile)\ndef _percentile(a, q, interpolation=\"linear\"):\n n = len(a)\n if not len(a):\n return None, n\n if isinstance(q, Iterator):\n q = list(q)\n if a.dtype.name == \"category\":\n result = np.percentile(a.codes, q, interpolation=interpolation)\n import pandas as pd\n\n return pd.Categorical.from_codes(result, a.categories, a.ordered), n\n if np.issubdtype(a.dtype, np.datetime64):\n a2 = a.astype(\"i8\")\n result = np.percentile(a2, q, interpolation=interpolation)\n return result.astype(a.dtype), n\n if not np.issubdtype(a.dtype, np.number):\n interpolation = \"nearest\"\n return np.percentile(a, q, interpolation=interpolation), n\n\n\ndef _tdigest_chunk(a):\n\n from crick import TDigest\n\n t = TDigest()\n t.update(a)\n\n return t\n\n\ndef _percentiles_from_tdigest(qs, digests):\n\n from crick import TDigest\n\n t = TDigest()\n t.merge(*digests)\n\n return np.array(t.quantile(qs / 100.0))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/percentile.py_percentile_percentile._Allow_using_t_digest_if": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/percentile.py_percentile_percentile._Allow_using_t_digest_if", "embedding": null, "metadata": {"file_path": "dask/array/percentile.py", "file_name": "percentile.py", "file_type": "text/x-python", "category": "implementation", "start_line": 54, "end_line": 104, "span_ids": ["percentile"], "tokens": 489}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def percentile(a, q, interpolation=\"linear\", method=\"default\"):\n \"\"\"Approximate percentile of 1-D array\n\n Parameters\n ----------\n a : Array\n q : array_like of float\n Percentile or sequence of percentiles to compute, which must be between\n 0 and 100 inclusive.\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}, optional\n The interpolation method to use when the desired percentile lies\n between two data points ``i < j``. Only valid for ``method='dask'``.\n\n - 'linear': ``i + (j - i) * fraction``, where ``fraction``\n is the fractional part of the index surrounded by ``i``\n and ``j``.\n - 'lower': ``i``.\n - 'higher': ``j``.\n - 'nearest': ``i`` or ``j``, whichever is nearest.\n - 'midpoint': ``(i + j) / 2``.\n\n method : {'default', 'dask', 'tdigest'}, optional\n What method to use. By default will use dask's internal custom\n algorithm (``'dask'``). If set to ``'tdigest'`` will use tdigest for\n floats and ints and fallback to the ``'dask'`` otherwise.\n\n See Also\n --------\n numpy.percentile : Numpy's equivalent Percentile function\n \"\"\"\n if not a.ndim == 1:\n raise NotImplementedError(\"Percentiles only implemented for 1-d arrays\")\n if isinstance(q, Number):\n q = [q]\n q = np.array(q)\n token = tokenize(a, q, interpolation)\n\n dtype = a.dtype\n if np.issubdtype(dtype, np.integer):\n dtype = (np.array([], dtype=dtype) / 0.5).dtype\n\n allowed_methods = [\"default\", \"dask\", \"tdigest\"]\n if method not in allowed_methods:\n raise ValueError(\"method can only be 'default', 'dask' or 'tdigest'\")\n\n if method == \"default\":\n internal_method = \"dask\"\n else:\n internal_method = method\n\n # Allow using t-digest if interpolation is allowed and dtype is of floating or integer type\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/percentile.py_percentile.if__percentile.return.Array_graph_name2_chunk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/percentile.py_percentile.if__percentile.return.Array_graph_name2_chunk", "embedding": null, "metadata": {"file_path": "dask/array/percentile.py", "file_name": "percentile.py", "file_type": "text/x-python", "category": "implementation", "start_line": 105, "end_line": 151, "span_ids": ["percentile"], "tokens": 399}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def percentile(a, q, interpolation=\"linear\", method=\"default\"):\n # ... other code\n if (\n internal_method == \"tdigest\"\n and interpolation == \"linear\"\n and (np.issubdtype(dtype, np.floating) or np.issubdtype(dtype, np.integer))\n ):\n\n from dask.utils import import_required\n\n import_required(\n \"crick\", \"crick is a required dependency for using the t-digest method.\"\n )\n\n name = \"percentile_tdigest_chunk-\" + token\n dsk = dict(\n ((name, i), (_tdigest_chunk, key))\n for i, key in enumerate(a.__dask_keys__())\n )\n\n name2 = \"percentile_tdigest-\" + token\n\n dsk2 = {(name2, 0): (_percentiles_from_tdigest, q, sorted(dsk))}\n\n # Otherwise use the custom percentile algorithm\n else:\n # Add 0 and 100 during calculation for more robust behavior (hopefully)\n calc_q = np.pad(q, 1, mode=\"constant\")\n calc_q[-1] = 100\n name = \"percentile_chunk-\" + token\n dsk = dict(\n ((name, i), (_percentile, key, calc_q, interpolation))\n for i, key in enumerate(a.__dask_keys__())\n )\n\n name2 = \"percentile-\" + token\n dsk2 = {\n (name2, 0): (\n merge_percentiles,\n q,\n [calc_q] * len(a.chunks[0]),\n sorted(dsk),\n interpolation,\n )\n }\n\n dsk = merge(dsk, dsk2)\n graph = HighLevelGraph.from_collections(name2, dsk, dependencies=[a])\n return Array(graph, name2, chunks=((len(q),),), dtype=dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/percentile.py_merge_percentiles_merge_percentiles.combined_vals_counts.merge_sorted_map_zip_va": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/percentile.py_merge_percentiles_merge_percentiles.combined_vals_counts.merge_sorted_map_zip_va", "embedding": null, "metadata": {"file_path": "dask/array/percentile.py", "file_name": "percentile.py", "file_type": "text/x-python", "category": "implementation", "start_line": 154, "end_line": 229, "span_ids": ["merge_percentiles"], "tokens": 800}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def merge_percentiles(finalq, qs, vals, interpolation=\"lower\", Ns=None):\n \"\"\"Combine several percentile calculations of different data.\n\n Parameters\n ----------\n\n finalq : numpy.array\n Percentiles to compute (must use same scale as ``qs``).\n qs : sequence of :class:`numpy.array`s\n Percentiles calculated on different sets of data.\n vals : sequence of :class:`numpy.array`s\n Resulting values associated with percentiles ``qs``.\n Ns : sequence of integers\n The number of data elements associated with each data set.\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}\n Specify the type of interpolation to use to calculate final\n percentiles. For more information, see :func:`numpy.percentile`.\n\n Examples\n --------\n\n >>> finalq = [10, 20, 30, 40, 50, 60, 70, 80]\n >>> qs = [[20, 40, 60, 80], [20, 40, 60, 80]]\n >>> vals = [np.array([1, 2, 3, 4]), np.array([10, 11, 12, 13])]\n >>> Ns = [100, 100] # Both original arrays had 100 elements\n\n >>> merge_percentiles(finalq, qs, vals, Ns=Ns)\n array([ 1, 2, 3, 4, 10, 11, 12, 13])\n \"\"\"\n if isinstance(finalq, Iterator):\n finalq = list(finalq)\n finalq = np.array(finalq)\n qs = list(map(list, qs))\n vals = list(vals)\n if Ns is None:\n vals, Ns = zip(*vals)\n Ns = list(Ns)\n\n L = list(zip(*[(q, val, N) for q, val, N in zip(qs, vals, Ns) if N]))\n if not L:\n raise ValueError(\"No non-trivial arrays found\")\n qs, vals, Ns = L\n\n # TODO: Perform this check above in percentile once dtype checking is easy\n # Here we silently change meaning\n if vals[0].dtype.name == \"category\":\n result = merge_percentiles(\n finalq, qs, [v.codes for v in vals], interpolation, Ns\n )\n import pandas as pd\n\n return pd.Categorical.from_codes(result, vals[0].categories, vals[0].ordered)\n if not np.issubdtype(vals[0].dtype, np.number):\n interpolation = \"nearest\"\n\n if len(vals) != len(qs) or len(Ns) != len(qs):\n raise ValueError(\"qs, vals, and Ns parameters must be the same length\")\n\n # transform qs and Ns into number of observations between percentiles\n counts = []\n for q, N in zip(qs, Ns):\n count = np.empty(len(q))\n count[1:] = np.diff(q)\n count[0] = q[0]\n count *= N\n counts.append(count)\n\n # Sort by calculated percentile values, then number of observations.\n # >95% of the time in this function is spent in `merge_sorted` below.\n # An alternative that uses numpy sort is shown. It is sometimes\n # comparable to, but typically slower than, `merge_sorted`.\n #\n # >>> A = np.concatenate(map(np.array, map(zip, vals, counts)))\n # >>> A.sort(0, kind='mergesort')\n\n combined_vals_counts = merge_sorted(*map(zip, vals, counts))\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/percentile.py_merge_percentiles.combined_vals_combined_c_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/percentile.py_merge_percentiles.combined_vals_combined_c_", "embedding": null, "metadata": {"file_path": "dask/array/percentile.py", "file_name": "percentile.py", "file_type": "text/x-python", "category": "implementation", "start_line": 230, "end_line": 270, "span_ids": ["merge_percentiles"], "tokens": 401}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def merge_percentiles(finalq, qs, vals, interpolation=\"lower\", Ns=None):\n # ... other code\n combined_vals, combined_counts = zip(*combined_vals_counts)\n\n combined_vals = np.array(combined_vals)\n combined_counts = np.array(combined_counts)\n\n # percentile-like, but scaled by total number of observations\n combined_q = np.cumsum(combined_counts)\n\n # rescale finalq percentiles to match combined_q\n desired_q = finalq * sum(Ns)\n\n # the behavior of different interpolation methods should be\n # investigated further.\n if interpolation == \"linear\":\n rv = np.interp(desired_q, combined_q, combined_vals)\n else:\n left = np.searchsorted(combined_q, desired_q, side=\"left\")\n right = np.searchsorted(combined_q, desired_q, side=\"right\") - 1\n np.minimum(left, len(combined_vals) - 1, left) # don't exceed max index\n lower = np.minimum(left, right)\n upper = np.maximum(left, right)\n if interpolation == \"lower\":\n rv = combined_vals[lower]\n elif interpolation == \"higher\":\n rv = combined_vals[upper]\n elif interpolation == \"midpoint\":\n rv = 0.5 * (combined_vals[lower] + combined_vals[upper])\n elif interpolation == \"nearest\":\n lower_residual = np.abs(combined_q[lower] - desired_q)\n upper_residual = np.abs(combined_q[upper] - desired_q)\n mask = lower_residual > upper_residual\n index = lower # alias; we no longer need lower\n index[mask] = upper[mask]\n rv = combined_vals[index]\n else:\n raise ValueError(\n \"interpolation can only be 'linear', 'lower', \"\n \"'higher', 'midpoint', or 'nearest'\"\n )\n return rv", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_numbers_doc_wraps.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_numbers_doc_wraps.return._", "embedding": null, "metadata": {"file_path": "dask/array/random.py", "file_name": "random.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 35, "span_ids": ["imports", "doc_wraps"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", 
"start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numbers\nimport warnings\nfrom itertools import product\nfrom numbers import Integral\nfrom operator import getitem\n\nimport numpy as np\n\nfrom .core import (\n normalize_chunks,\n Array,\n slices_from_chunks,\n asarray,\n broadcast_shapes,\n broadcast_to,\n)\nfrom .creation import arange\nfrom ..base import tokenize\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..utils import ignoring, random_state_data, derived_from, skip_doctest\n\n\ndef doc_wraps(func):\n \"\"\" Copy docstring from one function to another \"\"\"\n warnings.warn(\n \"dask.array.random.doc_wraps is deprecated and will be removed in a future version\",\n FutureWarning,\n )\n\n def _(func2):\n if func.__doc__ is not None:\n func2.__doc__ = skip_doctest(func.__doc__)\n return func2\n\n return _", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_RandomState_RandomState.seed.self__numpy_state_seed_se": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_RandomState_RandomState.seed.self__numpy_state_seed_se", "embedding": null, "metadata": {"file_path": "dask/array/random.py", "file_name": "random.py", "file_type": "text/x-python", "category": "implementation", "start_line": 38, "end_line": 76, "span_ids": ["RandomState.seed", "RandomState", "RandomState.__init__"], "tokens": 319}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class RandomState(object):\n \"\"\"\n Mersenne Twister pseudo-random number generator\n\n This object contains state to deterministically generate pseudo-random\n numbers from a variety of probability distributions. It is identical to\n ``np.random.RandomState`` except that all functions also take a ``chunks=``\n keyword argument.\n\n Parameters\n ----------\n seed: Number\n Object to pass to RandomState to serve as deterministic seed\n RandomState: Callable[seed] -> RandomState\n A callable that, when provided with a ``seed`` keyword provides an\n object that operates identically to ``np.random.RandomState`` (the\n default). 
This might also be a function that returns a\n ``randomgen.RandomState``, ``mkl_random``, or\n ``cupy.random.RandomState`` object.\n\n Examples\n --------\n >>> import dask.array as da\n >>> state = da.random.RandomState(1234) # a seed\n >>> x = state.normal(10, 0.1, size=3, chunks=(2,))\n >>> x.compute()\n array([10.01867852, 10.04812289, 9.89649746])\n\n See Also\n --------\n np.random.RandomState\n \"\"\"\n\n def __init__(self, seed=None, RandomState=None):\n self._numpy_state = np.random.RandomState(seed)\n self._RandomState = RandomState\n\n def seed(self, seed=None):\n self._numpy_state.seed(seed)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_RandomState._wrap_RandomState._wrap.vals._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_RandomState._wrap_RandomState._wrap.vals._", "embedding": null, "metadata": {"file_path": "dask/array/random.py", "file_name": "random.py", "file_type": "text/x-python", "category": "implementation", "start_line": 78, "end_line": 159, "span_ids": ["RandomState._wrap"], "tokens": 673}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class RandomState(object):\n\n def _wrap(\n self, funcname, *args, size=None, chunks=\"auto\", extra_chunks=(), **kwargs\n ):\n \"\"\"Wrap numpy random function to produce dask.array random function\n\n extra_chunks should be a chunks tuple to append to the end of chunks\n \"\"\"\n if size is not None and not isinstance(size, (tuple, list)):\n size = (size,)\n\n args_shapes = {ar.shape for ar in args if isinstance(ar, (Array, np.ndarray))}\n args_shapes.union(\n {ar.shape for ar in kwargs.values() if isinstance(ar, (Array, np.ndarray))}\n )\n\n shapes = list(args_shapes)\n if size is not None:\n shapes.extend([size])\n # broadcast to the final size(shape)\n size = broadcast_shapes(*shapes)\n chunks = normalize_chunks(\n chunks,\n size, # ideally would use dtype here\n dtype=kwargs.get(\"dtype\", np.float64),\n )\n slices = slices_from_chunks(chunks)\n\n def _broadcast_any(ar, shape, chunks):\n if isinstance(ar, Array):\n return broadcast_to(ar, shape).rechunk(chunks)\n if isinstance(ar, np.ndarray):\n return np.ascontiguousarray(np.broadcast_to(ar, shape))\n\n # Broadcast all arguments, get tiny versions as well\n # Start adding the relevant bits to the graph\n dsk = {}\n dsks = []\n lookup = {}\n small_args = []\n dependencies = []\n for i, ar in enumerate(args):\n if isinstance(ar, (np.ndarray, Array)):\n res = _broadcast_any(ar, size, chunks)\n if isinstance(res, Array):\n dependencies.append(res)\n dsks.append(res.dask)\n lookup[i] = res.name\n elif isinstance(res, np.ndarray):\n name = \"array-{}\".format(tokenize(res))\n lookup[i] = name\n dsk[name] = res\n small_args.append(ar[tuple(0 for _ in ar.shape)])\n else:\n small_args.append(ar)\n\n small_kwargs = {}\n for key, ar in kwargs.items():\n if isinstance(ar, (np.ndarray, Array)):\n res = _broadcast_any(ar, size, chunks)\n if isinstance(res, Array):\n 
dependencies.append(res)\n dsks.append(res.dask)\n lookup[key] = res.name\n elif isinstance(res, np.ndarray):\n name = \"array-{}\".format(tokenize(res))\n lookup[key] = name\n dsk[name] = res\n small_kwargs[key] = ar[tuple(0 for _ in ar.shape)]\n else:\n small_kwargs[key] = ar\n\n sizes = list(product(*chunks))\n seeds = random_state_data(len(sizes), self._numpy_state)\n token = tokenize(seeds, size, chunks, args, kwargs)\n name = \"{0}-{1}\".format(funcname, token)\n\n keys = product(\n [name], *([range(len(bd)) for bd in chunks] + [[0]] * len(extra_chunks))\n )\n blocks = product(*[range(len(bd)) for bd in chunks])\n\n vals = []\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_RandomState._wrap.for_seed_size_slc_bloc_RandomState._wrap.return.Array_graph_name_chunks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_RandomState._wrap.for_seed_size_slc_bloc_RandomState._wrap.return.Array_graph_name_chunks", "embedding": null, "metadata": {"file_path": "dask/array/random.py", "file_name": "random.py", "file_type": "text/x-python", "category": "implementation", "start_line": 160, "end_line": 197, "span_ids": ["RandomState._wrap"], "tokens": 323}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class RandomState(object):\n\n def _wrap(\n self, funcname, *args, size=None, chunks=\"auto\", extra_chunks=(), **kwargs\n ):\n # ... 
other code\n for seed, size, slc, block in zip(seeds, sizes, slices, blocks):\n arg = []\n for i, ar in enumerate(args):\n if i not in lookup:\n arg.append(ar)\n else:\n if isinstance(ar, Array):\n dependencies.append(ar)\n arg.append((lookup[i],) + block)\n else: # np.ndarray\n arg.append((getitem, lookup[i], slc))\n kwrg = {}\n for k, ar in kwargs.items():\n if k not in lookup:\n kwrg[k] = ar\n else:\n if isinstance(ar, Array):\n dependencies.append(ar)\n kwrg[k] = (lookup[k],) + block\n else: # np.ndarray\n kwrg[k] = (getitem, lookup[k], slc)\n vals.append(\n (_apply_random, self._RandomState, funcname, seed, size, arg, kwrg)\n )\n\n meta = _apply_random(\n self._RandomState,\n funcname,\n seed,\n (0,) * len(size),\n small_args,\n small_kwargs,\n )\n\n dsk.update(dict(zip(keys, vals)))\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)\n return Array(graph, name, chunks + extra_chunks, meta=meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_RandomState.beta_RandomState.with_ignoring_AttributeEr.choice.return.Array_graph_name_chunks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_RandomState.beta_RandomState.with_ignoring_AttributeEr.choice.return.Array_graph_name_chunks", "embedding": null, "metadata": {"file_path": "dask/array/random.py", "file_name": "random.py", "file_type": "text/x-python", "category": "implementation", "start_line": 199, "end_line": 283, "span_ids": ["RandomState.binomial", "RandomState.beta", "RandomState.chisquare", "RandomState:3"], "tokens": 801}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class RandomState(object):\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def beta(self, a, b, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"beta\", a, b, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def binomial(self, n, p, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"binomial\", n, p, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def chisquare(self, df, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"chisquare\", df, size=size, chunks=chunks, **kwargs)\n\n with ignoring(AttributeError):\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def choice(self, a, size=None, replace=True, p=None, chunks=\"auto\"):\n dependencies = []\n # Normalize and validate `a`\n if isinstance(a, Integral):\n # On windows the output dtype differs if p is provided or\n # absent, see https://github.com/numpy/numpy/issues/9867\n dummy_p = np.array([1]) if p is not None else p\n dtype = np.random.choice(1, size=(), p=dummy_p).dtype\n len_a = a\n if a < 0:\n raise ValueError(\"a must be greater than 0\")\n else:\n a = asarray(a)\n a = a.rechunk(a.shape)\n dtype = a.dtype\n if a.ndim != 1:\n raise ValueError(\"a must be one dimensional\")\n len_a = len(a)\n dependencies.append(a)\n a = 
a.__dask_keys__()[0]\n\n # Normalize and validate `p`\n if p is not None:\n if not isinstance(p, Array):\n # If p is not a dask array, first check the sum is close\n # to 1 before converting.\n p = np.asarray(p)\n if not np.isclose(p.sum(), 1, rtol=1e-7, atol=0):\n raise ValueError(\"probabilities do not sum to 1\")\n p = asarray(p)\n else:\n p = p.rechunk(p.shape)\n\n if p.ndim != 1:\n raise ValueError(\"p must be one dimensional\")\n if len(p) != len_a:\n raise ValueError(\"a and p must have the same size\")\n\n dependencies.append(p)\n p = p.__dask_keys__()[0]\n\n if size is None:\n size = ()\n elif not isinstance(size, (tuple, list)):\n size = (size,)\n\n chunks = normalize_chunks(chunks, size, dtype=np.float64)\n if not replace and len(chunks[0]) > 1:\n err_msg = (\n \"replace=False is not currently supported for \"\n \"dask.array.choice with multi-chunk output \"\n \"arrays\"\n )\n raise NotImplementedError(err_msg)\n sizes = list(product(*chunks))\n state_data = random_state_data(len(sizes), self._numpy_state)\n\n name = \"da.random.choice-%s\" % tokenize(\n state_data, size, chunks, a, replace, p\n )\n keys = product([name], *(range(len(bd)) for bd in chunks))\n dsk = {\n k: (_choice, state, a, size, replace, p)\n for k, state, size in zip(keys, state_data, sizes)\n }\n\n graph = HighLevelGraph.from_collections(\n name, dsk, dependencies=dependencies\n )\n return Array(graph, name, chunks, dtype=dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_RandomState._derived_from_np_random_RandomState.multinomial.return.self__wrap_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_RandomState._derived_from_np_random_RandomState.multinomial.return.self__wrap_", "embedding": null, "metadata": {"file_path": "dask/array/random.py", "file_name": "random.py", "file_type": "text/x-python", "category": "implementation", "start_line": 285, "end_line": 339, "span_ids": ["RandomState.f", "RandomState.multinomial", "RandomState.lognormal", "RandomState.hypergeometric", "RandomState.logistic", "RandomState:3", "RandomState.gamma", "RandomState.gumbel", "RandomState.exponential", "RandomState.logseries", "RandomState.laplace", "RandomState.geometric"], "tokens": 726}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class RandomState(object):\n\n # @derived_from(np.random.RandomState, skipblocks=1)\n # def dirichlet(self, alpha, size=None, chunks=\"auto\"):\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def exponential(self, scale=1.0, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"exponential\", scale, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def f(self, dfnum, dfden, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"f\", dfnum, dfden, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def gamma(self, shape, scale=1.0, size=None, chunks=\"auto\", **kwargs):\n 
return self._wrap(\"gamma\", shape, scale, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def geometric(self, p, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"geometric\", p, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def gumbel(self, loc=0.0, scale=1.0, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"gumbel\", loc, scale, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def hypergeometric(self, ngood, nbad, nsample, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\n \"hypergeometric\", ngood, nbad, nsample, size=size, chunks=chunks, **kwargs\n )\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def laplace(self, loc=0.0, scale=1.0, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"laplace\", loc, scale, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def logistic(self, loc=0.0, scale=1.0, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"logistic\", loc, scale, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def lognormal(self, mean=0.0, sigma=1.0, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"lognormal\", mean, sigma, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def logseries(self, p, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"logseries\", p, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def multinomial(self, n, pvals, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\n \"multinomial\",\n n,\n pvals,\n size=size,\n chunks=chunks,\n extra_chunks=((len(pvals),),),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_RandomState.negative_binomial_RandomState.rayleigh.return.self__wrap_rayleigh_sc": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_RandomState.negative_binomial_RandomState.rayleigh.return.self__wrap_rayleigh_sc", "embedding": null, "metadata": {"file_path": "dask/array/random.py", "file_name": "random.py", "file_type": "text/x-python", "category": "implementation", "start_line": 341, "end_line": 404, "span_ids": ["RandomState.pareto", "RandomState.noncentral_chisquare", "RandomState.noncentral_f", "RandomState:4", "RandomState.normal", "RandomState.permutation", "RandomState.power", "RandomState.randint", "RandomState.rayleigh", "RandomState.random_sample", "RandomState.random_integers", "RandomState.poisson", "RandomState.negative_binomial"], "tokens": 748}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class RandomState(object):\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def negative_binomial(self, n, p, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"negative_binomial\", n, p, size=size, chunks=chunks, 
**kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def noncentral_chisquare(self, df, nonc, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\n \"noncentral_chisquare\", df, nonc, size=size, chunks=chunks, **kwargs\n )\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def noncentral_f(self, dfnum, dfden, nonc, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\n \"noncentral_f\", dfnum, dfden, nonc, size=size, chunks=chunks, **kwargs\n )\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def normal(self, loc=0.0, scale=1.0, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"normal\", loc, scale, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def pareto(self, a, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"pareto\", a, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def permutation(self, x):\n from .slicing import shuffle_slice\n\n if isinstance(x, numbers.Number):\n x = arange(x, chunks=\"auto\")\n\n index = np.arange(len(x))\n self._numpy_state.shuffle(index)\n return shuffle_slice(x, index)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def poisson(self, lam=1.0, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"poisson\", lam, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def power(self, a, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"power\", a, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def randint(self, low, high=None, size=None, chunks=\"auto\", dtype=\"l\", **kwargs):\n return self._wrap(\n \"randint\", low, high, size=size, chunks=chunks, dtype=dtype, **kwargs\n )\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def random_integers(self, low, high=None, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\n \"random_integers\", low, high, size=size, chunks=chunks, **kwargs\n )\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def random_sample(self, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"random_sample\", size=size, chunks=chunks, **kwargs)\n\n random = random_sample\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def rayleigh(self, scale=1.0, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"rayleigh\", scale, size=size, chunks=chunks, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_RandomState.standard_cauchy_RandomState.zipf.return.self__wrap_zipf_a_siz": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_RandomState.standard_cauchy_RandomState.zipf.return.self__wrap_zipf_a_siz", "embedding": null, "metadata": {"file_path": "dask/array/random.py", "file_name": "random.py", "file_type": "text/x-python", "category": "implementation", "start_line": 406, "end_line": 454, "span_ids": ["RandomState.tomaxint", "RandomState.standard_t", "RandomState.triangular", "RandomState.weibull", "RandomState.standard_gamma", "RandomState.standard_cauchy", "RandomState.standard_normal", "RandomState.vonmises", "RandomState.zipf", "RandomState.uniform", "RandomState.standard_exponential", 
"RandomState.wald"], "tokens": 665}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class RandomState(object):\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def standard_cauchy(self, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"standard_cauchy\", size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def standard_exponential(self, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"standard_exponential\", size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def standard_gamma(self, shape, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"standard_gamma\", shape, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def standard_normal(self, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"standard_normal\", size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def standard_t(self, df, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"standard_t\", df, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def tomaxint(self, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"tomaxint\", size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def triangular(self, left, mode, right, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\n \"triangular\", left, mode, right, size=size, chunks=chunks, **kwargs\n )\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def uniform(self, low=0.0, high=1.0, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"uniform\", low, high, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def vonmises(self, mu, kappa, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"vonmises\", mu, kappa, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def wald(self, mean, scale, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"wald\", mean, scale, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def weibull(self, a, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"weibull\", a, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def zipf(self, a, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"zipf\", a, size=size, chunks=chunks, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py__choice_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py__choice_", "embedding": null, "metadata": {"file_path": "dask/array/random.py", "file_name": "random.py", "file_type": "text/x-python", "category": "implementation", "start_line": 457, "end_line": 522, "span_ids": ["_apply_random", "impl", "_choice"], "tokens": 468}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _choice(state_data, a, size, replace, p):\n state = np.random.RandomState(state_data)\n return state.choice(a, size=size, replace=replace, p=p)\n\n\ndef _apply_random(RandomState, funcname, state_data, size, args, kwargs):\n \"\"\"Apply RandomState method with seed\"\"\"\n if RandomState is None:\n RandomState = np.random.RandomState\n state = RandomState(state_data)\n func = getattr(state, funcname)\n return func(*args, size=size, **kwargs)\n\n\n_state = RandomState()\n\n\nseed = _state.seed\n\n\nbeta = _state.beta\nbinomial = _state.binomial\nchisquare = _state.chisquare\nif hasattr(_state, \"choice\"):\n choice = _state.choice\nexponential = _state.exponential\nf = _state.f\ngamma = _state.gamma\ngeometric = _state.geometric\ngumbel = _state.gumbel\nhypergeometric = _state.hypergeometric\nlaplace = _state.laplace\nlogistic = _state.logistic\nlognormal = _state.lognormal\nlogseries = _state.logseries\nmultinomial = _state.multinomial\nnegative_binomial = _state.negative_binomial\nnoncentral_chisquare = _state.noncentral_chisquare\nnoncentral_f = _state.noncentral_f\nnormal = _state.normal\npareto = _state.pareto\npermutation = _state.permutation\npoisson = _state.poisson\npower = _state.power\nrayleigh = _state.rayleigh\nrandom_sample = _state.random_sample\nrandom = random_sample\nrandint = _state.randint\nrandom_integers = _state.random_integers\ntriangular = _state.triangular\nuniform = _state.uniform\nvonmises = _state.vonmises\nwald = _state.wald\nweibull = _state.weibull\nzipf = _state.zipf\n\n\"\"\"\nStandard distributions\n\"\"\"\n\nstandard_cauchy = _state.standard_cauchy\nstandard_exponential = _state.standard_exponential\nstandard_gamma = _state.standard_gamma\nstandard_normal = _state.standard_normal\nstandard_t = _state.standard_t", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py___config": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py___config", "embedding": null, "metadata": {"file_path": "dask/array/rechunk.py", "file_name": "rechunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 27, "span_ids": ["docstring"], "tokens": 161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"\nThe rechunk module defines:\n intersect_chunks: a function for\n converting chunks to new dimensions\n rechunk: a function to convert the blocks\n of an existing dask array to new chunks or blockshape\n\"\"\"\nimport math\nimport heapq\nfrom functools import reduce\nfrom typing import Tuple\nfrom warnings import warn\n\nfrom itertools import product, chain, count\nfrom operator import getitem, add, mul, 
itemgetter\n\nimport numpy as np\nimport tlz as toolz\nfrom tlz import accumulate\n\nfrom ..base import tokenize\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..utils import parse_bytes\nfrom .core import concatenate3, Array, normalize_chunks\nfrom .utils import validate_axis\nfrom .wrap import empty\nfrom .. import config", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_cumdims_label_cumdims_label.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_cumdims_label_cumdims_label.return._", "embedding": null, "metadata": {"file_path": "dask/array/rechunk.py", "file_name": "rechunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 28, "end_line": 38, "span_ids": ["cumdims_label"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def cumdims_label(chunks, const):\n \"\"\"Internal utility for cumulative sum with label.\n\n >>> cumdims_label(((5, 3, 3), (2, 2, 1)), 'n') # doctest: +NORMALIZE_WHITESPACE\n [(('n', 0), ('n', 5), ('n', 8), ('n', 11)),\n (('n', 0), ('n', 2), ('n', 4), ('n', 5))]\n \"\"\"\n return [\n tuple(zip((const,) * (1 + len(bds)), accumulate(add, (0,) + bds)))\n for bds in chunks\n ]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__breakpoints__breakpoints.return.tuple_sorted_cumold_cum": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__breakpoints__breakpoints.return.tuple_sorted_cumold_cum", "embedding": null, "metadata": {"file_path": "dask/array/rechunk.py", "file_name": "rechunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 41, "end_line": 52, "span_ids": ["_breakpoints"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _breakpoints(cumold, cumnew):\n \"\"\"\n\n >>> new = cumdims_label(((2, 3), (2, 2, 1)), 'n')\n >>> old = cumdims_label(((2, 2, 1), (5,)), 'o')\n\n >>> _breakpoints(new[0], old[0])\n (('n', 0), ('o', 0), ('n', 2), ('o', 2), ('o', 4), ('n', 5), ('o', 5))\n >>> _breakpoints(new[1], old[1])\n (('n', 0), ('o', 0), ('n', 2), ('n', 4), ('n', 5), ('o', 5))\n \"\"\"\n return tuple(sorted(cumold + cumnew, key=itemgetter(1)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__intersect_1d__intersect_1d.return.ret": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__intersect_1d__intersect_1d.return.ret", "embedding": null, "metadata": {"file_path": "dask/array/rechunk.py", "file_name": "rechunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 55, "end_line": 111, "span_ids": ["_intersect_1d"], "tokens": 495}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _intersect_1d(breaks):\n \"\"\"\n Internal utility to intersect chunks for 1d after preprocessing.\n\n >>> new = cumdims_label(((2, 3), (2, 2, 1)), 'n')\n >>> old = cumdims_label(((2, 2, 1), (5,)), 'o')\n\n >>> _intersect_1d(_breakpoints(old[0], new[0])) # doctest: +NORMALIZE_WHITESPACE\n [[(0, slice(0, 2, None))],\n [(1, slice(0, 2, None)), (2, slice(0, 1, None))]]\n >>> _intersect_1d(_breakpoints(old[1], new[1])) # doctest: +NORMALIZE_WHITESPACE\n [[(0, slice(0, 2, None))],\n [(0, slice(2, 4, None))],\n [(0, slice(4, 5, None))]]\n\n Parameters\n ----------\n\n breaks: list of tuples\n Each tuple is ('o', 8) or ('n', 8)\n These are pairs of 'o' old or new 'n'\n indicator with a corresponding cumulative sum.\n\n Uses 'o' and 'n' to make new tuples of slices for\n the new block crosswalk to old blocks.\n \"\"\"\n start = 0\n last_end = 0\n old_idx = 0\n ret = []\n ret_next = []\n for idx in range(1, len(breaks)):\n label, br = breaks[idx]\n last_label, last_br = breaks[idx - 1]\n if last_label == \"n\":\n if ret_next:\n ret.append(ret_next)\n ret_next = []\n if last_label == \"o\":\n start = 0\n else:\n start = last_end\n end = br - last_br + start\n last_end = end\n if br == last_br:\n if label == \"o\":\n old_idx += 1\n continue\n ret_next.append((old_idx, slice(start, end)))\n if label == \"o\":\n old_idx += 1\n start = 0\n\n if ret_next:\n ret.append(ret_next)\n\n return ret", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__old_to_new__old_to_new.return.old_to_new": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__old_to_new__old_to_new.return.old_to_new", "embedding": null, "metadata": {"file_path": "dask/array/rechunk.py", "file_name": "rechunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 114, "end_line": 155, "span_ids": ["_old_to_new"], "tokens": 490}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _old_to_new(old_chunks, new_chunks):\n \"\"\"Helper to build old_chunks to new_chunks.\n\n Handles missing values, as long as the missing dimension\n is unchanged.\n\n 
Examples\n --------\n >>> old = ((10, 10, 10, 10, 10), )\n >>> new = ((25, 5, 20), )\n >>> _old_to_new(old, new) # doctest: +NORMALIZE_WHITESPACE\n [[[(0, slice(0, 10, None)), (1, slice(0, 10, None)), (2, slice(0, 5, None))],\n [(2, slice(5, 10, None))],\n [(3, slice(0, 10, None)), (4, slice(0, 10, None))]]]\n \"\"\"\n old_known = [x for x in old_chunks if not any(math.isnan(y) for y in x)]\n new_known = [x for x in new_chunks if not any(math.isnan(y) for y in x)]\n\n n_missing = [sum(math.isnan(y) for y in x) for x in old_chunks]\n n_missing2 = [sum(math.isnan(y) for y in x) for x in new_chunks]\n\n cmo = cumdims_label(old_known, \"o\")\n cmn = cumdims_label(new_known, \"n\")\n\n sums = [sum(o) for o in old_known]\n sums2 = [sum(n) for n in new_known]\n\n if not sums == sums2:\n raise ValueError(\"Cannot change dimensions from %r to %r\" % (sums, sums2))\n if not n_missing == n_missing2:\n raise ValueError(\n \"Chunks must be unchanging along unknown dimensions.\\n\\n\"\n \"A possible solution:\\n x.compute_chunk_sizes()\"\n )\n\n old_to_new = [_intersect_1d(_breakpoints(cm[0], cm[1])) for cm in zip(cmo, cmn)]\n for idx, missing in enumerate(n_missing):\n if missing:\n # Missing dimensions are always unchanged, so old -> new is everything\n extra = [[(i, slice(0, None))] for i in range(missing)]\n old_to_new.insert(idx, extra)\n return old_to_new", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_intersect_chunks_intersect_chunks.return.cross": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_intersect_chunks_intersect_chunks.return.cross", "embedding": null, "metadata": {"file_path": "dask/array/rechunk.py", "file_name": "rechunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 158, "end_line": 182, "span_ids": ["intersect_chunks"], "tokens": 266}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def intersect_chunks(old_chunks, new_chunks):\n \"\"\"\n Make dask.array slices as intersection of old and new chunks.\n\n >>> intersections = intersect_chunks(((4, 4), (2,)),\n ... 
((8,), (1, 1)))\n >>> list(intersections) # doctest: +NORMALIZE_WHITESPACE\n [(((0, slice(0, 4, None)), (0, slice(0, 1, None))),\n ((1, slice(0, 4, None)), (0, slice(0, 1, None)))),\n (((0, slice(0, 4, None)), (0, slice(1, 2, None))),\n ((1, slice(0, 4, None)), (0, slice(1, 2, None))))]\n\n Parameters\n ----------\n\n old_chunks : iterable of tuples\n block sizes along each dimension (convert from old_chunks)\n new_chunks: iterable of tuples\n block sizes along each dimension (converts to new_chunks)\n \"\"\"\n old_to_new = _old_to_new(old_chunks, new_chunks)\n\n cross1 = product(*old_to_new)\n cross = chain(tuple(product(*cr)) for cr in cross1)\n return cross", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_rechunk_rechunk.return.x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_rechunk_rechunk.return.x", "embedding": null, "metadata": {"file_path": "dask/array/rechunk.py", "file_name": "rechunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 187, "end_line": 280, "span_ids": ["rechunk"], "tokens": 840}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def rechunk(x, chunks=\"auto\", threshold=None, block_size_limit=None, balance=False):\n \"\"\"\n Convert blocks in dask array x for new chunks.\n\n Parameters\n ----------\n x: dask array\n Array to be rechunked.\n chunks: int, tuple, dict or str, optional\n The new block dimensions to create. -1 indicates the full size of the\n corresponding dimension. 
Default is \"auto\" which automatically\n determines chunk sizes.\n threshold: int, optional\n The graph growth factor under which we don't bother introducing an\n intermediate step.\n block_size_limit: int, optional\n The maximum block size (in bytes) we want to produce\n Defaults to the configuration value ``array.chunk-size``\n balance : bool, default False\n If True, try to make each chunk to be the same size.\n\n This means ``balance=True`` will remove any small leftover chunks, so\n using ``x.rechunk(chunks=len(x) // N, balance=True)``\n will almost certainly result in ``N`` chunks.\n\n Examples\n --------\n >>> import dask.array as da\n >>> x = da.ones((1000, 1000), chunks=(100, 100))\n\n Specify uniform chunk sizes with a tuple\n\n >>> y = x.rechunk((1000, 10))\n\n Or chunk only specific dimensions with a dictionary\n\n >>> y = x.rechunk({0: 1000})\n\n Use the value ``-1`` to specify that you want a single chunk along a\n dimension or the value ``\"auto\"`` to specify that dask can freely rechunk a\n dimension to attain blocks of a uniform block size\n\n >>> y = x.rechunk({0: -1, 1: 'auto'}, block_size_limit=1e8)\n\n If a chunk size does not divide the dimension then rechunk will leave any\n unevenness to the last chunk.\n\n >>> x.rechunk(chunks=(400, -1)).chunks\n ((400, 400, 200), (1000,))\n\n However if you want more balanced chunks, and don't mind Dask choosing a\n different chunksize for you then you can use the ``balance=True`` option.\n\n >>> x.rechunk(chunks=(400, -1), balance=True).chunks\n ((500, 500), (1000,))\n \"\"\"\n # don't rechunk if array is empty\n if x.ndim > 0 and all(s == 0 for s in x.shape):\n return x\n\n if isinstance(chunks, dict):\n chunks = {validate_axis(c, x.ndim): v for c, v in chunks.items()}\n for i in range(x.ndim):\n if i not in chunks:\n chunks[i] = x.chunks[i]\n if isinstance(chunks, (tuple, list)):\n chunks = tuple(lc if lc is not None else rc for lc, rc in zip(chunks, x.chunks))\n chunks = normalize_chunks(\n chunks, x.shape, limit=block_size_limit, dtype=x.dtype, previous_chunks=x.chunks\n )\n\n # Now chunks are tuple of tuples\n if not balance and (chunks == x.chunks):\n return x\n ndim = x.ndim\n if not len(chunks) == ndim:\n raise ValueError(\"Provided chunks are not consistent with shape\")\n\n if balance:\n chunks = tuple([_balance_chunksizes(chunk) for chunk in chunks])\n\n new_shapes = tuple(map(sum, chunks))\n\n for new, old in zip(new_shapes, x.shape):\n if new != old and not math.isnan(old) and not math.isnan(new):\n raise ValueError(\"Provided chunks are not consistent with shape\")\n\n steps = plan_rechunk(\n x.chunks, chunks, x.dtype.itemsize, threshold, block_size_limit\n )\n for c in steps:\n x = _compute_rechunk(x, c)\n\n return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__number_of_blocks_divide_to_width.return.tuple_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__number_of_blocks_divide_to_width.return.tuple_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/rechunk.py", "file_name": "rechunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 257, "end_line": 291, "span_ids": ["estimate_graph_size", "divide_to_width", 
"_number_of_blocks", "_largest_block_size"], "tokens": 255}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _number_of_blocks(chunks):\n return reduce(mul, map(len, chunks))\n\n\ndef _largest_block_size(chunks):\n return reduce(mul, map(max, chunks))\n\n\ndef estimate_graph_size(old_chunks, new_chunks):\n \"\"\"Estimate the graph size during a rechunk computation.\"\"\"\n # Estimate the number of intermediate blocks that will be produced\n # (we don't use intersect_chunks() which is much more expensive)\n crossed_size = reduce(\n mul,\n (\n (len(oc) + len(nc) - 1 if oc != nc else len(oc))\n for oc, nc in zip(old_chunks, new_chunks)\n ),\n )\n return crossed_size\n\n\ndef divide_to_width(desired_chunks, max_width):\n \"\"\"Minimally divide the given chunks so as to make the largest chunk\n width less or equal than *max_width*.\n \"\"\"\n chunks = []\n for c in desired_chunks:\n nb_divides = int(np.ceil(c / max_width))\n for i in range(nb_divides):\n n = c // (nb_divides - i)\n chunks.append(n)\n c -= n\n assert c == 0\n return tuple(chunks)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_merge_to_number_merge_to_number.return.tuple_filter_None_chunks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_merge_to_number_merge_to_number.return.tuple_filter_None_chunks", "embedding": null, "metadata": {"file_path": "dask/array/rechunk.py", "file_name": "rechunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 294, "end_line": 345, "span_ids": ["merge_to_number"], "tokens": 436}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def merge_to_number(desired_chunks, max_number):\n \"\"\"Minimally merge the given chunks so as to drop the number of\n chunks below *max_number*, while minimizing the largest width.\n \"\"\"\n if len(desired_chunks) <= max_number:\n return desired_chunks\n\n distinct = set(desired_chunks)\n if len(distinct) == 1:\n # Fast path for homogeneous target, also ensuring a regular result\n w = distinct.pop()\n n = len(desired_chunks)\n total = n * w\n\n desired_width = total // max_number\n width = w * (desired_width // w)\n adjust = (total - max_number * width) // w\n\n return (width + w,) * adjust + (width,) * (max_number - adjust)\n\n desired_width = sum(desired_chunks) // max_number\n nmerges = len(desired_chunks) - max_number\n\n heap = [\n (desired_chunks[i] + desired_chunks[i + 1], i, i + 1)\n for i in range(len(desired_chunks) - 1)\n ]\n heapq.heapify(heap)\n\n chunks = list(desired_chunks)\n\n while nmerges > 0:\n # Find smallest interval to merge\n width, i, j = heapq.heappop(heap)\n # If interval was made invalid by another merge, recompute\n # 
it, re-insert it and retry.\n if chunks[j] == 0:\n j += 1\n while chunks[j] == 0:\n j += 1\n heapq.heappush(heap, (chunks[i] + chunks[j], i, j))\n continue\n elif chunks[i] + chunks[j] != width:\n heapq.heappush(heap, (chunks[i] + chunks[j], i, j))\n continue\n # Merge\n assert chunks[i] != 0\n chunks[i] = 0 # mark deleted\n chunks[j] = width\n nmerges -= 1\n\n return tuple(filter(None, chunks))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_find_merge_rechunk_find_merge_rechunk.return.tuple_chunks_memory_lim": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_find_merge_rechunk_find_merge_rechunk.return.tuple_chunks_memory_lim", "embedding": null, "metadata": {"file_path": "dask/array/rechunk.py", "file_name": "rechunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 348, "end_line": 418, "span_ids": ["find_merge_rechunk"], "tokens": 684}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def find_merge_rechunk(old_chunks, new_chunks, block_size_limit):\n \"\"\"\n Find an intermediate rechunk that would merge some adjacent blocks\n together in order to get us nearer the *new_chunks* target, without\n violating the *block_size_limit* (in number of elements).\n \"\"\"\n ndim = len(old_chunks)\n\n old_largest_width = [max(c) for c in old_chunks]\n new_largest_width = [max(c) for c in new_chunks]\n\n graph_size_effect = {\n dim: len(nc) / len(oc)\n for dim, (oc, nc) in enumerate(zip(old_chunks, new_chunks))\n }\n\n block_size_effect = {\n dim: new_largest_width[dim] / (old_largest_width[dim] or 1)\n for dim in range(ndim)\n }\n\n # Our goal is to reduce the number of nodes in the rechunk graph\n # by merging some adjacent chunks, so consider dimensions where we can\n # reduce the # of chunks\n merge_candidates = [dim for dim in range(ndim) if graph_size_effect[dim] <= 1.0]\n\n # Merging along each dimension reduces the graph size by a certain factor\n # and increases memory largest block size by a certain factor.\n # We want to optimize the graph size while staying below the given\n # block_size_limit. This is in effect a knapsack problem, except with\n # multiplicative values and weights. 
Just use a greedy algorithm\n # by trying dimensions in decreasing value / weight order.\n def key(k):\n gse = graph_size_effect[k]\n bse = block_size_effect[k]\n if bse == 1:\n bse = 1 + 1e-9\n return (np.log(gse) / np.log(bse)) if bse > 0 else 0\n\n sorted_candidates = sorted(merge_candidates, key=key)\n\n largest_block_size = reduce(mul, old_largest_width)\n\n chunks = list(old_chunks)\n memory_limit_hit = False\n\n for dim in sorted_candidates:\n # Examine this dimension for possible graph reduction\n new_largest_block_size = (\n largest_block_size * new_largest_width[dim] // (old_largest_width[dim] or 1)\n )\n if new_largest_block_size <= block_size_limit:\n # Full replacement by new chunks is possible\n chunks[dim] = new_chunks[dim]\n largest_block_size = new_largest_block_size\n else:\n # Try a partial rechunk, dividing the new chunks into\n # smaller pieces\n largest_width = old_largest_width[dim]\n chunk_limit = int(block_size_limit * largest_width / largest_block_size)\n c = divide_to_width(new_chunks[dim], chunk_limit)\n if len(c) <= len(old_chunks[dim]):\n # We manage to reduce the number of blocks, so do it\n chunks[dim] = c\n largest_block_size = largest_block_size * max(c) // largest_width\n\n memory_limit_hit = True\n\n assert largest_block_size == _largest_block_size(chunks)\n assert largest_block_size <= block_size_limit\n return tuple(chunks), memory_limit_hit", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_find_split_rechunk_find_split_rechunk.return.tuple_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_find_split_rechunk_find_split_rechunk.return.tuple_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/rechunk.py", "file_name": "rechunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 421, "end_line": 446, "span_ids": ["find_split_rechunk"], "tokens": 237}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def find_split_rechunk(old_chunks, new_chunks, graph_size_limit):\n \"\"\"\n Find an intermediate rechunk that would split some chunks to\n get us nearer *new_chunks*, without violating the *graph_size_limit*.\n \"\"\"\n ndim = len(old_chunks)\n\n chunks = list(old_chunks)\n\n for dim in range(ndim):\n graph_size = estimate_graph_size(chunks, new_chunks)\n if graph_size > graph_size_limit:\n break\n if len(old_chunks[dim]) > len(new_chunks[dim]):\n # It's not interesting to split\n continue\n # Merge the new chunks so as to stay within the graph size budget\n max_number = int(len(old_chunks[dim]) * graph_size_limit / graph_size)\n c = merge_to_number(new_chunks[dim], max_number)\n assert len(c) <= max_number\n # Consider the merge successful if its result has a greater length\n # and smaller max width than the old chunks\n if len(c) >= len(old_chunks[dim]) and max(c) <= max(old_chunks[dim]):\n chunks[dim] = c\n\n return tuple(chunks)", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_plan_rechunk_plan_rechunk.return.steps_new_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_plan_rechunk_plan_rechunk.return.steps_new_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/rechunk.py", "file_name": "rechunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 449, "end_line": 528, "span_ids": ["plan_rechunk"], "tokens": 631}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def plan_rechunk(\n old_chunks, new_chunks, itemsize, threshold=None, block_size_limit=None\n):\n \"\"\"Plan an iterative rechunking from *old_chunks* to *new_chunks*.\n The plan aims to minimize the rechunk graph size.\n\n Parameters\n ----------\n itemsize: int\n The item size of the array\n threshold: int\n The graph growth factor under which we don't bother\n introducing an intermediate step\n block_size_limit: int\n The maximum block size (in bytes) we want to produce during an\n intermediate step\n\n Notes\n -----\n No intermediate steps will be planned if any dimension of ``old_chunks``\n is unknown.\n \"\"\"\n threshold = threshold or config.get(\"array.rechunk-threshold\")\n block_size_limit = block_size_limit or config.get(\"array.chunk-size\")\n if isinstance(block_size_limit, str):\n block_size_limit = parse_bytes(block_size_limit)\n\n ndim = len(new_chunks)\n steps = []\n has_nans = [any(math.isnan(y) for y in x) for x in old_chunks]\n\n if ndim <= 1 or not all(new_chunks) or any(has_nans):\n # Trivial array / unknown dim => no need / ability for an intermediate\n return steps + [new_chunks]\n\n # Make it a number ef elements\n block_size_limit /= itemsize\n\n # Fix block_size_limit if too small for either old_chunks or new_chunks\n largest_old_block = _largest_block_size(old_chunks)\n largest_new_block = _largest_block_size(new_chunks)\n block_size_limit = max([block_size_limit, largest_old_block, largest_new_block])\n\n # The graph size above which to optimize\n graph_size_threshold = threshold * (\n _number_of_blocks(old_chunks) + _number_of_blocks(new_chunks)\n )\n\n current_chunks = old_chunks\n first_pass = True\n\n while True:\n graph_size = estimate_graph_size(current_chunks, new_chunks)\n if graph_size < graph_size_threshold:\n break\n\n if first_pass:\n chunks = current_chunks\n else:\n # We hit the block_size_limit in a previous merge pass =>\n # accept a significant increase in graph size in exchange for\n # 1) getting nearer the goal 2) reducing the largest block size\n # to make place for the following merge.\n # To see this pass in action, make the block_size_limit very small.\n chunks = find_split_rechunk(\n current_chunks, new_chunks, graph_size * threshold\n )\n chunks, memory_limit_hit = find_merge_rechunk(\n chunks, new_chunks, block_size_limit\n )\n if (chunks == current_chunks and not first_pass) or chunks == new_chunks:\n break\n if chunks != current_chunks:\n steps.append(chunks)\n current_chunks = chunks\n if not 
memory_limit_hit:\n break\n first_pass = False\n\n return steps + [new_chunks]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__compute_rechunk__compute_rechunk.return.Array_graph_merge_name_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__compute_rechunk__compute_rechunk.return.Array_graph_merge_name_", "embedding": null, "metadata": {"file_path": "dask/array/rechunk.py", "file_name": "rechunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 531, "end_line": 589, "span_ids": ["_compute_rechunk"], "tokens": 613}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _compute_rechunk(x, chunks):\n \"\"\"Compute the rechunk of *x* to the given *chunks*.\"\"\"\n if x.size == 0:\n # Special case for empty array, as the algorithm below does not behave correctly\n return empty(x.shape, chunks=chunks, dtype=x.dtype)\n\n ndim = x.ndim\n crossed = intersect_chunks(x.chunks, chunks)\n x2 = dict()\n intermediates = dict()\n token = tokenize(x, chunks)\n merge_name = \"rechunk-merge-\" + token\n split_name = \"rechunk-split-\" + token\n split_name_suffixes = count()\n\n # Pre-allocate old block references, to allow re-use and reduce the\n # graph's memory footprint a bit.\n old_blocks = np.empty([len(c) for c in x.chunks], dtype=\"O\")\n for index in np.ndindex(old_blocks.shape):\n old_blocks[index] = (x.name,) + index\n\n # Iterate over all new blocks\n new_index = product(*(range(len(c)) for c in chunks))\n\n for new_idx, cross1 in zip(new_index, crossed):\n key = (merge_name,) + new_idx\n old_block_indices = [[cr[i][0] for cr in cross1] for i in range(ndim)]\n subdims1 = [len(set(old_block_indices[i])) for i in range(ndim)]\n\n rec_cat_arg = np.empty(subdims1, dtype=\"O\")\n rec_cat_arg_flat = rec_cat_arg.flat\n\n # Iterate over the old blocks required to build the new block\n for rec_cat_index, ind_slices in enumerate(cross1):\n old_block_index, slices = zip(*ind_slices)\n name = (split_name, next(split_name_suffixes))\n old_index = old_blocks[old_block_index][1:]\n if all(\n slc.start == 0 and slc.stop == x.chunks[i][ind]\n for i, (slc, ind) in enumerate(zip(slices, old_index))\n ):\n rec_cat_arg_flat[rec_cat_index] = old_blocks[old_block_index]\n else:\n intermediates[name] = (getitem, old_blocks[old_block_index], slices)\n rec_cat_arg_flat[rec_cat_index] = name\n\n assert rec_cat_index == rec_cat_arg.size - 1\n\n # New block is formed by concatenation of sliced old blocks\n if all(d == 1 for d in rec_cat_arg.shape):\n x2[key] = rec_cat_arg.flat[0]\n else:\n x2[key] = (concatenate3, rec_cat_arg.tolist())\n\n del old_blocks, new_index\n\n layer = toolz.merge(x2, intermediates)\n graph = HighLevelGraph.from_collections(merge_name, layer, dependencies=[x])\n return Array(graph, merge_name, chunks, meta=x)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", 
"metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__PrettyBlocks__PrettyBlocks.__repr__.__str__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__PrettyBlocks__PrettyBlocks.__repr__.__str__", "embedding": null, "metadata": {"file_path": "dask/array/rechunk.py", "file_name": "rechunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 592, "end_line": 628, "span_ids": ["_PrettyBlocks.__init__", "_PrettyBlocks.__str__", "_PrettyBlocks", "_PrettyBlocks:2"], "tokens": 249}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _PrettyBlocks(object):\n def __init__(self, blocks):\n self.blocks = blocks\n\n def __str__(self):\n runs = []\n run = []\n repeats = 0\n for c in self.blocks:\n if run and run[-1] == c:\n if repeats == 0 and len(run) > 1:\n runs.append((None, run[:-1]))\n run = run[-1:]\n repeats += 1\n else:\n if repeats > 0:\n assert len(run) == 1\n runs.append((repeats + 1, run[-1]))\n run = []\n repeats = 0\n run.append(c)\n if run:\n if repeats == 0:\n runs.append((None, run))\n else:\n assert len(run) == 1\n runs.append((repeats + 1, run[-1]))\n\n parts = []\n for repeats, run in runs:\n if repeats is None:\n parts.append(str(run))\n else:\n parts.append(\"%d*[%s]\" % (repeats, run))\n return \" | \".join(parts)\n\n __repr__ = __str__", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_format_blocks_format_blocks.return._PrettyBlocks_blocks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_format_blocks_format_blocks.return._PrettyBlocks_blocks_", "embedding": null, "metadata": {"file_path": "dask/array/rechunk.py", "file_name": "rechunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 631, "end_line": 645, "span_ids": ["format_blocks"], "tokens": 144}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def format_blocks(blocks):\n \"\"\"\n Pretty-format *blocks*.\n\n >>> format_blocks((10, 10, 10))\n 3*[10]\n >>> format_blocks((2, 3, 4))\n [2, 3, 4]\n >>> format_blocks((10, 10, 5, 6, 2, 2, 2, 7))\n 2*[10] | [5, 6] | 3*[2] | [7]\n \"\"\"\n assert isinstance(blocks, tuple) and all(\n isinstance(x, int) or math.isnan(x) for x in blocks\n )\n return _PrettyBlocks(blocks)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_builtins_divide.return.f_a_b_dtype_dtype_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_builtins_divide.return.f_a_b_dtype_dtype_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 46, "span_ids": ["imports", "divide"], "tokens": 341}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import builtins\nfrom collections.abc import Iterable\nimport operator\nfrom functools import partial\nfrom itertools import product, repeat\nfrom math import factorial, log, ceil, log2\n\nimport numpy as np\nfrom numbers import Integral, Number\n\nfrom tlz import compose, partition_all, get, accumulate, pluck, drop\n\nfrom . import chunk\nfrom .core import _concatenate2, Array, handle_out, implements\nfrom .blockwise import blockwise\nfrom ..blockwise import lol_tuples\nfrom .creation import arange, diagonal\nfrom .utils import full_like_safe, validate_axis, compute_meta, is_arraylike\nfrom .wrap import zeros, ones\nfrom .numpy_compat import ma_divide, divide as np_divide\nfrom ..base import tokenize\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..utils import (\n ignoring,\n funcname,\n Dispatch,\n deepmap,\n getargspec,\n derived_from,\n is_series_like,\n)\nfrom .. 
import config\n\n# Generic functions to support chunks of different types\nempty_lookup = Dispatch(\"empty\")\nempty_lookup.register((object, np.ndarray), np.empty)\nempty_lookup.register(np.ma.masked_array, np.ma.empty)\ndivide_lookup = Dispatch(\"divide\")\ndivide_lookup.register((object, np.ndarray), np_divide)\ndivide_lookup.register(np.ma.masked_array, ma_divide)\n\n\ndef divide(a, b, dtype=None):\n key = lambda x: getattr(x, \"__array_priority__\", float(\"-inf\"))\n f = divide_lookup.dispatch(type(builtins.max(a, b, key=key)))\n return f(a, b, dtype=dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_reduction_reduction._General_version_of_red": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_reduction_reduction._General_version_of_red", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 41, "end_line": 139, "span_ids": ["reduction"], "tokens": 999}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def reduction(\n x,\n chunk,\n aggregate,\n axis=None,\n keepdims=False,\n dtype=None,\n split_every=None,\n combine=None,\n name=None,\n out=None,\n concatenate=True,\n output_size=1,\n meta=None,\n):\n \"\"\"General version of reductions\n\n Parameters\n ----------\n x: Array\n Data being reduced along one or more axes\n chunk: callable(x_chunk, axis, keepdims)\n First function to be executed when resolving the dask graph.\n This function is applied in parallel to all original chunks of x.\n See below for function parameters.\n combine: callable(x_chunk, axis, keepdims), optional\n Function used for intermediate recursive aggregation (see\n split_every below). If omitted, it defaults to aggregate.\n If the reduction can be performed in less than 3 steps, it will not\n be invoked at all.\n aggregate: callable(x_chunk, axis, keepdims)\n Last function to be executed when resolving the dask graph,\n producing the final output. It is always invoked, even when the reduced\n Array counts a single chunk along the reduced axes.\n axis: int or sequence of ints, optional\n Axis or axes to aggregate upon. If omitted, aggregate along all axes.\n keepdims: boolean, optional\n Whether the reduction function should preserve the reduced axes,\n leaving them at size ``output_size``, or remove them.\n dtype: np.dtype\n data type of output. This argument was previously optional, but\n leaving as ``None`` will now raise an exception.\n split_every: int >= 2 or dict(axis: int), optional\n Determines the depth of the recursive aggregation. If set to or more\n than the number of input chunks, the aggregation will be performed in\n two steps, one ``chunk`` function per input chunk and a single\n ``aggregate`` function at the end. 
If set to less than that, an\n intermediate ``combine`` function will be used, so that any one\n ``combine`` or ``aggregate`` function has no more than ``split_every``\n inputs. The depth of the aggregation graph will be\n :math:`log_{split_every}(input chunks along reduced axes)`. Setting to\n a low value can reduce cache size and network transfers, at the cost of\n more CPU and a larger dask graph.\n\n Omit to let dask heuristically decide a good default. A default can\n also be set globally with the ``split_every`` key in\n :mod:`dask.config`.\n name: str, optional\n Prefix of the keys of the intermediate and output nodes. If omitted it\n defaults to the function names.\n out: Array, optional\n Another dask array whose contents will be replaced. Omit to create a\n new one. Note that, unlike in numpy, this setting gives no performance\n benefits whatsoever, but can still be useful if one needs to preserve\n the references to a previously existing Array.\n concatenate: bool, optional\n If True (the default), the outputs of the ``chunk``/``combine``\n functions are concatenated into a single np.array before being passed\n to the ``combine``/``aggregate`` functions. If False, the input of\n ``combine`` and ``aggregate`` will be either a list of the raw outputs\n of the previous step or a single output, and the function will have to\n concatenate it itself. It can be useful to set this to False if the\n chunk and/or combine steps do not produce np.arrays.\n output_size: int >= 1, optional\n Size of the output of the ``aggregate`` function along the reduced\n axes. Ignored if keepdims is False.\n\n Returns\n -------\n dask array\n\n **Function Parameters**\n\n x_chunk: numpy.ndarray\n Individual input chunk. For ``chunk`` functions, it is one of the\n original chunks of x. For ``combine`` and ``aggregate`` functions, it's\n the concatenation of the outputs produced by the previous ``chunk`` or\n ``combine`` functions. If concatenate=False, it's a list of the raw\n outputs from the previous functions.\n axis: tuple\n Normalized list of axes to reduce upon, e.g. ``(0, )``\n Scalar, negative, and None axes have been normalized away.\n Note that some numpy reduction functions cannot reduce along multiple\n axes at once and strictly require an int in input. Such functions have\n to be wrapped to cope.\n keepdims: bool\n Whether the reduction function should preserve the reduced axes or\n remove them.\n \"\"\"\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_reduction.if_axis_is_None__reduction.return.handle_out_out_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_reduction.if_axis_is_None__reduction.return.handle_out_out_result_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 148, "end_line": 205, "span_ids": ["reduction"], "tokens": 476}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def reduction(\n x,\n chunk,\n aggregate,\n axis=None,\n keepdims=False,\n dtype=None,\n split_every=None,\n combine=None,\n name=None,\n out=None,\n concatenate=True,\n output_size=1,\n meta=None,\n):\n if axis is None:\n axis = tuple(range(x.ndim))\n if isinstance(axis, Integral):\n axis = (axis,)\n axis = validate_axis(axis, x.ndim)\n\n if dtype is None:\n raise ValueError(\"Must specify dtype\")\n if \"dtype\" in getargspec(chunk).args:\n chunk = partial(chunk, dtype=dtype)\n if \"dtype\" in getargspec(aggregate).args:\n aggregate = partial(aggregate, dtype=dtype)\n if is_series_like(x):\n x = x.values\n\n # Map chunk across all blocks\n inds = tuple(range(x.ndim))\n # The dtype of `tmp` doesn't actually matter, and may be incorrect.\n tmp = blockwise(\n chunk, inds, x, inds, axis=axis, keepdims=True, token=name, dtype=dtype or float\n )\n tmp._chunks = tuple(\n (output_size,) * len(c) if i in axis else c for i, c in enumerate(tmp.chunks)\n )\n\n if meta is None and hasattr(x, \"_meta\"):\n try:\n reduced_meta = compute_meta(\n chunk, x.dtype, x._meta, axis=axis, keepdims=True, computing_meta=True\n )\n except TypeError:\n reduced_meta = compute_meta(\n chunk, x.dtype, x._meta, axis=axis, keepdims=True\n )\n except ValueError:\n pass\n else:\n reduced_meta = None\n\n result = _tree_reduce(\n tmp,\n aggregate,\n axis,\n keepdims,\n dtype,\n split_every,\n combine,\n name=name,\n concatenate=concatenate,\n reduced_meta=reduced_meta,\n )\n if keepdims and output_size != 1:\n result._chunks = tuple(\n (output_size,) if i in axis else c for i, c in enumerate(tmp.chunks)\n )\n if meta is not None:\n result._meta = meta\n return handle_out(out, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__tree_reduce__tree_reduce.return.partial_reduce_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__tree_reduce__tree_reduce.return.partial_reduce_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", 
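A hedged sketch of the chunk/combine/aggregate contract documented above, assuming np.sum tolerates the axis/keepdims calling convention described there and that reduction remains importable from dask.array.reductions:

import numpy as np
import dask.array as da
from dask.array.reductions import reduction

x = da.arange(16, chunks=4)         # four blocks of four elements each
total = reduction(
    x,
    chunk=np.sum,                   # applied per block with keepdims=True
    aggregate=np.sum,               # folds the concatenated partials
    axis=0,
    dtype="i8",
    split_every=2,                  # binary tree of intermediate combines
)
assert int(total.compute()) == 120  # sum(range(16))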
"start_line": 198, "end_line": 253, "span_ids": ["_tree_reduce"], "tokens": 428}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _tree_reduce(\n x,\n aggregate,\n axis,\n keepdims,\n dtype,\n split_every=None,\n combine=None,\n name=None,\n concatenate=True,\n reduced_meta=None,\n):\n \"\"\"Perform the tree reduction step of a reduction.\n\n Lower level, users should use ``reduction`` or ``arg_reduction`` directly.\n \"\"\"\n # Normalize split_every\n split_every = split_every or config.get(\"split_every\", 4)\n if isinstance(split_every, dict):\n split_every = dict((k, split_every.get(k, 2)) for k in axis)\n elif isinstance(split_every, Integral):\n n = builtins.max(int(split_every ** (1 / (len(axis) or 1))), 2)\n split_every = dict.fromkeys(axis, n)\n else:\n raise ValueError(\"split_every must be a int or a dict\")\n\n # Reduce across intermediates\n depth = 1\n for i, n in enumerate(x.numblocks):\n if i in split_every and split_every[i] != 1:\n depth = int(builtins.max(depth, ceil(log(n, split_every[i]))))\n func = partial(combine or aggregate, axis=axis, keepdims=True)\n if concatenate:\n func = compose(func, partial(_concatenate2, axes=axis))\n for i in range(depth - 1):\n x = partial_reduce(\n func,\n x,\n split_every,\n True,\n dtype=dtype,\n name=(name or funcname(combine or aggregate)) + \"-partial\",\n reduced_meta=reduced_meta,\n )\n func = partial(aggregate, axis=axis, keepdims=keepdims)\n if concatenate:\n func = compose(func, partial(_concatenate2, axes=axis))\n return partial_reduce(\n func,\n x,\n split_every,\n keepdims=keepdims,\n dtype=dtype,\n name=(name or funcname(aggregate)) + \"-aggregate\",\n reduced_meta=reduced_meta,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_partial_reduce_partial_reduce.if_np_isscalar_meta_.else_.return.Array_graph_name_out_ch": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_partial_reduce_partial_reduce.if_np_isscalar_meta_.else_.return.Array_graph_name_out_ch", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 266, "end_line": 340, "span_ids": ["partial_reduce"], "tokens": 707}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def partial_reduce(\n func, x, split_every, keepdims=False, dtype=None, name=None, reduced_meta=None\n):\n \"\"\"Partial reduction across multiple axes.\n\n Parameters\n ----------\n func : function\n x : Array\n split_every : dict\n Maximum reduction block sizes in each dimension.\n\n Examples\n --------\n Reduce across axis 0 and 2, merging a maximum of 1 block in the 0th\n 
dimension, and 3 blocks in the 2nd dimension:\n\n >>> partial_reduce(np.min, x, {0: 1, 2: 3}) # doctest: +SKIP\n \"\"\"\n name = (\n (name or funcname(func)) + \"-\" + tokenize(func, x, split_every, keepdims, dtype)\n )\n parts = [\n list(partition_all(split_every.get(i, 1), range(n)))\n for (i, n) in enumerate(x.numblocks)\n ]\n keys = product(*map(range, map(len, parts)))\n out_chunks = [\n tuple(1 for p in partition_all(split_every[i], c)) if i in split_every else c\n for (i, c) in enumerate(x.chunks)\n ]\n if not keepdims:\n out_axis = [i for i in range(x.ndim) if i not in split_every]\n getter = lambda k: get(out_axis, k)\n keys = map(getter, keys)\n out_chunks = list(getter(out_chunks))\n dsk = {}\n for k, p in zip(keys, product(*parts)):\n decided = dict((i, j[0]) for (i, j) in enumerate(p) if len(j) == 1)\n dummy = dict(i for i in enumerate(p) if i[0] not in decided)\n g = lol_tuples((x.name,), range(x.ndim), decided, dummy)\n dsk[(name,) + k] = (func, g)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x])\n\n meta = x._meta\n if reduced_meta is not None:\n try:\n meta = func(reduced_meta, computing_meta=True)\n # no meta keyword argument exists for func, and it isn't required\n except TypeError:\n try:\n meta = func(reduced_meta)\n except ValueError as e:\n # min/max functions have no identity, don't apply function to meta\n if \"zero-size array to reduction operation\" in str(e):\n meta = reduced_meta\n # when no work can be computed on the empty array (e.g., func is a ufunc)\n except ValueError:\n pass\n\n # some functions can't compute empty arrays (those for which reduced_meta\n # fall into the ValueError exception) and we have to rely on reshaping\n # the array according to len(out_chunks)\n if is_arraylike(meta) and meta.ndim != len(out_chunks):\n if len(out_chunks) == 0:\n meta = meta.sum()\n else:\n meta = meta.reshape((0,) * len(out_chunks))\n\n if np.isscalar(meta):\n return Array(graph, name, out_chunks, dtype=dtype)\n else:\n with ignoring(AttributeError):\n meta = meta.astype(dtype)\n return Array(graph, name, out_chunks, meta=meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_sum_prod.return.reduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_sum_prod.return.reduction_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 328, "end_line": 360, "span_ids": ["sum", "prod"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef sum(a, axis=None, dtype=None, keepdims=False, split_every=None, out=None):\n if dtype is None:\n dtype = getattr(np.zeros(1, dtype=a.dtype).sum(), \"dtype\", object)\n result = reduction(\n a,\n chunk.sum,\n chunk.sum,\n axis=axis,\n keepdims=keepdims,\n dtype=dtype,\n split_every=split_every,\n out=out,\n )\n return 
result\n\n\n@derived_from(np)\ndef prod(a, axis=None, dtype=None, keepdims=False, split_every=None, out=None):\n if dtype is not None:\n dt = dtype\n else:\n dt = getattr(np.empty((1,), dtype=a.dtype).prod(), \"dtype\", object)\n return reduction(\n a,\n chunk.prod,\n chunk.prod,\n axis=axis,\n keepdims=keepdims,\n dtype=dt,\n split_every=split_every,\n out=out,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_min_all.return.reduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_min_all.return.reduction_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 363, "end_line": 418, "span_ids": ["max", "all", "any", "min"], "tokens": 290}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@implements(np.min, np.amin)\n@derived_from(np)\ndef min(a, axis=None, keepdims=False, split_every=None, out=None):\n return reduction(\n a,\n chunk.min,\n chunk.min,\n axis=axis,\n keepdims=keepdims,\n dtype=a.dtype,\n split_every=split_every,\n out=out,\n )\n\n\n@implements(np.max, np.amax)\n@derived_from(np)\ndef max(a, axis=None, keepdims=False, split_every=None, out=None):\n return reduction(\n a,\n chunk.max,\n chunk.max,\n axis=axis,\n keepdims=keepdims,\n dtype=a.dtype,\n split_every=split_every,\n out=out,\n )\n\n\n@derived_from(np)\ndef any(a, axis=None, keepdims=False, split_every=None, out=None):\n return reduction(\n a,\n chunk.any,\n chunk.any,\n axis=axis,\n keepdims=keepdims,\n dtype=\"bool\",\n split_every=split_every,\n out=out,\n )\n\n\n@derived_from(np)\ndef all(a, axis=None, keepdims=False, split_every=None, out=None):\n return reduction(\n a,\n chunk.all,\n chunk.all,\n axis=axis,\n keepdims=keepdims,\n dtype=\"bool\",\n split_every=split_every,\n out=out,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nansum_nansum.return.reduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nansum_nansum.return.reduction_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 421, "end_line": 436, "span_ids": ["nansum"], "tokens": 112}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef 
nansum(a, axis=None, dtype=None, keepdims=False, split_every=None, out=None):\n if dtype is not None:\n dt = dtype\n else:\n dt = getattr(chunk.nansum(np.empty((1,), dtype=a.dtype)), \"dtype\", object)\n return reduction(\n a,\n chunk.nansum,\n chunk.sum,\n axis=axis,\n keepdims=keepdims,\n dtype=dt,\n split_every=split_every,\n out=out,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_with_ignoring_AttributeEr_nanmax.return.reduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_with_ignoring_AttributeEr_nanmax.return.reduction_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 454, "end_line": 549, "span_ids": ["impl:9", "nanmax", "nanmin"], "tokens": 676}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "with ignoring(AttributeError):\n\n @derived_from(np)\n def nanprod(a, axis=None, dtype=None, keepdims=False, split_every=None, out=None):\n if dtype is not None:\n dt = dtype\n else:\n dt = getattr(chunk.nansum(np.empty((1,), dtype=a.dtype)), \"dtype\", object)\n return reduction(\n a,\n chunk.nanprod,\n chunk.prod,\n axis=axis,\n keepdims=keepdims,\n dtype=dt,\n split_every=split_every,\n out=out,\n )\n\n @derived_from(np)\n def nancumsum(x, axis, dtype=None, out=None, *, method=\"sequential\"):\n \"\"\"Dask added an additional keyword-only argument ``method``.\n\n method : {'sequential', 'blelloch'}, optional\n Choose which method to use to perform the cumsum. Default is 'sequential'.\n\n * 'sequential' performs the cumsum of each prior block before the current block.\n * 'blelloch' is a work-efficient parallel cumsum. It exposes parallelism by\n first taking the sum of each block and combines the sums via a binary tree.\n This method may be faster or more memory efficient depending on workload,\n scheduler, and hardware. More benchmarking is necessary.\n \"\"\"\n return cumreduction(\n chunk.nancumsum,\n operator.add,\n 0,\n x,\n axis,\n dtype,\n out=out,\n method=method,\n preop=np.nansum,\n )\n\n @derived_from(np)\n def nancumprod(x, axis, dtype=None, out=None, *, method=\"sequential\"):\n \"\"\"Dask added an additional keyword-only argument ``method``.\n\n method : {'sequential', 'blelloch'}, optional\n Choose which method to use to perform the cumprod. Default is 'sequential'.\n\n * 'sequential' performs the cumprod of each prior block before the current block.\n * 'blelloch' is a work-efficient parallel cumprod. It exposes parallelism by first\n taking the product of each block and combines the products via a binary tree.\n This method may be faster or more memory efficient depending on workload,\n scheduler, and hardware. 
More benchmarking is necessary.\n \"\"\"\n return cumreduction(\n chunk.nancumprod,\n operator.mul,\n 1,\n x,\n axis,\n dtype,\n out=out,\n method=method,\n preop=np.nanprod,\n )\n\n\n@derived_from(np)\ndef nanmin(a, axis=None, keepdims=False, split_every=None, out=None):\n return reduction(\n a,\n chunk.nanmin,\n chunk.nanmin,\n axis=axis,\n keepdims=keepdims,\n dtype=a.dtype,\n split_every=split_every,\n out=out,\n )\n\n\n@derived_from(np)\ndef nanmax(a, axis=None, keepdims=False, split_every=None, out=None):\n return reduction(\n a,\n chunk.nanmax,\n chunk.nanmax,\n axis=axis,\n keepdims=keepdims,\n dtype=a.dtype,\n split_every=split_every,\n out=out,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_numel_numel.return.full_like_safe_x_prod_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_numel_numel.return.full_like_safe_x_prod_s", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 495, "end_line": 524, "span_ids": ["numel"], "tokens": 236}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def numel(x, **kwargs):\n \"\"\" A reduction to count the number of elements \"\"\"\n\n if hasattr(x, \"mask\"):\n return chunk.sum(np.ones_like(x), **kwargs)\n\n shape = x.shape\n keepdims = kwargs.get(\"keepdims\", False)\n axis = kwargs.get(\"axis\", None)\n dtype = kwargs.get(\"dtype\", np.float64)\n\n if axis is None:\n prod = np.prod(shape, dtype=dtype)\n return (\n full_like_safe(x, prod, shape=(1,) * len(shape), dtype=dtype)\n if keepdims is True\n else prod\n )\n\n if not isinstance(axis, tuple or list):\n axis = [axis]\n\n prod = np.prod([shape[dim] for dim in axis])\n if keepdims is True:\n new_shape = tuple(\n shape[dim] if dim not in axis else 1 for dim in range(len(shape))\n )\n else:\n new_shape = tuple(shape[dim] for dim in range(len(shape)) if dim not in axis)\n return full_like_safe(x, prod, shape=new_shape, dtype=dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nannumel_mean_chunk.return._n_n_total_total_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nannumel_mean_chunk.return._n_n_total_total_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 527, "end_line": 541, "span_ids": ["mean_chunk", "nannumel"], "tokens": 110}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", 
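numel() above mirrors the axis/keepdims semantics of the value reduction it accompanies, so mean-style aggregations can divide totals by counts. Note that its guard `isinstance(axis, tuple or list)` evaluates as `isinstance(axis, tuple)`, which is harmless in practice because dask normalizes axis to a tuple before numel is called. A small check, assuming the module path recorded in this node:

import numpy as np
from dask.array.reductions import numel

x = np.ones((4, 5))
print(numel(x, axis=None))                    # 20.0
print(numel(x, axis=1, keepdims=True).shape)  # (4, 1)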
"start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def nannumel(x, **kwargs):\n \"\"\" A reduction to count the number of elements \"\"\"\n return chunk.sum(~(np.isnan(x)), **kwargs)\n\n\ndef mean_chunk(\n x, sum=chunk.sum, numel=numel, dtype=\"f8\", computing_meta=False, **kwargs\n):\n if computing_meta:\n return x\n n = numel(x, dtype=dtype, **kwargs)\n\n total = sum(x, dtype=dtype, **kwargs)\n\n return {\"n\": n, \"total\": total}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_mean_combine_mean_combine.return._n_n_total_total_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_mean_combine_mean_combine.return._n_n_total_total_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 601, "end_line": 622, "span_ids": ["mean_combine"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def mean_combine(\n pairs,\n sum=chunk.sum,\n numel=numel,\n dtype=\"f8\",\n axis=None,\n computing_meta=False,\n **kwargs,\n):\n if not isinstance(pairs, list):\n pairs = [pairs]\n\n ns = deepmap(lambda pair: pair[\"n\"], pairs) if not computing_meta else pairs\n n = _concatenate2(ns, axes=axis).sum(axis=axis, **kwargs)\n\n if computing_meta:\n return n\n\n totals = deepmap(lambda pair: pair[\"total\"], pairs)\n total = _concatenate2(totals, axes=axis).sum(axis=axis, **kwargs)\n\n return {\"n\": n, \"total\": total}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_mean_agg_mean_agg.return.divide_total_n_dtype_dt": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_mean_agg_mean_agg.return.divide_total_n_dtype_dt", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 568, "end_line": 579, "span_ids": ["mean_agg"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def mean_agg(pairs, dtype=\"f8\", axis=None, computing_meta=False, **kwargs):\n ns = deepmap(lambda pair: pair[\"n\"], pairs) if not computing_meta else pairs\n n = _concatenate2(ns, axes=axis)\n n = 
np.sum(n, axis=axis, dtype=dtype, **kwargs)\n\n if computing_meta:\n return n\n\n totals = deepmap(lambda pair: pair[\"total\"], pairs)\n total = _concatenate2(totals, axes=axis).sum(axis=axis, dtype=dtype, **kwargs)\n\n return divide(total, n, dtype=dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_mean_mean.return.reduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_mean_mean.return.reduction_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 639, "end_line": 658, "span_ids": ["mean"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef mean(a, axis=None, dtype=None, keepdims=False, split_every=None, out=None):\n if dtype is not None:\n dt = dtype\n elif a.dtype == object:\n dt = object\n else:\n dt = getattr(np.mean(np.zeros(shape=(1,), dtype=a.dtype)), \"dtype\", object)\n return reduction(\n a,\n mean_chunk,\n mean_agg,\n axis=axis,\n keepdims=keepdims,\n dtype=dt,\n split_every=split_every,\n combine=mean_combine,\n out=out,\n concatenate=False,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nanmean_None_1.nanmean.derived_from_np_nanmean_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nanmean_None_1.nanmean.derived_from_np_nanmean_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 602, "end_line": 623, "span_ids": ["impl:10", "nanmean"], "tokens": 162}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef nanmean(a, axis=None, dtype=None, keepdims=False, split_every=None, out=None):\n if dtype is not None:\n dt = dtype\n else:\n dt = getattr(np.mean(np.empty(shape=(1,), dtype=a.dtype)), \"dtype\", object)\n return reduction(\n a,\n partial(mean_chunk, sum=chunk.nansum, numel=nannumel),\n mean_agg,\n axis=axis,\n keepdims=keepdims,\n dtype=dt,\n split_every=split_every,\n out=out,\n concatenate=False,\n combine=partial(mean_combine, sum=chunk.nansum, numel=nannumel),\n )\n\n\nwith ignoring(AttributeError):\n nanmean = derived_from(np)(nanmean)", "start_char_idx": null, "end_char_idx": null, "text_template": 
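The mean pipeline above never materializes per-block means: mean_chunk and mean_combine carry {"n": ..., "total": ...} dicts, and only mean_agg performs the division. A toy two-block illustration in plain numpy:

import numpy as np

blocks = [np.array([1.0, 2.0, 3.0]), np.array([4.0, 5.0])]
n = sum(b.size for b in blocks)       # the count numel() tallies: 5
total = sum(b.sum() for b in blocks)  # the sum mean_chunk() produces: 15.0
print(total / n)                      # the final divide() in mean_agg: 3.0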
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_moment_chunk_moment_chunk.return._total_total_n_n_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_moment_chunk_moment_chunk.return._total_total_n_n_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 626, "end_line": 639, "span_ids": ["moment_chunk"], "tokens": 154}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def moment_chunk(\n A, order=2, sum=chunk.sum, numel=numel, dtype=\"f8\", computing_meta=False, **kwargs\n):\n if computing_meta:\n return A\n n = numel(A, **kwargs)\n\n n = n.astype(np.int64)\n total = sum(A, dtype=dtype, **kwargs)\n with np.errstate(divide=\"ignore\", invalid=\"ignore\"):\n u = total / n\n xs = [sum((A - u) ** i, dtype=dtype, **kwargs) for i in range(2, order + 1)]\n M = np.stack(xs, axis=-1)\n return {\"total\": total, \"n\": n, \"M\": M}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__moment_helper__moment_helper.return.M": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__moment_helper__moment_helper.return.M", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 642, "end_line": 649, "span_ids": ["_moment_helper"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _moment_helper(Ms, ns, inner_term, order, sum, axis, kwargs):\n M = Ms[..., order - 2].sum(axis=axis, **kwargs) + sum(\n ns * inner_term ** order, axis=axis, **kwargs\n )\n for k in range(1, order - 1):\n coeff = factorial(order) / (factorial(k) * factorial(order - k))\n M += coeff * sum(Ms[..., order - k - 2] * inner_term ** k, axis=axis, **kwargs)\n return M", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_moment_combine_moment_combine.return._total_total_n_n_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_moment_combine_moment_combine.return._total_total_n_n_", 
"embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 711, "end_line": 748, "span_ids": ["moment_combine"], "tokens": 299}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def moment_combine(\n pairs,\n order=2,\n ddof=0,\n dtype=\"f8\",\n sum=np.sum,\n axis=None,\n computing_meta=False,\n **kwargs,\n):\n if not isinstance(pairs, list):\n pairs = [pairs]\n\n kwargs[\"dtype\"] = dtype\n kwargs[\"keepdims\"] = True\n\n ns = deepmap(lambda pair: pair[\"n\"], pairs) if not computing_meta else pairs\n ns = _concatenate2(ns, axes=axis)\n n = ns.sum(axis=axis, **kwargs)\n\n if computing_meta:\n return n\n\n totals = _concatenate2(deepmap(lambda pair: pair[\"total\"], pairs), axes=axis)\n Ms = _concatenate2(deepmap(lambda pair: pair[\"M\"], pairs), axes=axis)\n\n total = totals.sum(axis=axis, **kwargs)\n\n with np.errstate(divide=\"ignore\", invalid=\"ignore\"):\n mu = divide(total, n, dtype=dtype)\n inner_term = divide(totals, ns, dtype=dtype) - mu\n\n xs = [\n _moment_helper(Ms, ns, inner_term, o, sum, axis, kwargs)\n for o in range(2, order + 1)\n ]\n M = np.stack(xs, axis=-1)\n return {\"total\": total, \"n\": n, \"M\": M}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_moment_agg_moment_agg.return.divide_M_denominator_dt": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_moment_agg_moment_agg.return.divide_M_denominator_dt", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 751, "end_line": 796, "span_ids": ["moment_agg"], "tokens": 383}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def moment_agg(\n pairs,\n order=2,\n ddof=0,\n dtype=\"f8\",\n sum=np.sum,\n axis=None,\n computing_meta=False,\n **kwargs,\n):\n if not isinstance(pairs, list):\n pairs = [pairs]\n\n kwargs[\"dtype\"] = dtype\n # To properly handle ndarrays, the original dimensions need to be kept for\n # part of the calculation.\n keepdim_kw = kwargs.copy()\n keepdim_kw[\"keepdims\"] = True\n\n ns = deepmap(lambda pair: pair[\"n\"], pairs) if not computing_meta else pairs\n ns = _concatenate2(ns, axes=axis)\n n = ns.sum(axis=axis, **keepdim_kw)\n\n if computing_meta:\n return n\n\n totals = _concatenate2(deepmap(lambda pair: pair[\"total\"], pairs), axes=axis)\n Ms = _concatenate2(deepmap(lambda pair: pair[\"M\"], pairs), axes=axis)\n\n mu = divide(totals.sum(axis=axis, **keepdim_kw), n, dtype=dtype)\n\n with np.errstate(divide=\"ignore\", invalid=\"ignore\"):\n inner_term = 
divide(totals, ns, dtype=dtype) - mu\n\n M = _moment_helper(Ms, ns, inner_term, order, sum, axis, kwargs)\n\n denominator = n.sum(axis=axis, **kwargs) - ddof\n\n # taking care of the edge case with empty or all-nans array with ddof > 0\n if isinstance(denominator, Number):\n if denominator < 0:\n denominator = np.nan\n elif denominator is not np.ma.masked:\n denominator[denominator < 0] = np.nan\n\n return divide(M, denominator, dtype=dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_moment_moment.return.reduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_moment_moment.return.reduction_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 799, "end_line": 832, "span_ids": ["moment"], "tokens": 286}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def moment(\n a, order, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None\n):\n if not isinstance(order, Integral) or order < 0:\n raise ValueError(\"Order must be an integer >= 0\")\n\n if order < 2:\n reduced = a.sum(axis=axis) # get reduced shape and chunks\n if order == 0:\n # When order equals 0, the result is 1, by definition.\n return ones(\n reduced.shape, chunks=reduced.chunks, dtype=\"f8\", meta=reduced._meta\n )\n # By definition the first order about the mean is 0.\n return zeros(\n reduced.shape, chunks=reduced.chunks, dtype=\"f8\", meta=reduced._meta\n )\n\n if dtype is not None:\n dt = dtype\n else:\n dt = getattr(np.var(np.ones(shape=(1,), dtype=a.dtype)), \"dtype\", object)\n return reduction(\n a,\n partial(moment_chunk, order=order),\n partial(moment_agg, order=order, ddof=ddof),\n axis=axis,\n keepdims=keepdims,\n dtype=dt,\n split_every=split_every,\n out=out,\n concatenate=False,\n combine=partial(moment_combine, order=order),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_var_var.return.reduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_var_var.return.reduction_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 768, "end_line": 786, "span_ids": ["var"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
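A usage sketch for moment() above, assuming it remains importable from dask.array.reductions; the order-0 and order-2 outputs follow directly from the special cases and the ddof handling in moment_agg:

import numpy as np
import dask.array as da
from dask.array.reductions import moment

x = da.from_array(np.array([1.0, 2.0, 3.0, 4.0]), chunks=2)
print(float(moment(x, 2).compute()))  # population variance (ddof=0): 1.25
print(float(moment(x, 0).compute()))  # 1.0 by definition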
"relationships": {}, "text": "@derived_from(np)\ndef var(a, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None):\n if dtype is not None:\n dt = dtype\n else:\n dt = getattr(np.var(np.ones(shape=(1,), dtype=a.dtype)), \"dtype\", object)\n return reduction(\n a,\n moment_chunk,\n partial(moment_agg, ddof=ddof),\n axis=axis,\n keepdims=keepdims,\n dtype=dt,\n split_every=split_every,\n combine=moment_combine,\n name=\"var\",\n out=out,\n concatenate=False,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nanvar_nanvar.return.reduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nanvar_nanvar.return.reduction_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 789, "end_line": 808, "span_ids": ["nanvar"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef nanvar(\n a, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None\n):\n if dtype is not None:\n dt = dtype\n else:\n dt = getattr(np.var(np.ones(shape=(1,), dtype=a.dtype)), \"dtype\", object)\n return reduction(\n a,\n partial(moment_chunk, sum=chunk.nansum, numel=nannumel),\n partial(moment_agg, sum=np.nansum, ddof=ddof),\n axis=axis,\n keepdims=keepdims,\n dtype=dt,\n split_every=split_every,\n combine=partial(moment_combine, sum=np.nansum),\n out=out,\n concatenate=False,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_None_2_safe_sqrt.return._sqrt_a_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_None_2_safe_sqrt.return._sqrt_a_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 811, "end_line": 833, "span_ids": ["_sqrt", "impl:13", "safe_sqrt"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "with ignoring(AttributeError):\n nanvar = derived_from(np)(nanvar)\n\n\ndef _sqrt(a):\n o = np.sqrt(a)\n if isinstance(o, np.ma.masked_array) and not o.shape and o.mask.all():\n return np.ma.masked\n return o\n\n\ndef safe_sqrt(a):\n \"\"\"A version of sqrt that properly handles scalar masked arrays.\n\n To mimic ``np.ma`` reductions, we need to convert 
scalar masked arrays that\n have an active mask to the ``np.ma.masked`` singleton. This is properly\n handled automatically for reduction code, but not for ufuncs. We implement\n a simple version here, since calling `np.ma.sqrt` everywhere is\n significantly more expensive.\n \"\"\"\n if hasattr(a, \"_elemwise\"):\n return a._elemwise(_sqrt, a)\n return _sqrt(a)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_std_nanstd.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_std_nanstd.return.result", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 836, "end_line": 871, "span_ids": ["std", "nanstd"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef std(a, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None):\n result = safe_sqrt(\n var(\n a,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n ddof=ddof,\n split_every=split_every,\n out=out,\n )\n )\n if dtype and dtype != result.dtype:\n result = result.astype(dtype)\n return result\n\n\n@derived_from(np)\ndef nanstd(\n a, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None\n):\n result = safe_sqrt(\n nanvar(\n a,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n ddof=ddof,\n split_every=split_every,\n out=out,\n )\n )\n if dtype and dtype != result.dtype:\n result = result.astype(dtype)\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_None_3__arg_combine.return.arg_vals": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_None_3__arg_combine.return.arg_vals", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 874, "end_line": 897, "span_ids": ["impl:16", "_arg_combine"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "with ignoring(AttributeError):\n nanstd = derived_from(np)(nanstd)\n\n\ndef _arg_combine(data, axis, argfunc, keepdims=False):\n \"\"\" Merge intermediate results from ``arg_*`` functions\"\"\"\n axis = None if len(axis) == data.ndim or data.ndim == 1 else axis[0]\n vals = data[\"vals\"]\n arg = data[\"arg\"]\n if axis is 
None:\n local_args = argfunc(vals, axis=axis, keepdims=keepdims)\n vals = vals.ravel()[local_args]\n arg = arg.ravel()[local_args]\n else:\n local_args = argfunc(vals, axis=axis)\n inds = np.ogrid[tuple(map(slice, local_args.shape))]\n inds.insert(axis, local_args)\n inds = tuple(inds)\n vals = vals[inds]\n arg = arg[inds]\n if keepdims:\n vals = np.expand_dims(vals, axis)\n arg = np.expand_dims(arg, axis)\n return arg, vals", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_arg_chunk_arg_chunk.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_arg_chunk_arg_chunk.return.result", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 900, "end_line": 924, "span_ids": ["arg_chunk"], "tokens": 251}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def arg_chunk(func, argfunc, x, axis, offset_info):\n arg_axis = None if len(axis) == x.ndim or x.ndim == 1 else axis[0]\n vals = func(x, axis=arg_axis, keepdims=True)\n arg = argfunc(x, axis=arg_axis, keepdims=True)\n if arg_axis is None:\n offset, total_shape = offset_info\n ind = np.unravel_index(arg.ravel()[0], x.shape)\n total_ind = tuple(o + i for (o, i) in zip(offset, ind))\n arg[:] = np.ravel_multi_index(total_ind, total_shape)\n else:\n arg += offset_info\n\n if isinstance(vals, np.ma.masked_array):\n if \"min\" in argfunc.__name__:\n fill_value = np.ma.minimum_fill_value(vals)\n else:\n fill_value = np.ma.maximum_fill_value(vals)\n vals = np.ma.filled(vals, fill_value)\n\n result = np.empty(\n shape=vals.shape, dtype=[(\"vals\", vals.dtype), (\"arg\", arg.dtype)]\n )\n result[\"vals\"] = vals\n result[\"arg\"] = arg\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_arg_combine_nanarg_agg.return.arg": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_arg_combine_nanarg_agg.return.arg", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 927, "end_line": 945, "span_ids": ["arg_agg", "nanarg_agg", "arg_combine"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def arg_combine(func, argfunc, data, axis=None, **kwargs):\n arg, vals 
= _arg_combine(data, axis, argfunc, keepdims=True)\n result = np.empty(\n shape=vals.shape, dtype=[(\"vals\", vals.dtype), (\"arg\", arg.dtype)]\n )\n result[\"vals\"] = vals\n result[\"arg\"] = arg\n return result\n\n\ndef arg_agg(func, argfunc, data, axis=None, **kwargs):\n return _arg_combine(data, axis, argfunc, keepdims=False)[0]\n\n\ndef nanarg_agg(func, argfunc, data, axis=None, **kwargs):\n arg, vals = _arg_combine(data, axis, argfunc, keepdims=False)\n if np.any(np.isnan(vals)):\n raise ValueError(\"All NaN slice encountered\")\n return arg", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_arg_reduction_arg_reduction.return.handle_out_out_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_arg_reduction_arg_reduction.return.handle_out_out_result_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 948, "end_line": 1004, "span_ids": ["arg_reduction"], "tokens": 543}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def arg_reduction(x, chunk, combine, agg, axis=None, split_every=None, out=None):\n \"\"\"Generic function for argreduction.\n\n Parameters\n ----------\n x : Array\n chunk : callable\n Partialed ``arg_chunk``.\n combine : callable\n Partialed ``arg_combine``.\n agg : callable\n Partialed ``arg_agg``.\n axis : int, optional\n split_every : int or dict, optional\n \"\"\"\n if axis is None:\n axis = tuple(range(x.ndim))\n ravel = True\n elif isinstance(axis, Integral):\n axis = validate_axis(axis, x.ndim)\n axis = (axis,)\n ravel = x.ndim == 1\n else:\n raise TypeError(\"axis must be either `None` or int, got '{0}'\".format(axis))\n\n for ax in axis:\n chunks = x.chunks[ax]\n if len(chunks) > 1 and np.isnan(chunks).any():\n raise ValueError(\n \"Arg-reductions do not work with arrays that have \"\n \"unknown chunksizes. 
At some point in your computation \"\n \"this array lost chunking information.\\n\\n\"\n \"A possible solution is with \\n\"\n \" x.compute_chunk_sizes()\"\n )\n\n # Map chunk across all blocks\n name = \"arg-reduce-{0}\".format(tokenize(axis, x, chunk, combine, split_every))\n old = x.name\n keys = list(product(*map(range, x.numblocks)))\n offsets = list(product(*(accumulate(operator.add, bd[:-1], 0) for bd in x.chunks)))\n if ravel:\n offset_info = zip(offsets, repeat(x.shape))\n else:\n offset_info = pluck(axis[0], offsets)\n\n chunks = tuple((1,) * len(c) if i in axis else c for (i, c) in enumerate(x.chunks))\n dsk = dict(\n ((name,) + k, (chunk, (old,) + k, axis, off))\n for (k, off) in zip(keys, offset_info)\n )\n # The dtype of `tmp` doesn't actually matter, just need to provide something\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x])\n tmp = Array(graph, name, chunks, dtype=x.dtype)\n dtype = np.argmin([1]).dtype\n result = _tree_reduce(tmp, agg, axis, False, dtype, split_every, combine)\n return handle_out(out, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_make_arg_reduction_make_arg_reduction.return.derived_from_np_wrapped_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_make_arg_reduction_make_arg_reduction.return.derived_from_np_wrapped_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1007, "end_line": 1031, "span_ids": ["make_arg_reduction"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def make_arg_reduction(func, argfunc, is_nan_func=False):\n \"\"\"Create an argreduction callable\n\n Parameters\n ----------\n func : callable\n The reduction (e.g. ``min``)\n argfunc : callable\n The argreduction (e.g. 
``argmin``)\n \"\"\"\n chunk = partial(arg_chunk, func, argfunc)\n combine = partial(arg_combine, func, argfunc)\n if is_nan_func:\n agg = partial(nanarg_agg, func, argfunc)\n else:\n agg = partial(arg_agg, func, argfunc)\n\n def wrapped(x, axis=None, split_every=None, out=None):\n return arg_reduction(\n x, chunk, combine, agg, axis, split_every=split_every, out=out\n )\n\n wrapped.__name__ = func.__name__\n\n return derived_from(np)(wrapped)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__nanargmin_nanargmax.make_arg_reduction_chunk_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__nanargmin_nanargmax.make_arg_reduction_chunk_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1034, "end_line": 1051, "span_ids": ["_nanargmax", "impl:19", "_nanargmin"], "tokens": 170}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _nanargmin(x, axis, **kwargs):\n try:\n return chunk.nanargmin(x, axis, **kwargs)\n except ValueError:\n return chunk.nanargmin(np.where(np.isnan(x), np.inf, x), axis, **kwargs)\n\n\ndef _nanargmax(x, axis, **kwargs):\n try:\n return chunk.nanargmax(x, axis, **kwargs)\n except ValueError:\n return chunk.nanargmax(np.where(np.isnan(x), -np.inf, x), axis, **kwargs)\n\n\nargmin = make_arg_reduction(chunk.min, chunk.argmin)\nargmax = make_arg_reduction(chunk.max, chunk.argmax)\nnanargmin = make_arg_reduction(chunk.nanmin, _nanargmin, True)\nnanargmax = make_arg_reduction(chunk.nanmax, _nanargmax, True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_topk_topk.return.reduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_topk_topk.return.reduction_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1147, "end_line": 1203, "span_ids": ["topk"], "tokens": 459}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def topk(a, k, axis=-1, split_every=None):\n \"\"\"Extract the k largest elements from a on the given axis,\n and return them sorted from largest to smallest.\n If k is negative, extract the -k smallest elements instead,\n and return them sorted from smallest to largest.\n\n This 
performs best when ``k`` is much smaller than the chunk size. All\n results will be returned in a single chunk along the given axis.\n\n Parameters\n ----------\n a: Array\n Data being sorted\n k: int\n axis: int, optional\n split_every: int >=2, optional\n See :func:`reduce`. This parameter becomes very important when k is\n of the same order of magnitude as the chunk size or larger, as it\n prevents getting the whole or a significant portion of the input array\n in memory all at once, which also has a negative impact on network\n transfer when running on distributed.\n\n Returns\n -------\n Selection of a with size abs(k) along the given axis.\n\n Examples\n --------\n >>> import dask.array as da\n >>> x = np.array([5, 1, 3, 6])\n >>> d = da.from_array(x, chunks=2)\n >>> d.topk(2).compute()\n array([6, 5])\n >>> d.topk(-2).compute()\n array([1, 3])\n \"\"\"\n axis = validate_axis(axis, a.ndim)\n\n # chunk and combine steps of the reduction, which recursively invoke\n # np.partition to pick the top/bottom k elements from the previous step.\n # The selection is not sorted internally.\n chunk_combine = partial(chunk.topk, k=k)\n # aggregate step of the reduction. Internally invokes the chunk/combine\n # function, then sorts the results internally.\n aggregate = partial(chunk.topk_aggregate, k=k)\n\n return reduction(\n a,\n chunk=chunk_combine,\n combine=chunk_combine,\n aggregate=aggregate,\n axis=axis,\n keepdims=True,\n dtype=a.dtype,\n split_every=split_every,\n output_size=abs(k),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_argtopk_argtopk.return.reduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_argtopk_argtopk.return.reduction_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1206, "end_line": 1275, "span_ids": ["argtopk"], "tokens": 610}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def argtopk(a, k, axis=-1, split_every=None):\n \"\"\"Extract the indices of the k largest elements from a on the given axis,\n and return them sorted from largest to smallest. If k is negative, extract\n the indices of the -k smallest elements instead, and return them sorted\n from smallest to largest.\n\n This performs best when ``k`` is much smaller than the chunk size. All\n results will be returned in a single chunk along the given axis.\n\n Parameters\n ----------\n a: Array\n Data being sorted\n k: int\n axis: int, optional\n split_every: int >=2, optional\n See :func:`topk`. 
The performance considerations for topk also apply\n here.\n\n Returns\n -------\n Selection of np.intp indices of a with size abs(k) along the given axis.\n\n Examples\n --------\n >>> import dask.array as da\n >>> x = np.array([5, 1, 3, 6])\n >>> d = da.from_array(x, chunks=2)\n >>> d.argtopk(2).compute()\n array([3, 0])\n >>> d.argtopk(-2).compute()\n array([1, 2])\n \"\"\"\n axis = validate_axis(axis, a.ndim)\n\n # Generate nodes where every chunk is a tuple of (a, original index of a)\n idx = arange(a.shape[axis], chunks=(a.chunks[axis],), dtype=np.intp)\n idx = idx[tuple(slice(None) if i == axis else np.newaxis for i in range(a.ndim))]\n a_plus_idx = a.map_blocks(chunk.argtopk_preprocess, idx, dtype=object)\n\n # chunk and combine steps of the reduction. They take as input a tuple\n # of (a, original indices of a) and return another tuple containing the top\n # k elements of a and the matching original indices. The selection is not\n # sorted internally, as in np.argpartition.\n chunk_combine = partial(chunk.argtopk, k=k)\n # aggregate step of the reduction. Internally invokes the chunk/combine\n # function, then sorts the results internally, drops a and returns the\n # index only.\n aggregate = partial(chunk.argtopk_aggregate, k=k)\n\n if isinstance(axis, Number):\n naxis = 1\n else:\n naxis = len(axis)\n\n meta = a._meta.astype(np.intp).reshape((0,) * (a.ndim - naxis + 1))\n\n return reduction(\n a_plus_idx,\n chunk=chunk_combine,\n combine=chunk_combine,\n aggregate=aggregate,\n axis=axis,\n keepdims=True,\n dtype=np.intp,\n split_every=split_every,\n concatenate=False,\n output_size=abs(k),\n meta=meta,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_trace_median.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_trace_median.return.result", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1278, "end_line": 1313, "span_ids": ["median", "trace"], "tokens": 285}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef trace(a, offset=0, axis1=0, axis2=1, dtype=None):\n return diagonal(a, offset=offset, axis1=axis1, axis2=axis2).sum(-1, dtype=dtype)\n\n\n@derived_from(np)\ndef median(a, axis=None, keepdims=False, out=None):\n \"\"\"\n This works by automatically chunking the reduced axes to a single chunk\n and then calling the ``numpy.median`` function across the remaining dimensions\n \"\"\"\n if axis is None:\n raise NotImplementedError(\n \"The da.median function only works along an axis. 
\"\n \"The full algorithm is difficult to do in parallel\"\n )\n\n if not isinstance(axis, Iterable):\n axis = (axis,)\n\n axis = [ax + a.ndim if ax < 0 else ax for ax in axis]\n\n a = a.rechunk({ax: -1 if ax in axis else \"auto\" for ax in range(a.ndim)})\n\n result = a.map_blocks(\n np.median,\n axis=axis,\n keepdims=keepdims,\n drop_axis=axis if not keepdims else None,\n chunks=[1 if ax in axis else c for ax, c in enumerate(a.chunks)]\n if keepdims\n else None,\n )\n\n result = handle_out(out, result)\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nanmedian_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nanmedian_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1316, "end_line": 1347, "span_ids": ["nanmedian"], "tokens": 239}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef nanmedian(a, axis=None, keepdims=False, out=None):\n \"\"\"\n This works by automatically chunking the reduced axes to a single chunk\n and then calling ``numpy.nanmedian`` function across the remaining dimensions\n \"\"\"\n if axis is None:\n raise NotImplementedError(\n \"The da.nanmedian function only works along an axis or a subset of axes. 
\"\n \"The full algorithm is difficult to do in parallel\"\n )\n\n if not isinstance(axis, Iterable):\n axis = (axis,)\n\n axis = [ax + a.ndim if ax < 0 else ax for ax in axis]\n\n a = a.rechunk({ax: -1 if ax in axis else \"auto\" for ax in range(a.ndim)})\n\n result = a.map_blocks(\n np.nanmedian,\n axis=axis,\n keepdims=keepdims,\n drop_axis=axis if not keepdims else None,\n chunks=[1 if ax in axis else c for ax, c in enumerate(a.chunks)]\n if keepdims\n else None,\n )\n\n result = handle_out(out, result)\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reshape.py_from_functools_import_red_reshape_rechunk.return.tuple_result_inchunks_t": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reshape.py_from_functools_import_red_reshape_rechunk.return.tuple_result_inchunks_t", "embedding": null, "metadata": {"file_path": "dask/array/reshape.py", "file_name": "reshape.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 89, "span_ids": ["imports", "reshape_rechunk"], "tokens": 905}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from functools import reduce\nfrom itertools import product\nfrom operator import mul\n\nimport numpy as np\n\nfrom .core import Array\nfrom .utils import meta_from_array\nfrom ..base import tokenize\nfrom ..core import flatten\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..utils import M\n\n\ndef reshape_rechunk(inshape, outshape, inchunks):\n assert all(isinstance(c, tuple) for c in inchunks)\n ii = len(inshape) - 1\n oi = len(outshape) - 1\n result_inchunks = [None for i in range(len(inshape))]\n result_outchunks = [None for i in range(len(outshape))]\n\n while ii >= 0 or oi >= 0:\n if inshape[ii] == outshape[oi]:\n result_inchunks[ii] = inchunks[ii]\n result_outchunks[oi] = inchunks[ii]\n ii -= 1\n oi -= 1\n continue\n din = inshape[ii]\n dout = outshape[oi]\n if din == 1:\n result_inchunks[ii] = (1,)\n ii -= 1\n elif dout == 1:\n result_outchunks[oi] = (1,)\n oi -= 1\n elif din < dout: # (4, 4, 4) -> (64,)\n ileft = ii - 1\n while (\n ileft >= 0 and reduce(mul, inshape[ileft : ii + 1]) < dout\n ): # 4 < 64, 4*4 < 64, 4*4*4 == 64\n ileft -= 1\n if reduce(mul, inshape[ileft : ii + 1]) != dout:\n raise ValueError(\"Shapes not compatible\")\n\n # Special case to avoid intermediate rechunking:\n # When all the lower axis are completely chunked (chunksize=1) then\n # we're simply moving around blocks.\n if all(len(inchunks[i]) == inshape[i] for i in range(ii)):\n for i in range(ii + 1):\n result_inchunks[i] = inchunks[i]\n result_outchunks[oi] = inchunks[i] * np.prod(\n list(map(len, inchunks[:i]))\n )\n else:\n for i in range(ileft + 1, ii + 1): # need single-shape dimensions\n result_inchunks[i] = (inshape[i],) # chunks[i] = (4,)\n\n chunk_reduction = reduce(mul, map(len, inchunks[ileft + 1 : ii + 1]))\n result_inchunks[ileft] = expand_tuple(inchunks[ileft], chunk_reduction)\n\n prod = reduce(mul, inshape[ileft + 1 : ii 
+ 1]) # 16\n result_outchunks[oi] = tuple(\n prod * c for c in result_inchunks[ileft]\n ) # (1, 1, 1, 1) .* 16\n\n oi -= 1\n ii = ileft - 1\n elif din > dout: # (64,) -> (4, 4, 4)\n oleft = oi - 1\n while oleft >= 0 and reduce(mul, outshape[oleft : oi + 1]) < din:\n oleft -= 1\n if reduce(mul, outshape[oleft : oi + 1]) != din:\n raise ValueError(\"Shapes not compatible\")\n\n # TODO: don't coalesce shapes unnecessarily\n cs = reduce(mul, outshape[oleft + 1 : oi + 1])\n\n result_inchunks[ii] = contract_tuple(inchunks[ii], cs) # (16, 16, 16, 16)\n\n for i in range(oleft + 1, oi + 1):\n result_outchunks[i] = (outshape[i],)\n\n result_outchunks[oleft] = tuple(c // cs for c in result_inchunks[ii])\n\n oi = oleft - 1\n ii -= 1\n\n return tuple(result_inchunks), tuple(result_outchunks)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reshape.py_expand_tuple_expand_tuple.return.tuple_out_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reshape.py_expand_tuple_expand_tuple.return.tuple_out_", "embedding": null, "metadata": {"file_path": "dask/array/reshape.py", "file_name": "reshape.py", "file_type": "text/x-python", "category": "implementation", "start_line": 82, "end_line": 110, "span_ids": ["expand_tuple"], "tokens": 207}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def expand_tuple(chunks, factor):\n \"\"\"\n\n >>> expand_tuple((2, 4), 2)\n (1, 1, 2, 2)\n\n >>> expand_tuple((2, 4), 3)\n (1, 1, 1, 1, 2)\n\n >>> expand_tuple((3, 4), 2)\n (1, 2, 2, 2)\n\n >>> expand_tuple((7, 4), 3)\n (2, 2, 3, 1, 1, 2)\n \"\"\"\n if factor == 1:\n return chunks\n\n out = []\n for c in chunks:\n x = c\n part = max(x / factor, 1)\n while x >= 2 * part:\n out.append(int(part))\n x -= int(part)\n if x:\n out.append(x)\n assert sum(chunks) == sum(out)\n return tuple(out)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reshape.py_contract_tuple_contract_tuple.return.tuple_out_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reshape.py_contract_tuple_contract_tuple.return.tuple_out_", "embedding": null, "metadata": {"file_path": "dask/array/reshape.py", "file_name": "reshape.py", "file_type": "text/x-python", "category": "implementation", "start_line": 113, "end_line": 133, "span_ids": ["contract_tuple"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def contract_tuple(chunks, factor):\n \"\"\"Return simple chunks 
tuple such that factor divides all elements\n\n Examples\n --------\n\n >>> contract_tuple((2, 2, 8, 4), 4)\n (4, 8, 4)\n \"\"\"\n assert sum(chunks) % factor == 0\n\n out = []\n residual = 0\n for chunk in chunks:\n chunk += residual\n div = chunk // factor\n residual = chunk % factor\n good = factor * div\n if good:\n out.append(good)\n return tuple(out)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reshape.py_reshape_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reshape.py_reshape_", "embedding": null, "metadata": {"file_path": "dask/array/reshape.py", "file_name": "reshape.py", "file_type": "text/x-python", "category": "implementation", "start_line": 136, "end_line": 206, "span_ids": ["reshape"], "tokens": 709}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def reshape(x, shape):\n \"\"\"Reshape array to new shape\n\n This is a parallelized version of the ``np.reshape`` function with the\n following limitations:\n\n 1. It assumes that the array is stored in `row-major order`_\n 2. It only allows for reshapings that collapse or merge dimensions like\n ``(1, 2, 3, 4) -> (1, 6, 4)`` or ``(64,) -> (4, 4, 4)``\n\n .. _`row-major order`: https://en.wikipedia.org/wiki/Row-_and_column-major_order\n\n When communication is necessary this algorithm depends on the logic within\n rechunk. It endeavors to keep chunk sizes roughly the same when possible.\n\n See Also\n --------\n dask.array.rechunk\n numpy.reshape\n \"\"\"\n # Sanitize inputs, look for -1 in shape\n from .slicing import sanitize_index\n\n shape = tuple(map(sanitize_index, shape))\n known_sizes = [s for s in shape if s != -1]\n if len(known_sizes) < len(shape):\n if len(shape) - len(known_sizes) > 1:\n raise ValueError(\"can only specify one unknown dimension\")\n # Fastpath for x.reshape(-1) on 1D arrays, allows unknown shape in x\n # for this case only.\n if len(shape) == 1 and x.ndim == 1:\n return x\n missing_size = sanitize_index(x.size / reduce(mul, known_sizes, 1))\n shape = tuple(missing_size if s == -1 else s for s in shape)\n\n if np.isnan(sum(x.shape)):\n raise ValueError(\n \"Array chunk size or shape is unknown. 
shape: %s\\n\\n\"\n \"Possible solution with x.compute_chunk_sizes()\" % x.shape\n )\n\n if reduce(mul, shape, 1) != x.size:\n raise ValueError(\"total size of new array must be unchanged\")\n\n if x.shape == shape:\n return x\n\n meta = meta_from_array(x, len(shape))\n\n name = \"reshape-\" + tokenize(x, shape)\n\n if x.npartitions == 1:\n key = next(flatten(x.__dask_keys__()))\n dsk = {(name,) + (0,) * len(shape): (M.reshape, key, shape)}\n chunks = tuple((d,) for d in shape)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x])\n return Array(graph, name, chunks, meta=meta)\n\n # Logic for how to rechunk\n inchunks, outchunks = reshape_rechunk(x.shape, shape, x.chunks)\n x2 = x.rechunk(inchunks)\n\n # Construct graph\n in_keys = list(product([x2.name], *[range(len(c)) for c in inchunks]))\n out_keys = list(product([name], *[range(len(c)) for c in outchunks]))\n shapes = list(product(*outchunks))\n dsk = {a: (M.reshape, b, shape) for a, b, shape in zip(out_keys, in_keys, shapes)}\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x2])\n return Array(graph, name, outchunks, meta=meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_inspect_result_type.return.np_result_type_args_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_inspect_result_type.return.np_result_type_args_", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 58, "span_ids": ["result_type", "imports", "array"], "tokens": 363}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import inspect\nimport math\nimport warnings\nfrom collections.abc import Iterable\nfrom functools import wraps, partial\nfrom numbers import Real, Integral\nfrom distutils.version import LooseVersion\nfrom typing import List\n\nimport numpy as np\nfrom tlz import concat, sliding_window, interleave\n\nfrom ..compatibility import apply\nfrom ..core import flatten\nfrom ..base import tokenize, is_dask_collection\nfrom ..delayed import unpack_collections, Delayed\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..utils import funcname, derived_from, is_arraylike\nfrom . 
import chunk\nfrom .creation import arange, diag, empty, indices\nfrom .utils import safe_wraps, validate_axis, meta_from_array, zeros_like_safe\nfrom .wrap import ones\nfrom .ufunc import multiply, sqrt\n\nfrom .core import (\n Array,\n map_blocks,\n elemwise,\n asarray,\n asanyarray,\n concatenate,\n stack,\n blockwise,\n broadcast_shapes,\n is_scalar_for_elemwise,\n broadcast_to,\n tensordot_lookup,\n implements,\n)\n\nfrom .einsumfuncs import einsum # noqa\nfrom .numpy_compat import _unravel_index_keyword\n\n\n@derived_from(np)\ndef array(x, dtype=None, ndmin=None):\n x = asarray(x)\n while ndmin is not None and x.ndim < ndmin:\n x = x[None, :]\n if dtype is not None and x.dtype != dtype:\n x = x.astype(dtype)\n return x\n\n\n@derived_from(np)\ndef result_type(*args):\n args = [a if is_scalar_for_elemwise(a) else a.dtype for a in args]\n return np.result_type(*args)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_atleast_3d_atleast_3d.if_len_new_arys_1_.else_.return.new_arys": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_atleast_3d_atleast_3d.if_len_new_arys_1_.else_.return.new_arys", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 61, "end_line": 78, "span_ids": ["atleast_3d"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef atleast_3d(*arys):\n new_arys = []\n for x in arys:\n x = asanyarray(x)\n if x.ndim == 0:\n x = x[None, None, None]\n elif x.ndim == 1:\n x = x[None, :, None]\n elif x.ndim == 2:\n x = x[:, :, None]\n\n new_arys.append(x)\n\n if len(new_arys) == 1:\n return new_arys[0]\n else:\n return new_arys", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_atleast_2d_dstack.return.concatenate_tup_axis_2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_atleast_2d_dstack.return.concatenate_tup_axis_2_", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 81, "end_line": 136, "span_ids": ["atleast_2d", "vstack", "dstack", "hstack", "atleast_1d"], "tokens": 374}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef atleast_2d(*arys):\n new_arys = []\n for 
x in arys:\n x = asanyarray(x)\n if x.ndim == 0:\n x = x[None, None]\n elif x.ndim == 1:\n x = x[None, :]\n\n new_arys.append(x)\n\n if len(new_arys) == 1:\n return new_arys[0]\n else:\n return new_arys\n\n\n@derived_from(np)\ndef atleast_1d(*arys):\n new_arys = []\n for x in arys:\n x = asanyarray(x)\n if x.ndim == 0:\n x = x[None]\n\n new_arys.append(x)\n\n if len(new_arys) == 1:\n return new_arys[0]\n else:\n return new_arys\n\n\n@derived_from(np)\ndef vstack(tup, allow_unknown_chunksizes=False):\n tup = tuple(atleast_2d(x) for x in tup)\n return concatenate(tup, axis=0, allow_unknown_chunksizes=allow_unknown_chunksizes)\n\n\n@derived_from(np)\ndef hstack(tup, allow_unknown_chunksizes=False):\n if all(x.ndim == 1 for x in tup):\n return concatenate(\n tup, axis=0, allow_unknown_chunksizes=allow_unknown_chunksizes\n )\n else:\n return concatenate(\n tup, axis=1, allow_unknown_chunksizes=allow_unknown_chunksizes\n )\n\n\n@derived_from(np)\ndef dstack(tup, allow_unknown_chunksizes=False):\n tup = tuple(atleast_3d(x) for x in tup)\n return concatenate(tup, axis=2, allow_unknown_chunksizes=allow_unknown_chunksizes)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_swapaxes_transpose.return.blockwise_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_swapaxes_transpose.return.blockwise_", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 139, "end_line": 164, "span_ids": ["transpose", "swapaxes"], "tokens": 222}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef swapaxes(a, axis1, axis2):\n if axis1 == axis2:\n return a\n if axis1 < 0:\n axis1 = axis1 + a.ndim\n if axis2 < 0:\n axis2 = axis2 + a.ndim\n ind = list(range(a.ndim))\n out = list(ind)\n out[axis1], out[axis2] = axis2, axis1\n\n return blockwise(np.swapaxes, out, a, ind, axis1=axis1, axis2=axis2, dtype=a.dtype)\n\n\n@derived_from(np)\ndef transpose(a, axes=None):\n if axes:\n if len(axes) != a.ndim:\n raise ValueError(\"axes don't match array\")\n else:\n axes = tuple(range(a.ndim))[::-1]\n axes = tuple(d + a.ndim if d < 0 else d for d in axes)\n return blockwise(\n np.transpose, axes, a, tuple(range(a.ndim)), dtype=a.dtype, axes=axes\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_flip_ALPHABET.alphabet_upper_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_flip_ALPHABET.alphabet_upper_", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": 
"implementation", "start_line": 167, "end_line": 206, "span_ids": ["flip", "fliplr", "flipud", "impl"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def flip(m, axis):\n \"\"\"\n Reverse element order along axis.\n\n Parameters\n ----------\n axis : int\n Axis to reverse element order of.\n\n Returns\n -------\n reversed array : ndarray\n \"\"\"\n\n m = asanyarray(m)\n\n sl = m.ndim * [slice(None)]\n try:\n sl[axis] = slice(None, None, -1)\n except IndexError as e:\n raise ValueError(\n \"`axis` of %s invalid for %s-D array\" % (str(axis), str(m.ndim))\n ) from e\n sl = tuple(sl)\n\n return m[sl]\n\n\n@derived_from(np)\ndef flipud(m):\n return flip(m, 0)\n\n\n@derived_from(np)\ndef fliplr(m):\n return flip(m, 1)\n\n\nalphabet = \"abcdefghijklmnopqrstuvwxyz\"\nALPHABET = alphabet.upper()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__tensordot__tensordot.return.x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__tensordot__tensordot.return.x", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 209, "end_line": 235, "span_ids": ["_tensordot"], "tokens": 263}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _tensordot(a, b, axes):\n x = max([a, b], key=lambda x: x.__array_priority__)\n tensordot = tensordot_lookup.dispatch(type(x))\n\n # workaround may be removed when numpy version (currently 1.13.0) is bumped\n a_dims = np.array([a.shape[i] for i in axes[0]])\n b_dims = np.array([b.shape[i] for i in axes[1]])\n if (\n len(a_dims) > 0\n and (a_dims == b_dims).all()\n and a_dims.min() == 0\n and LooseVersion(np.__version__) < LooseVersion(\"1.14\")\n ):\n x = np.zeros(\n tuple(\n [s for i, s in enumerate(a.shape) if i not in axes[0]]\n + [s for i, s in enumerate(b.shape) if i not in axes[1]]\n )\n )\n else:\n x = tensordot(a, b, axes=axes)\n\n ind = [slice(None, None)] * x.ndim\n for a in sorted(axes[0]):\n ind.insert(a, None)\n x = x[tuple(ind)]\n return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_tensordot_vdot.return.dot_a_conj_ravel_b_r": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_tensordot_vdot.return.dot_a_conj_ravel_b_r", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": 
"routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 238, "end_line": 287, "span_ids": ["vdot", "tensordot", "dot"], "tokens": 326}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef tensordot(lhs, rhs, axes=2):\n if isinstance(axes, Iterable):\n left_axes, right_axes = axes\n else:\n left_axes = tuple(range(lhs.ndim - axes, lhs.ndim))\n right_axes = tuple(range(0, axes))\n\n if isinstance(left_axes, Integral):\n left_axes = (left_axes,)\n if isinstance(right_axes, Integral):\n right_axes = (right_axes,)\n if isinstance(left_axes, list):\n left_axes = tuple(left_axes)\n if isinstance(right_axes, list):\n right_axes = tuple(right_axes)\n\n dt = np.promote_types(lhs.dtype, rhs.dtype)\n\n left_index = list(range(lhs.ndim))\n right_index = list(range(lhs.ndim, lhs.ndim + rhs.ndim))\n out_index = left_index + right_index\n\n for l, r in zip(left_axes, right_axes):\n out_index.remove(right_index[r])\n right_index[r] = left_index[l]\n\n intermediate = blockwise(\n _tensordot,\n out_index,\n lhs,\n left_index,\n rhs,\n right_index,\n dtype=dt,\n axes=(left_axes, right_axes),\n )\n\n result = intermediate.sum(axis=left_axes)\n return result\n\n\n@derived_from(np)\ndef dot(a, b):\n return tensordot(a, b, axes=((a.ndim - 1,), (b.ndim - 2,)))\n\n\n@derived_from(np)\ndef vdot(a, b):\n return dot(a.conj().ravel(), b.ravel())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_matmul_matmul.return.out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_matmul_matmul.return.out", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 290, "end_line": 329, "span_ids": ["matmul"], "tokens": 294}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef matmul(a, b):\n a = asanyarray(a)\n b = asanyarray(b)\n\n if a.ndim == 0 or b.ndim == 0:\n raise ValueError(\"`matmul` does not support scalars.\")\n\n a_is_1d = False\n if a.ndim == 1:\n a_is_1d = True\n a = a[np.newaxis, :]\n\n b_is_1d = False\n if b.ndim == 1:\n b_is_1d = True\n b = b[:, np.newaxis]\n\n if a.ndim < b.ndim:\n a = a[(b.ndim - a.ndim) * (np.newaxis,)]\n elif a.ndim > b.ndim:\n b = b[(a.ndim - b.ndim) * (np.newaxis,)]\n\n out = blockwise(\n np.matmul,\n tuple(range(1, a.ndim + 1)),\n a,\n tuple(range(1, a.ndim - 1)) + (a.ndim - 1, 0),\n b,\n tuple(range(1, a.ndim - 1)) + (0, a.ndim),\n dtype=result_type(a, b),\n concatenate=True,\n )\n\n if a_is_1d:\n out = out[..., 0, :]\n if b_is_1d:\n out = out[..., 0]\n\n return out", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_outer__inner_apply_along_axis.return.np_apply_along_axis_func1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_outer__inner_apply_along_axis.return.np_apply_along_axis_func1", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 332, "end_line": 343, "span_ids": ["outer", "_inner_apply_along_axis"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef outer(a, b):\n a = a.flatten()\n b = b.flatten()\n\n dtype = np.outer(a.dtype.type(), b.dtype.type()).dtype\n\n return blockwise(np.outer, \"ij\", a, \"i\", b, \"j\", dtype=dtype)\n\n\ndef _inner_apply_along_axis(arr, func1d, func1d_axis, func1d_args, func1d_kwargs):\n return np.apply_along_axis(func1d, func1d_axis, arr, *func1d_args, **func1d_kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_apply_along_axis_apply_along_axis.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_apply_along_axis_apply_along_axis.return.result", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 346, "end_line": 410, "span_ids": ["apply_along_axis"], "tokens": 587}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef apply_along_axis(func1d, axis, arr, *args, dtype=None, shape=None, **kwargs):\n \"\"\"\n Apply a function to 1-D slices along the given axis. This is\n a blocked variant of :func:`numpy.apply_along_axis` implemented via\n :func:`dask.array.map_blocks`\n\n Parameters\n ----------\n func1d : callable\n Function to apply to 1-D slices of the array along the given axis\n axis : int\n Axis along which func1d will be applied\n arr : dask array\n Dask array to which ``func1d`` will be applied\n args : any\n Additional arguments to ``func1d``.\n dtype : str or dtype, optional\n The dtype of the output of ``func1d``.\n shape : tuple, optional\n The shape of the output of ``func1d``.\n kwargs : any\n Additional keyword arguments for ``func1d``.\n\n Notes\n -----\n If either of `dtype` or `shape` are not provided, Dask attempts to\n determine them by calling `func1d` on a dummy array. 
This may produce\n incorrect values for `dtype` or `shape`, so we recommend providing them.\n \"\"\"\n arr = asarray(arr)\n\n # Verify that axis is valid and throw an error otherwise\n axis = len(arr.shape[:axis])\n\n # If necessary, infer dtype and shape of the output of func1d by calling it on test data.\n if shape is None or dtype is None:\n test_data = np.ones((1,), dtype=arr.dtype)\n test_result = np.array(func1d(test_data, *args, **kwargs))\n if shape is None:\n shape = test_result.shape\n if dtype is None:\n dtype = test_result.dtype\n\n # Rechunk so that func1d is applied over the full axis.\n arr = arr.rechunk(\n arr.chunks[:axis] + (arr.shape[axis : axis + 1],) + arr.chunks[axis + 1 :]\n )\n\n # Map func1d over the data to get the result\n # Adds other axes as needed.\n result = arr.map_blocks(\n _inner_apply_along_axis,\n name=funcname(func1d) + \"-along-axis\",\n dtype=dtype,\n chunks=(arr.chunks[:axis] + shape + arr.chunks[axis + 1 :]),\n drop_axis=axis,\n new_axis=list(range(axis, axis + len(shape), 1)),\n func1d=func1d,\n func1d_axis=axis,\n func1d_args=args,\n func1d_kwargs=kwargs,\n )\n\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_apply_over_axes_apply_over_axes.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_apply_over_axes_apply_over_axes.return.result", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 413, "end_line": 438, "span_ids": ["apply_over_axes"], "tokens": 168}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef apply_over_axes(func, a, axes):\n # Validate arguments\n a = asarray(a)\n try:\n axes = tuple(axes)\n except TypeError:\n axes = (axes,)\n\n sl = a.ndim * (slice(None),)\n\n # Compute using `apply_along_axis`.\n result = a\n for i in axes:\n result = apply_along_axis(func, i, result, 0)\n\n # Restore original dimensionality or error.\n if result.ndim == (a.ndim - 1):\n result = result[sl[:i] + (None,)]\n elif result.ndim != a.ndim:\n raise ValueError(\n \"func must either preserve dimensionality of the input\"\n \" or reduce it by one.\"\n )\n\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_ptp_diff.return.r": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_ptp_diff.return.r", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 441, "end_line": 465, "span_ids": ["ptp", "diff"], "tokens": 165}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef ptp(a, axis=None):\n return a.max(axis=axis) - a.min(axis=axis)\n\n\n@derived_from(np)\ndef diff(a, n=1, axis=-1):\n a = asarray(a)\n n = int(n)\n axis = int(axis)\n\n sl_1 = a.ndim * [slice(None)]\n sl_2 = a.ndim * [slice(None)]\n\n sl_1[axis] = slice(1, None)\n sl_2[axis] = slice(None, -1)\n\n sl_1 = tuple(sl_1)\n sl_2 = tuple(sl_2)\n\n r = a\n for i in range(n):\n r = r[sl_1] - r[sl_2]\n\n return r", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_ediff1d__gradient_kernel.return.grad": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_ediff1d__gradient_kernel.return.grad", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 468, "end_line": 502, "span_ids": ["_gradient_kernel", "ediff1d"], "tokens": 263}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef ediff1d(ary, to_end=None, to_begin=None):\n ary = asarray(ary)\n\n aryf = ary.flatten()\n r = aryf[1:] - aryf[:-1]\n\n r = [r]\n if to_begin is not None:\n r = [asarray(to_begin).flatten()] + r\n if to_end is not None:\n r = r + [asarray(to_end).flatten()]\n r = concatenate(r)\n\n return r\n\n\ndef _gradient_kernel(x, block_id, coord, axis, array_locs, grad_kwargs):\n \"\"\"\n x: nd-array\n array of one block\n coord: 1d-array or scalar\n coordinate along which the gradient is computed.\n axis: int\n axis along which the gradient is computed\n array_locs:\n actual location along axis. 
None if coordinate is scalar\n grad_kwargs:\n keyword to be passed to np.gradient\n \"\"\"\n block_loc = block_id[axis]\n if array_locs is not None:\n coord = coord[array_locs[0][block_loc] : array_locs[1][block_loc]]\n grad = np.gradient(x, coord, axis=axis, **grad_kwargs)\n return grad", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_gradient_gradient.return.results": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_gradient_gradient.return.results", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 505, "end_line": 581, "span_ids": ["gradient"], "tokens": 581}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef gradient(f, *varargs, **kwargs):\n f = asarray(f)\n\n kwargs[\"edge_order\"] = math.ceil(kwargs.get(\"edge_order\", 1))\n if kwargs[\"edge_order\"] > 2:\n raise ValueError(\"edge_order must be less than or equal to 2.\")\n\n drop_result_list = False\n axis = kwargs.pop(\"axis\", None)\n if axis is None:\n axis = tuple(range(f.ndim))\n elif isinstance(axis, Integral):\n drop_result_list = True\n axis = (axis,)\n\n axis = validate_axis(axis, f.ndim)\n\n if len(axis) != len(set(axis)):\n raise ValueError(\"duplicate axes not allowed\")\n\n axis = tuple(ax % f.ndim for ax in axis)\n\n if varargs == ():\n varargs = (1,)\n if len(varargs) == 1:\n varargs = len(axis) * varargs\n if len(varargs) != len(axis):\n raise TypeError(\n \"Spacing must either be a single scalar, or a scalar / 1d-array per axis\"\n )\n\n if issubclass(f.dtype.type, (np.bool8, Integral)):\n f = f.astype(float)\n elif issubclass(f.dtype.type, Real) and f.dtype.itemsize < 4:\n f = f.astype(float)\n\n results = []\n for i, ax in enumerate(axis):\n for c in f.chunks[ax]:\n if np.min(c) < kwargs[\"edge_order\"] + 1:\n raise ValueError(\n \"Chunk size must be larger than edge_order + 1. \"\n \"Minimum chunk for axis {} is {}. 
Rechunk to \"\n \"proceed.\".format(ax, np.min(c))\n )\n\n if np.isscalar(varargs[i]):\n array_locs = None\n else:\n if isinstance(varargs[i], Array):\n raise NotImplementedError(\"dask array coordinates are not supported.\")\n # coordinate position for each block taking overlap into account\n chunk = np.array(f.chunks[ax])\n array_loc_stop = np.cumsum(chunk) + 1\n array_loc_start = array_loc_stop - chunk - 2\n array_loc_stop[-1] -= 1\n array_loc_start[0] = 0\n array_locs = (array_loc_start, array_loc_stop)\n\n results.append(\n f.map_overlap(\n _gradient_kernel,\n dtype=f.dtype,\n depth={j: 1 if j == ax else 0 for j in range(f.ndim)},\n boundary=\"none\",\n coord=varargs[i],\n axis=ax,\n array_locs=array_locs,\n grad_kwargs=kwargs,\n )\n )\n\n if drop_result_list:\n results = results[0]\n\n return results", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__bincount_sum_bincount.return.Array_graph_final_name_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__bincount_sum_bincount.return.Array_graph_final_name_", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 584, "end_line": 629, "span_ids": ["_bincount_sum", "bincount"], "tokens": 409}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _bincount_sum(bincounts, dtype=int):\n n = max(map(len, bincounts))\n out = zeros_like_safe(bincounts[0], shape=n, dtype=dtype)\n for b in bincounts:\n out[: len(b)] += b\n return out\n\n\n@derived_from(np)\ndef bincount(x, weights=None, minlength=0):\n if x.ndim != 1:\n raise ValueError(\"Input array must be one dimensional. 
Try using x.ravel()\")\n if weights is not None:\n if weights.chunks != x.chunks:\n raise ValueError(\"Chunks of input array x and weights must match.\")\n\n token = tokenize(x, weights, minlength)\n name = \"bincount-\" + token\n final_name = \"bincount-sum\" + token\n # Call np.bincount on each block, possibly with weights\n if weights is not None:\n dsk = {\n (name, i): (np.bincount, (x.name, i), (weights.name, i), minlength)\n for i, _ in enumerate(x.__dask_keys__())\n }\n dtype = np.bincount([1], weights=[1]).dtype\n else:\n dsk = {\n (name, i): (np.bincount, (x.name, i), None, minlength)\n for i, _ in enumerate(x.__dask_keys__())\n }\n dtype = np.bincount([]).dtype\n\n dsk[(final_name, 0)] = (_bincount_sum, list(dsk), dtype)\n graph = HighLevelGraph.from_collections(\n final_name, dsk, dependencies=[x] if weights is None else [x, weights]\n )\n\n if minlength == 0:\n chunks = ((np.nan,),)\n else:\n chunks = ((minlength,),)\n\n meta = meta_from_array(x, 1, dtype=dtype)\n\n return Array(graph, final_name, chunks, meta=meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_digitize__block_hist.return.np_histogram_x_bins_ran": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_digitize__block_hist.return.np_histogram_x_bins_ran", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 632, "end_line": 656, "span_ids": ["_block_hist", "digitize", "_linspace_from_delayed"], "tokens": 268}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef digitize(a, bins, right=False):\n bins = np.asarray(bins)\n dtype = np.digitize([0], bins, right=False).dtype\n return a.map_blocks(np.digitize, dtype=dtype, bins=bins, right=right)\n\n\n# TODO: dask linspace doesn't support delayed values\ndef _linspace_from_delayed(start, stop, num=50):\n linspace_name = \"linspace-\" + tokenize(start, stop, num)\n (start_ref, stop_ref, num_ref), deps = unpack_collections([start, stop, num])\n if len(deps) == 0:\n return np.linspace(start, stop, num=num)\n\n linspace_dsk = {(linspace_name, 0): (np.linspace, start_ref, stop_ref, num_ref)}\n linspace_graph = HighLevelGraph.from_collections(\n linspace_name, linspace_dsk, dependencies=deps\n )\n\n chunks = ((np.nan,),) if is_dask_collection(num) else ((num,),)\n return Array(linspace_graph, linspace_name, chunks, dtype=float)\n\n\ndef _block_hist(x, bins, range=None, weights=None):\n return np.histogram(x, bins, range=range, weights=weights)[0][np.newaxis]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_histogram_histogram._": {"__data__": {"id_": 
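A small sketch of the `bincount` flow above: each block is counted independently with `np.bincount`, then `_bincount_sum` pads the per-block results to a common length and sums them. The inputs are assumed for illustration.

```python
import numpy as np
import dask.array as da

x = da.from_array(np.array([0, 1, 1, 2, 2, 2]), chunks=3)
w = da.from_array(np.array([0.5, 1.0, 1.0, 0.25, 0.25, 0.5]), chunks=3)

counts = da.bincount(x, minlength=4)   # known output chunks: ((4,),)
wsum = da.bincount(x, weights=w)       # minlength=0 -> unknown (NaN) chunks

print(counts.compute())  # [1 2 3 0]
print(wsum.compute())    # [0.5 2.  1. ]
```

Note that `weights` must share `x`'s chunking, since each per-block `np.bincount` call pairs one block of values with one block of weights.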
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_histogram_histogram._", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 659, "end_line": 734, "span_ids": ["histogram"], "tokens": 884}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def histogram(a, bins=None, range=None, normed=False, weights=None, density=None):\n \"\"\"\n Blocked variant of :func:`numpy.histogram`.\n\n Parameters\n ----------\n a : array_like\n Input data. The histogram is computed over the flattened array.\n bins : int or sequence of scalars, optional\n Either an iterable specifying the ``bins`` or the number of ``bins``\n and a ``range`` argument is required as computing ``min`` and ``max``\n over blocked arrays is an expensive operation that must be performed\n explicitly.\n If `bins` is an int, it defines the number of equal-width\n bins in the given range (10, by default). If `bins` is a\n sequence, it defines a monotonically increasing array of bin edges,\n including the rightmost edge, allowing for non-uniform bin widths.\n range : (float, float), optional\n The lower and upper range of the bins. If not provided, range\n is simply ``(a.min(), a.max())``. Values outside the range are\n ignored. The first element of the range must be less than or\n equal to the second. `range` affects the automatic bin\n computation as well. While bin width is computed to be optimal\n based on the actual data within `range`, the bin count will fill\n the entire range including portions containing no data.\n normed : bool, optional\n This is equivalent to the ``density`` argument, but produces incorrect\n results for unequal bin widths. It should not be used.\n weights : array_like, optional\n A dask.array.Array of weights, of the same block structure as ``a``. Each value in\n ``a`` only contributes its associated weight towards the bin count\n (instead of 1). If ``density`` is True, the weights are\n normalized, so that the integral of the density over the range\n remains 1.\n density : bool, optional\n If ``False``, the result will contain the number of samples in\n each bin. If ``True``, the result is the value of the\n probability *density* function at the bin, normalized such that\n the *integral* over the range is 1. Note that the sum of the\n histogram values will not be equal to 1 unless bins of unity\n width are chosen; it is not a probability *mass* function.\n Overrides the ``normed`` keyword if given.\n If ``density`` is True, ``bins`` cannot be a single-number delayed\n value. It must be a concrete number, or a (possibly-delayed)\n array/sequence of the bin edges.\n Returns\n -------\n hist : dask Array\n The values of the histogram. 
See `density` and `weights` for a\n description of the possible semantics.\n bin_edges : dask Array of dtype float\n Return the bin edges ``(length(hist)+1)``.\n\n\n Examples\n --------\n Using number of bins and range:\n\n >>> import dask.array as da\n >>> import numpy as np\n >>> x = da.from_array(np.arange(10000), chunks=10)\n >>> h, bins = da.histogram(x, bins=10, range=[0, 10000])\n >>> bins\n array([ 0., 1000., 2000., 3000., 4000., 5000., 6000., 7000.,\n 8000., 9000., 10000.])\n >>> h.compute()\n array([1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000])\n\n Explicitly specifying the bins:\n\n >>> h, bins = da.histogram(x, bins=np.array([0, 5000, 10000]))\n >>> bins\n array([ 0, 5000, 10000])\n >>> h.compute()\n array([5000, 5000])\n \"\"\"\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_histogram.if_isinstance_bins_Array_histogram._Map_the_histogram_to_al": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_histogram.if_isinstance_bins_Array_histogram._Map_the_histogram_to_al", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 735, "end_line": 805, "span_ids": ["histogram"], "tokens": 692}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def histogram(a, bins=None, range=None, normed=False, weights=None, density=None):\n if isinstance(bins, Array):\n scalar_bins = bins.ndim == 0\n # ^ `np.ndim` is not implemented by Dask array.\n elif isinstance(bins, Delayed):\n scalar_bins = bins._length is None or bins._length == 1\n else:\n scalar_bins = np.ndim(bins) == 0\n\n if bins is None or (scalar_bins and range is None):\n raise ValueError(\n \"dask.array.histogram requires either specifying \"\n \"bins as an iterable or specifying both a range and \"\n \"the number of bins\"\n )\n\n if weights is not None and weights.chunks != a.chunks:\n raise ValueError(\"Input array and weights must have the same chunked structure\")\n\n if normed is not False:\n raise ValueError(\n \"The normed= keyword argument has been deprecated. \"\n \"Please use density instead. \"\n \"See the numpy.histogram docstring for more information.\"\n )\n\n if density and scalar_bins and isinstance(bins, (Array, Delayed)):\n raise NotImplementedError(\n \"When `density` is True, `bins` cannot be a scalar Dask object. \"\n \"It must be a concrete number or a (possibly-delayed) array/sequence of bin edges.\"\n )\n\n for argname, val in [(\"bins\", bins), (\"range\", range), (\"weights\", weights)]:\n if not isinstance(bins, (Array, Delayed)) and is_dask_collection(bins):\n raise TypeError(\n \"Dask types besides Array and Delayed are not supported \"\n \"for `histogram`. 
For argument `{}`, got: {!r}\".format(argname, val)\n )\n\n if range is not None:\n try:\n if len(range) != 2:\n raise ValueError(\n f\"range must be a sequence or array of length 2, but got {len(range)} items\"\n )\n if isinstance(range, (Array, np.ndarray)) and range.shape != (2,):\n raise ValueError(\n f\"range must be a 1-dimensional array of two items, but got an array of shape {range.shape}\"\n )\n except TypeError:\n raise TypeError(\n f\"Expected a sequence or array for range, not {range}\"\n ) from None\n\n token = tokenize(a, bins, range, weights, density)\n name = \"histogram-sum-\" + token\n\n if scalar_bins:\n bins = _linspace_from_delayed(range[0], range[1], bins + 1)\n # ^ NOTE `range[1]` is safe because of the above check, and the initial check\n # that range must not be None if `scalar_bins`\n else:\n if not isinstance(bins, (Array, np.ndarray)):\n bins = asarray(bins)\n if bins.ndim != 1:\n raise ValueError(\n f\"bins must be a 1-dimensional array or sequence, got shape {bins.shape}\"\n )\n\n (bins_ref, range_ref), deps = unpack_collections([bins, range])\n\n # Map the histogram to all bins, forming a 2D array of histograms, stacked for each chunk\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_histogram.if_weights_is_None__histogram.if_density_is_not_None_.else_.return.n_bins": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_histogram.if_weights_is_None__histogram.if_density_is_not_None_.else_.return.n_bins", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 806, "end_line": 843, "span_ids": ["histogram"], "tokens": 380}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def histogram(a, bins=None, range=None, normed=False, weights=None, density=None):\n # ... 
other code\n if weights is None:\n dsk = {\n (name, i, 0): (_block_hist, k, bins_ref, range_ref)\n for i, k in enumerate(flatten(a.__dask_keys__()))\n }\n dtype = np.histogram([])[0].dtype\n else:\n a_keys = flatten(a.__dask_keys__())\n w_keys = flatten(weights.__dask_keys__())\n dsk = {\n (name, i, 0): (_block_hist, k, bins_ref, range_ref, w)\n for i, (k, w) in enumerate(zip(a_keys, w_keys))\n }\n dtype = weights.dtype\n\n deps = (a,) + deps\n if weights is not None:\n deps += (weights,)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=deps)\n\n # Turn graph into a 2D Array of shape (nchunks, nbins)\n nchunks = len(list(flatten(a.__dask_keys__())))\n nbins = bins.size - 1 # since `bins` is 1D\n chunks = ((1,) * nchunks, (nbins,))\n mapped = Array(graph, name, chunks, dtype=dtype)\n\n # Sum over chunks to get the final histogram\n n = mapped.sum(axis=0)\n\n # We need to replicate normed and density options from numpy\n if density is not None:\n if density:\n db = asarray(np.diff(bins).astype(float), chunks=n.chunks)\n return n / db / n.sum(), bins\n else:\n return n, bins\n else:\n return n, bins", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_cov_cov.if_not_rowvar_.else_.return._dot_X_X_T_conj_fac": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_cov_cov.if_not_rowvar_.else_.return._dot_X_X_T_conj_fac", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 846, "end_line": 891, "span_ids": ["cov"], "tokens": 356}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef cov(m, y=None, rowvar=1, bias=0, ddof=None):\n # This was copied almost verbatim from np.cov\n if ddof is not None and ddof != int(ddof):\n raise ValueError(\"ddof must be integer\")\n\n # Handles complex arrays too\n m = asarray(m)\n if y is None:\n dtype = np.result_type(m, np.float64)\n else:\n y = asarray(y)\n dtype = np.result_type(m, y, np.float64)\n X = array(m, ndmin=2, dtype=dtype)\n\n if X.shape[0] == 1:\n rowvar = 1\n if rowvar:\n N = X.shape[1]\n axis = 0\n else:\n N = X.shape[0]\n axis = 1\n\n # check ddof\n if ddof is None:\n if bias == 0:\n ddof = 1\n else:\n ddof = 0\n fact = float(N - ddof)\n if fact <= 0:\n warnings.warn(\"Degrees of freedom <= 0 for slice\", RuntimeWarning)\n fact = 0.0\n\n if y is not None:\n y = array(y, ndmin=2, dtype=dtype)\n X = concatenate((X, y), axis)\n\n X = X - X.mean(axis=1 - axis, keepdims=True)\n if not rowvar:\n return (dot(X.T, X.conj()) / fact).squeeze()\n else:\n return (dot(X, X.T.conj()) / fact).squeeze()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
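To exercise the density branch at the end of `histogram` above (divide by the bin widths, then by the total in-range count), a sketch with assumed sizes:

```python
import numpy as np
import dask.array as da

x = da.random.normal(size=10_000, chunks=1_000)
h, edges = da.histogram(x, bins=20, range=(-4, 4), density=True)

# `bins` is a plain scalar with no dask dependencies, so
# `_linspace_from_delayed` returns a concrete NumPy `edges` array.
widths = np.diff(edges)
print(float((h.compute() * widths).sum()))  # ~= 1.0
```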
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_corrcoef_iscomplexobj.return.issubclass_x_dtype_type_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_corrcoef_iscomplexobj.return.issubclass_x_dtype_type_", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 894, "end_line": 914, "span_ids": ["iscomplexobj", "round", "corrcoef"], "tokens": 152}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef corrcoef(x, y=None, rowvar=1):\n c = cov(x, y, rowvar)\n if c.shape == ():\n return c / c\n d = diag(c)\n d = d.reshape((d.shape[0], 1))\n sqr_d = sqrt(d)\n return (c / sqr_d) / sqr_d.T\n\n\n@implements(np.round, np.round_)\n@derived_from(np)\ndef round(a, decimals=0):\n return a.map_blocks(np.round, decimals=decimals, dtype=a.dtype)\n\n\n@implements(np.iscomplexobj)\n@derived_from(np)\ndef iscomplexobj(x):\n return issubclass(x.dtype.type, np.complexfloating)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__unique_internal__unique_internal.return.r": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__unique_internal__unique_internal.return.r", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 917, "end_line": 975, "span_ids": ["_unique_internal"], "tokens": 596}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _unique_internal(ar, indices, counts, return_inverse=False):\n \"\"\"\n Helper/wrapper function for :func:`numpy.unique`.\n\n Uses :func:`numpy.unique` to find the unique values for the array chunk.\n Given this chunk may not represent the whole array, also take the\n ``indices`` and ``counts`` that are in 1-to-1 correspondence to ``ar``\n and reduce them in the same fashion as ``ar`` is reduced. Namely sum\n any counts that correspond to the same value and take the smallest\n index that corresponds to the same value.\n\n To handle the inverse mapping from the unique values to the original\n array, simply return a NumPy array created with ``arange`` with enough\n values to correspond 1-to-1 to the unique values. 
While there is more\n work needed to be done to create the full inverse mapping for the\n original array, this provides enough information to generate the\n inverse mapping in Dask.\n\n Given Dask likes to have one array returned from functions like\n ``blockwise``, some formatting is done to stuff all of the resulting arrays\n into one big NumPy structured array. Dask is then able to handle this\n object and can split it apart into the separate results on the Dask side,\n which then can be passed back to this function in concatenated chunks for\n further reduction or can be return to the user to perform other forms of\n analysis.\n\n By handling the problem in this way, it does not matter where a chunk\n is in a larger array or how big it is. The chunk can still be computed\n on the same way. Also it does not matter if the chunk is the result of\n other chunks being run through this function multiple times. The end\n result will still be just as accurate using this strategy.\n \"\"\"\n\n return_index = indices is not None\n return_counts = counts is not None\n\n u = np.unique(ar)\n\n dt = [(\"values\", u.dtype)]\n if return_index:\n dt.append((\"indices\", np.intp))\n if return_inverse:\n dt.append((\"inverse\", np.intp))\n if return_counts:\n dt.append((\"counts\", np.intp))\n\n r = np.empty(u.shape, dtype=dt)\n r[\"values\"] = u\n if return_inverse:\n r[\"inverse\"] = np.arange(len(r), dtype=np.intp)\n if return_index or return_counts:\n for i, v in enumerate(r[\"values\"]):\n m = ar == v\n if return_index:\n indices[m].min(keepdims=True, out=r[\"indices\"][i : i + 1])\n if return_counts:\n counts[m].sum(keepdims=True, out=r[\"counts\"][i : i + 1])\n\n return r", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_unique_unique.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_unique_unique.return.result", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 978, "end_line": 1068, "span_ids": ["unique"], "tokens": 770}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef unique(ar, return_index=False, return_inverse=False, return_counts=False):\n ar = ar.ravel()\n\n # Run unique on each chunk and collect results in a Dask Array of\n # unknown size.\n\n args = [ar, \"i\"]\n out_dtype = [(\"values\", ar.dtype)]\n if return_index:\n args.extend([arange(ar.shape[0], dtype=np.intp, chunks=ar.chunks[0]), \"i\"])\n out_dtype.append((\"indices\", np.intp))\n else:\n args.extend([None, None])\n if return_counts:\n args.extend([ones((ar.shape[0],), dtype=np.intp, chunks=ar.chunks[0]), \"i\"])\n out_dtype.append((\"counts\", np.intp))\n else:\n args.extend([None, None])\n\n out = blockwise(_unique_internal, \"i\", *args, dtype=out_dtype, return_inverse=False)\n out._chunks = tuple((np.nan,) * len(c) for c in out.chunks)\n\n # Take the 
results from the unique chunks and do the following.\n #\n # 1. Collect all results as arguments.\n # 2. Concatenate each result into one big array.\n # 3. Pass all results as arguments to the internal unique again.\n #\n # TODO: This should be replaced with a tree reduction using this strategy.\n # xref: https://github.com/dask/dask/issues/2851\n\n out_parts = [out[\"values\"]]\n if return_index:\n out_parts.append(out[\"indices\"])\n else:\n out_parts.append(None)\n if return_counts:\n out_parts.append(out[\"counts\"])\n else:\n out_parts.append(None)\n\n name = \"unique-aggregate-\" + out.name\n dsk = {\n (name, 0): (\n (_unique_internal,)\n + tuple(\n (np.concatenate, o.__dask_keys__())\n if hasattr(o, \"__dask_keys__\")\n else o\n for o in out_parts\n )\n + (return_inverse,)\n )\n }\n out_dtype = [(\"values\", ar.dtype)]\n if return_index:\n out_dtype.append((\"indices\", np.intp))\n if return_inverse:\n out_dtype.append((\"inverse\", np.intp))\n if return_counts:\n out_dtype.append((\"counts\", np.intp))\n\n dependencies = [o for o in out_parts if hasattr(o, \"__dask_keys__\")]\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)\n chunks = ((np.nan,),)\n out = Array(graph, name, chunks, out_dtype)\n\n # Split out all results to return to the user.\n\n result = [out[\"values\"]]\n if return_index:\n result.append(out[\"indices\"])\n if return_inverse:\n # Using the returned unique values and arange of unknown length, find\n # each value matching a unique value and replace it with its\n # corresponding index or `0`. There should be only one entry for this\n # index in axis `1` (the one of unknown length). Reduce axis `1`\n # through summing to get an array with known dimensionality and the\n # mapping of the original values.\n mtches = (ar[:, None] == out[\"values\"][None, :]).astype(np.intp)\n result.append((mtches * out[\"inverse\"]).sum(axis=1))\n if return_counts:\n result.append(out[\"counts\"])\n\n if len(result) == 1:\n result = result[0]\n else:\n result = tuple(result)\n\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__isin_kernel_isin.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__isin_kernel_isin.return.result", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1071, "end_line": 1097, "span_ids": ["_isin_kernel", "isin"], "tokens": 201}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _isin_kernel(element, test_elements, assume_unique=False):\n values = np.in1d(element.ravel(), test_elements, assume_unique=assume_unique)\n return values.reshape(element.shape + (1,) * test_elements.ndim)\n\n\n@safe_wraps(getattr(np, \"isin\", None))\ndef isin(element, test_elements, assume_unique=False, invert=False):\n element = asarray(element)\n test_elements = asarray(test_elements)\n 
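A usage sketch for `unique` above: per-chunk uniques are concatenated and reduced through `_unique_internal` a second time, so the result's chunk sizes stay unknown (NaN) until computed. Sample data is assumed.

```python
import numpy as np
import dask.array as da

x = da.from_array(np.array([1, 1, 2, 3, 3, 3]), chunks=2)

values, counts = da.unique(x, return_counts=True)
print(values.compute())  # [1 2 3]
print(counts.compute())  # [1 2 3]

# return_inverse triggers the (n x n_unique) comparison described in the
# code, so it is the most expensive of the optional outputs.
vals, inv = da.unique(x, return_inverse=True)
print(inv.compute())     # [0 0 1 2 2 2]
```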
element_axes = tuple(range(element.ndim))\n test_axes = tuple(i + element.ndim for i in range(test_elements.ndim))\n mapped = blockwise(\n _isin_kernel,\n element_axes + test_axes,\n element,\n element_axes,\n test_elements,\n test_axes,\n adjust_chunks={axis: lambda _: 1 for axis in test_axes},\n dtype=bool,\n assume_unique=assume_unique,\n )\n\n result = mapped.any(axis=test_axes)\n if invert:\n result = ~result\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_roll_roll.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_roll_roll.return.result", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1100, "end_line": 1144, "span_ids": ["roll"], "tokens": 251}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef roll(array, shift, axis=None):\n result = array\n\n if axis is None:\n result = ravel(result)\n\n if not isinstance(shift, Integral):\n raise TypeError(\n \"Expect `shift` to be an instance of Integral when `axis` is None.\"\n )\n\n shift = (shift,)\n axis = (0,)\n else:\n try:\n len(shift)\n except TypeError:\n shift = (shift,)\n try:\n len(axis)\n except TypeError:\n axis = (axis,)\n\n if len(shift) != len(axis):\n raise ValueError(\"Must have the same number of shifts as axes.\")\n\n for i, s in zip(axis, shift):\n s = -s\n s %= result.shape[i]\n\n sl1 = result.ndim * [slice(None)]\n sl2 = result.ndim * [slice(None)]\n\n sl1[i] = slice(s, None)\n sl2[i] = slice(None, s)\n\n sl1 = tuple(sl1)\n sl2 = tuple(sl2)\n\n result = concatenate([result[sl1], result[sl2]], axis=i)\n\n result = result.reshape(array.shape)\n\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_shape_squeeze.return.a": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_shape_squeeze.return.a", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1147, "end_line": 1178, "span_ids": ["ravel", "shape", "squeeze", "union1d"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef shape(array):\n return array.shape\n\n\n@derived_from(np)\ndef union1d(ar1, ar2):\n return 
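`isin` above pairs every block of `element` with every block of `test_elements` via `blockwise` (each test axis collapsed to size-1 chunks by `adjust_chunks`), then reduces with `any` over the test axes. A minimal sketch with assumed inputs:

```python
import numpy as np
import dask.array as da

element = da.from_array(np.arange(12).reshape(3, 4), chunks=2)

mask = da.isin(element, [0, 5, 11])
print(mask.compute())
# [[ True False False False]
#  [False  True False False]
#  [False False False  True]]

# invert=True applies `~` to the reduced result.
print(da.isin(element, [0, 5, 11], invert=True).compute()[0])
```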
unique(concatenate((ar1.ravel(), ar2.ravel())))\n\n\n@derived_from(np)\ndef ravel(array):\n return array.reshape((-1,))\n\n\n@derived_from(np)\ndef squeeze(a, axis=None):\n if axis is None:\n axis = tuple(i for i, d in enumerate(a.shape) if d == 1)\n elif not isinstance(axis, tuple):\n axis = (axis,)\n\n if any(a.shape[i] != 1 for i in axis):\n raise ValueError(\"cannot squeeze axis with size other than one\")\n\n axis = validate_axis(axis, a.ndim)\n\n sl = tuple(0 if i in axis else slice(None) for i, s in enumerate(a.shape))\n\n a = a[sl]\n\n return a", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_compress_compress.return.a": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_compress_compress.return.a", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1181, "end_line": 1210, "span_ids": ["compress"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef compress(condition, a, axis=None):\n\n if not is_arraylike(condition):\n # Allow `condition` to be anything array-like, otherwise ensure `condition`\n # is a numpy array.\n condition = np.asarray(condition)\n condition = condition.astype(bool)\n a = asarray(a)\n\n if condition.ndim != 1:\n raise ValueError(\"Condition must be one dimensional\")\n\n if axis is None:\n a = a.ravel()\n axis = 0\n axis = validate_axis(axis, a.ndim)\n\n # Treat `condition` as filled with `False` (if it is too short)\n a = a[\n tuple(\n slice(None, len(condition)) if i == axis else slice(None)\n for i in range(a.ndim)\n )\n ]\n\n # Use `condition` to select along 1 dimension\n a = a[tuple(condition if i == axis else slice(None) for i in range(a.ndim))]\n\n return a", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_extract__isnonzero_vec.np_vectorize__isnonzero_v": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_extract__isnonzero_vec.np_vectorize__isnonzero_v", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1213, "end_line": 1287, "span_ids": ["isclose", "isnull", "notnull", "allclose", "variadic_choose", "choose", "_take_dask_array_from_numpy", "_isnonzero_vec", "around", "impl:5", "extract", "_asarray_isnull", "take"], "tokens": 514}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": 
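`compress` above implements the NumPy rule that a too-short `condition` acts as if padded with `False`, by first slicing the axis down to `len(condition)` and then boolean-indexing. A sketch with assumed data:

```python
import numpy as np
import dask.array as da

a = da.from_array(np.arange(10), chunks=3)

# condition shorter than the axis: entries beyond it are dropped.
print(da.compress([True, False, True], a).compute())  # [0 2]

# axis=None ravels first, matching numpy.compress.
b = da.from_array(np.arange(6).reshape(2, 3), chunks=2)
print(da.compress([False, True], b, axis=0).compute())  # [[3 4 5]]
```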
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef extract(condition, arr):\n condition = asarray(condition).astype(bool)\n arr = asarray(arr)\n return compress(condition.ravel(), arr.ravel())\n\n\n@derived_from(np)\ndef take(a, indices, axis=0):\n axis = validate_axis(axis, a.ndim)\n\n if isinstance(a, np.ndarray) and isinstance(indices, Array):\n return _take_dask_array_from_numpy(a, indices, axis)\n else:\n return a[(slice(None),) * axis + (indices,)]\n\n\ndef _take_dask_array_from_numpy(a, indices, axis):\n assert isinstance(a, np.ndarray)\n assert isinstance(indices, Array)\n\n return indices.map_blocks(\n lambda block: np.take(a, block, axis), chunks=indices.chunks, dtype=a.dtype\n )\n\n\n@derived_from(np)\ndef around(x, decimals=0):\n return map_blocks(partial(np.around, decimals=decimals), x, dtype=x.dtype)\n\n\ndef _asarray_isnull(values):\n import pandas as pd\n\n return np.asarray(pd.isnull(values))\n\n\ndef isnull(values):\n \"\"\" pandas.isnull for dask arrays \"\"\"\n # eagerly raise ImportError, if pandas isn't available\n import pandas as pd # noqa\n\n return elemwise(_asarray_isnull, values, dtype=\"bool\")\n\n\ndef notnull(values):\n \"\"\" pandas.notnull for dask arrays \"\"\"\n return ~isnull(values)\n\n\n@derived_from(np)\ndef isclose(arr1, arr2, rtol=1e-5, atol=1e-8, equal_nan=False):\n func = partial(np.isclose, rtol=rtol, atol=atol, equal_nan=equal_nan)\n return elemwise(func, arr1, arr2, dtype=\"bool\")\n\n\n@derived_from(np)\ndef allclose(arr1, arr2, rtol=1e-5, atol=1e-8, equal_nan=False):\n return isclose(arr1, arr2, rtol=rtol, atol=atol, equal_nan=equal_nan).all()\n\n\ndef variadic_choose(a, *choices):\n return np.choose(a, choices)\n\n\n@derived_from(np)\ndef choose(a, choices):\n return elemwise(variadic_choose, a, *choices)\n\n\ndef _isnonzero_vec(v):\n return bool(np.count_nonzero(v))\n\n\n_isnonzero_vec = np.vectorize(_isnonzero_vec, otypes=[bool])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_isnonzero_isnonzero.try_.else_.return.a_astype_bool_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_isnonzero_isnonzero.try_.else_.return.a_astype_bool_", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1290, "end_line": 1309, "span_ids": ["isnonzero"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def isnonzero(a):\n if a.dtype.kind in {\"U\", \"S\"}:\n # NumPy treats all-whitespace strings as falsy (like in `np.nonzero`).\n # but not in `.astype(bool)`. To match the behavior of numpy at least until\n # 1.19, we use `_isnonzero_vec`. 
When NumPy changes behavior, we should just\n # use the try block below.\n # https://github.com/numpy/numpy/issues/9875\n return a.map_blocks(_isnonzero_vec, dtype=bool)\n try:\n np.zeros(tuple(), dtype=a.dtype).astype(bool)\n except ValueError:\n ######################################################\n # Handle special cases where conversion to bool does #\n # not work correctly. #\n # #\n # xref: https://github.com/numpy/numpy/issues/9479 #\n ######################################################\n return a.map_blocks(_isnonzero_vec, dtype=bool)\n else:\n return a.astype(bool)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_argwhere_where.if_np_isscalar_condition_.else_.return.elemwise_np_where_condit": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_argwhere_where.if_np_isscalar_condition_.else_.return.elemwise_np_where_condit", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1312, "end_line": 1343, "span_ids": ["argwhere", "where"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef argwhere(a):\n a = asarray(a)\n\n nz = isnonzero(a).flatten()\n\n ind = indices(a.shape, dtype=np.intp, chunks=a.chunks)\n if ind.ndim > 1:\n ind = stack([ind[i].ravel() for i in range(len(ind))], axis=1)\n ind = compress(nz, ind, axis=0)\n\n return ind\n\n\n@derived_from(np)\ndef where(condition, x=None, y=None):\n if (x is None) != (y is None):\n raise ValueError(\"either both or neither of x and y should be given\")\n if (x is None) and (y is None):\n return nonzero(condition)\n\n if np.isscalar(condition):\n dtype = result_type(x, y)\n x = asarray(x)\n y = asarray(y)\n\n shape = broadcast_shapes(x.shape, y.shape)\n out = x if condition else y\n\n return broadcast_to(out, shape).astype(dtype)\n else:\n return elemwise(np.where, condition, x, y)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_count_nonzero__unravel_index_kernel.return.np_stack_np_unravel_index": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_count_nonzero__unravel_index_kernel.return.np_stack_np_unravel_index", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1346, "end_line": 1372, "span_ids": ["_unravel_index_kernel", "nonzero", "flatnonzero", "count_nonzero", "_int_piecewise"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
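`where` above has two paths: a scalar condition broadcasts one branch directly (no graph built over the condition), while an array condition dispatches to `np.where` elementwise per block. A sketch with assumed inputs:

```python
import numpy as np
import dask.array as da

x = da.from_array(np.arange(6).reshape(2, 3), chunks=2)
y = -x

print(da.where(x % 2 == 0, x, y).compute())  # elementwise np.where per block
print(da.where(True, x, y).compute())        # scalar: broadcast of x
```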
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef count_nonzero(a, axis=None):\n return isnonzero(asarray(a)).astype(np.intp).sum(axis=axis)\n\n\n@derived_from(np)\ndef flatnonzero(a):\n return argwhere(asarray(a).ravel())[:, 0]\n\n\n@derived_from(np)\ndef nonzero(a):\n ind = argwhere(a)\n if ind.ndim > 1:\n return tuple(ind[:, i] for i in range(ind.shape[1]))\n else:\n return (ind,)\n\n\ndef _int_piecewise(x, *condlist, **kwargs):\n return np.piecewise(\n x, list(condlist), kwargs[\"funclist\"], *kwargs[\"func_args\"], **kwargs[\"func_kw\"]\n )\n\n\ndef _unravel_index_kernel(indices, func_kwargs):\n return np.stack(np.unravel_index(indices, **func_kwargs))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_unravel_index_piecewise.return.map_blocks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_unravel_index_piecewise.return.map_blocks_", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1375, "end_line": 1404, "span_ids": ["unravel_index", "piecewise"], "tokens": 196}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef unravel_index(indices, shape, order=\"C\"):\n if shape and indices.size:\n unraveled_indices = tuple(\n indices.map_blocks(\n _unravel_index_kernel,\n dtype=np.intp,\n chunks=(((len(shape),),) + indices.chunks),\n new_axis=0,\n func_kwargs={_unravel_index_keyword: shape, \"order\": order},\n )\n )\n else:\n unraveled_indices = tuple(empty((0,), dtype=np.intp, chunks=1) for i in shape)\n\n return unraveled_indices\n\n\n@derived_from(np)\ndef piecewise(x, condlist, funclist, *args, **kw):\n return map_blocks(\n _int_piecewise,\n x,\n *condlist,\n dtype=x.dtype,\n name=\"piecewise\",\n funclist=funclist,\n func_args=args,\n func_kw=kw,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_aligned_coarsen_chunks_aligned_coarsen_chunks.return.tuple_newchunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_aligned_coarsen_chunks_aligned_coarsen_chunks.return.tuple_newchunks_", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1407, "end_line": 1457, "span_ids": ["aligned_coarsen_chunks"], "tokens": 416}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def aligned_coarsen_chunks(chunks: List[int], multiple: int) -> List[int]:\n \"\"\"\n Returns a new chunking aligned with the coarsening multiple.\n Any excess is at the end of the array.\n\n Examples\n --------\n >>> aligned_coarsen_chunks(chunks=(1, 2, 3), multiple=4)\n (4, 2)\n >>> aligned_coarsen_chunks(chunks=(1, 20, 3, 4), multiple=4)\n (20, 4, 4)\n >>> aligned_coarsen_chunks(chunks=(20, 10, 15, 23, 24), multiple=10)\n (20, 10, 20, 20, 20, 2)\n \"\"\"\n\n def choose_new_size(multiple, q, left):\n \"\"\"\n See if multiple * q is a good choice when 'left' elements are remaining.\n Else return multiple * (q-1)\n \"\"\"\n possible = multiple * q\n if (left - possible) > 0:\n return possible\n else:\n return multiple * (q - 1)\n\n newchunks = []\n left = sum(chunks) - sum(newchunks)\n chunkgen = (c for c in chunks)\n while left > 0:\n if left < multiple:\n newchunks.append(left)\n break\n\n chunk_size = next(chunkgen, 0)\n if chunk_size == 0:\n chunk_size = multiple\n\n q, r = divmod(chunk_size, multiple)\n if q == 0:\n continue\n elif r == 0:\n newchunks.append(chunk_size)\n elif r >= 5:\n newchunks.append(choose_new_size(multiple, q + 1, left))\n else:\n newchunks.append(choose_new_size(multiple, q, left))\n\n left = sum(chunks) - sum(newchunks)\n\n return tuple(newchunks)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_coarsen_coarsen.return.Array_graph_name_chunks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_coarsen_coarsen.return.Array_graph_name_chunks", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1460, "end_line": 1489, "span_ids": ["coarsen"], "tokens": 316}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@wraps(chunk.coarsen)\ndef coarsen(reduction, x, axes, trim_excess=False, **kwargs):\n if not trim_excess and not all(x.shape[i] % div == 0 for i, div in axes.items()):\n msg = \"Coarsening factor does not align with block dimensions\"\n raise ValueError(msg)\n\n if \"dask\" in inspect.getfile(reduction):\n reduction = getattr(np, reduction.__name__)\n\n new_chunks = {}\n for i, div in axes.items():\n aligned = aligned_coarsen_chunks(x.chunks[i], div)\n if aligned != x.chunks[i]:\n new_chunks[i] = aligned\n if new_chunks:\n x = x.rechunk(new_chunks)\n\n name = \"coarsen-\" + tokenize(reduction, x, axes, trim_excess)\n dsk = {\n (name,)\n + key[1:]: (apply, chunk.coarsen, [reduction, key, axes, trim_excess], kwargs)\n for key in flatten(x.__dask_keys__())\n }\n chunks = tuple(\n tuple(int(bd // axes.get(i, 1)) for bd in bds) for i, bds in enumerate(x.chunks)\n )\n\n meta = 
reduction(np.empty((1,) * x.ndim, dtype=x.dtype), **kwargs)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x])\n return Array(graph, name, chunks, meta=meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_split_at_breaks_split_at_breaks.return.split_array": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_split_at_breaks_split_at_breaks.return.split_array", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1492, "end_line": 1502, "span_ids": ["split_at_breaks"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def split_at_breaks(array, breaks, axis=0):\n \"\"\"Split an array into a list of arrays (using slices) at the given breaks\n\n >>> split_at_breaks(np.arange(6), [3, 5])\n [array([0, 1, 2]), array([3, 4]), array([5])]\n \"\"\"\n padded_breaks = concat([[None], breaks, [None]])\n slices = [slice(i, j) for i, j in sliding_window(2, padded_breaks)]\n preslice = (slice(None),) * axis\n split_array = [array[preslice + (s,)] for s in slices]\n return split_array", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_insert_insert.return.concatenate_interleaved_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_insert_insert.return.concatenate_interleaved_", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1505, "end_line": 1551, "span_ids": ["insert"], "tokens": 439}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef insert(arr, obj, values, axis):\n # axis is a required argument here to avoid needing to deal with the numpy\n # default case (which reshapes the array to make it flat)\n axis = validate_axis(axis, arr.ndim)\n\n if isinstance(obj, slice):\n obj = np.arange(*obj.indices(arr.shape[axis]))\n obj = np.asarray(obj)\n scalar_obj = obj.ndim == 0\n if scalar_obj:\n obj = np.atleast_1d(obj)\n\n obj = np.where(obj < 0, obj + arr.shape[axis], obj)\n if (np.diff(obj) < 0).any():\n raise NotImplementedError(\n \"da.insert only implemented for monotonic ``obj`` argument\"\n )\n\n split_arr = split_at_breaks(arr, np.unique(obj), axis)\n\n if getattr(values, \"ndim\", 0) == 
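`coarsen` above first rechunks each coarsened axis (via `aligned_coarsen_chunks`) so every chunk is a multiple of its factor, then applies the reduction block-by-block. A sketch using `np.mean` on assumed data:

```python
import numpy as np
import dask.array as da

x = da.from_array(np.arange(24, dtype=float).reshape(4, 6), chunks=(2, 3))

# Downsample by 2 along axis 0 and by 3 along axis 1.
y = da.coarsen(np.mean, x, {0: 2, 1: 3})
print(y.compute())
# [[ 4.  7.]
#  [16. 19.]]
```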
0:\n # we need to turn values into a dask array\n name = \"values-\" + tokenize(values)\n dtype = getattr(values, \"dtype\", type(values))\n values = Array({(name,): values}, name, chunks=(), dtype=dtype)\n\n values_shape = tuple(\n len(obj) if axis == n else s for n, s in enumerate(arr.shape)\n )\n values = broadcast_to(values, values_shape)\n elif scalar_obj:\n values = values[(slice(None),) * axis + (None,)]\n\n values_chunks = tuple(\n values_bd if axis == n else arr_bd\n for n, (arr_bd, values_bd) in enumerate(zip(arr.chunks, values.chunks))\n )\n values = values.rechunk(values_chunks)\n\n counts = np.bincount(obj)[:-1]\n values_breaks = np.cumsum(counts[counts > 0])\n split_values = split_at_breaks(values, values_breaks, axis)\n\n interleaved = list(interleave([split_arr, split_values]))\n interleaved = [i for i in interleaved if i.nbytes]\n return concatenate(interleaved, axis=axis)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__average_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__average_", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1554, "end_line": 1608, "span_ids": ["average", "_average"], "tokens": 405}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _average(a, axis=None, weights=None, returned=False, is_masked=False):\n # This was minimally modified from numpy.average\n a = asanyarray(a)\n\n if weights is None:\n avg = a.mean(axis)\n scl = avg.dtype.type(a.size / avg.size)\n else:\n wgt = asanyarray(weights)\n\n if issubclass(a.dtype.type, (np.integer, np.bool_)):\n result_dtype = result_type(a.dtype, wgt.dtype, \"f8\")\n else:\n result_dtype = result_type(a.dtype, wgt.dtype)\n\n # Sanity checks\n if a.shape != wgt.shape:\n if axis is None:\n raise TypeError(\n \"Axis must be specified when shapes of a and weights differ.\"\n )\n if wgt.ndim != 1:\n raise TypeError(\n \"1D weights expected when shapes of a and weights differ.\"\n )\n if wgt.shape[0] != a.shape[axis]:\n raise ValueError(\n \"Length of weights not compatible with specified axis.\"\n )\n\n # setup wgt to broadcast along axis\n wgt = broadcast_to(wgt, (a.ndim - 1) * (1,) + wgt.shape)\n wgt = wgt.swapaxes(-1, axis)\n if is_masked:\n from .ma import getmaskarray\n\n wgt = wgt * (~getmaskarray(a))\n scl = wgt.sum(axis=axis, dtype=result_dtype)\n avg = multiply(a, wgt, dtype=result_dtype).sum(axis) / scl\n\n if returned:\n if scl.shape != avg.shape:\n scl = broadcast_to(scl, avg.shape).copy()\n return avg, scl\n else:\n return avg\n\n\n@derived_from(np)\ndef average(a, axis=None, weights=None, returned=False):\n return _average(a, axis, weights, returned, is_masked=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, 
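`_average` above ports `numpy.average` onto dask operations: a 1-D `weights` is broadcast along the reduction axis and the normalizer `scl` is the weight sum. A sketch with assumed data:

```python
import numpy as np
import dask.array as da

a = da.from_array(np.arange(6, dtype=float).reshape(2, 3), chunks=(1, 3))
w = da.from_array(np.array([1.0, 2.0, 3.0]), chunks=3)

avg, scl = da.average(a, axis=1, weights=w, returned=True)
print(avg.compute())  # [1.333... 4.333...]
print(scl.compute())  # [6. 6.]  (weight sum, broadcast to avg's shape)
```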
"__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_from_itertools_import_pro__sanitize_index_element.if_isinstance_ind_Number.else_.raise_TypeError_Invalid_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_from_itertools_import_pro__sanitize_index_element.if_isinstance_ind_Number.else_.raise_TypeError_Invalid_", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 32, "span_ids": ["imports", "_sanitize_index_element"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from itertools import product\nimport math\nfrom numbers import Integral, Number\nfrom operator import add, getitem, itemgetter\nimport warnings\nimport functools\nimport bisect\n\nimport numpy as np\nfrom tlz import memoize, merge, pluck, concat, accumulate\n\nfrom .. import core\nfrom .. import config\nfrom .. import utils\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..base import tokenize, is_dask_collection\n\ncolon = slice(None, None, None)\n\n\ndef _sanitize_index_element(ind):\n \"\"\"Sanitize a one-element index.\"\"\"\n if isinstance(ind, Number):\n ind2 = int(ind)\n if ind2 != ind:\n raise IndexError(\"Bad index. Must be integer-like: %s\" % ind)\n else:\n return ind2\n elif ind is None:\n return None\n else:\n raise TypeError(\"Invalid index type\", type(ind), ind)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_sanitize_index_sanitize_index.if_index_array_dtype_b.else_.raise_TypeError_Invalid_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_sanitize_index_sanitize_index.if_index_array_dtype_b.else_.raise_TypeError_Invalid_", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 35, "end_line": 85, "span_ids": ["sanitize_index"], "tokens": 450}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def sanitize_index(ind):\n \"\"\"Sanitize the elements for indexing along one axis\n\n >>> sanitize_index([2, 3, 5])\n array([2, 3, 5])\n >>> sanitize_index([True, False, True, False])\n array([0, 2])\n >>> sanitize_index(np.array([1, 2, 3]))\n array([1, 2, 3])\n >>> sanitize_index(np.array([False, True, True]))\n array([1, 2])\n >>> type(sanitize_index(np.int32(0)))\n \n >>> sanitize_index(1.0)\n 1\n >>> sanitize_index(0.5)\n Traceback (most recent call last):\n ...\n IndexError: Bad index. 
Must be integer-like: 0.5\n \"\"\"\n if ind is None:\n return None\n elif isinstance(ind, slice):\n return slice(\n _sanitize_index_element(ind.start),\n _sanitize_index_element(ind.stop),\n _sanitize_index_element(ind.step),\n )\n elif isinstance(ind, Number):\n return _sanitize_index_element(ind)\n elif is_dask_collection(ind):\n return ind\n index_array = np.asanyarray(ind)\n if index_array.dtype == bool:\n nonzero = np.nonzero(index_array)\n if len(nonzero) == 1:\n # If a 1-element tuple, unwrap the element\n nonzero = nonzero[0]\n return np.asanyarray(nonzero)\n elif np.issubdtype(index_array.dtype, np.integer):\n return index_array\n elif np.issubdtype(index_array.dtype, np.floating):\n int_index = index_array.astype(np.intp)\n if np.allclose(index_array, int_index):\n return int_index\n else:\n check_int = np.isclose(index_array, int_index)\n first_err = index_array.ravel()[np.flatnonzero(~check_int)[0]]\n raise IndexError(\"Bad index. Must be integer-like: %s\" % first_err)\n else:\n raise TypeError(\"Invalid index type\", type(ind), ind)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_array_slice_array.return.dsk_out_bd_out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_array_slice_array.return.dsk_out_bd_out", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 88, "end_line": 165, "span_ids": ["slice_array"], "tokens": 704}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def slice_array(out_name, in_name, blockdims, index, itemsize):\n \"\"\"\n Master function for array slicing\n\n This function makes a new dask that slices blocks along every\n dimension and aggregates (via cartesian product) each dimension's\n slices so that the resulting block slices give the same results\n as the original slice on the original structure\n\n Index must be a tuple. It may contain the following types\n\n int, slice, list (at most one list), None\n\n Parameters\n ----------\n in_name - string\n This is the dask variable name that will be used as input\n out_name - string\n This is the dask variable output name\n blockshape - iterable of integers\n index - iterable of integers, slices, lists, or None\n itemsize : int\n The number of bytes required for each element of the array.\n\n Returns\n -------\n Dict where the keys are tuples of\n\n (out_name, dim_index[, dim_index[, ...]])\n\n and the values are\n\n (function, (in_name, dim_index, dim_index, ...),\n (slice(...), [slice()[,...]])\n\n Also new blockdims with shapes of each block\n\n ((10, 10, 10, 10), (20, 20))\n\n Examples\n --------\n >>> dsk, blockdims = slice_array('y', 'x', [(20, 20, 20, 20, 20)],\n ... 
(slice(10, 35),)) # doctest: +SKIP\n >>> dsk # doctest: +SKIP\n {('y', 0): (getitem, ('x', 0), (slice(10, 20),)),\n ('y', 1): (getitem, ('x', 1), (slice(0, 15),))}\n >>> blockdims # doctest: +SKIP\n ((10, 15),)\n\n See Also\n --------\n This function works by successively unwrapping cases and passing down\n through a sequence of functions.\n\n slice_with_newaxis : handle None/newaxis case\n slice_wrap_lists : handle fancy indexing with lists\n slice_slices_and_integers : handle everything else\n \"\"\"\n blockdims = tuple(map(tuple, blockdims))\n\n # x[:, :, :] - Punt and return old value\n if all(\n isinstance(index, slice) and index == slice(None, None, None) for index in index\n ):\n suffixes = product(*[range(len(bd)) for bd in blockdims])\n dsk = dict(((out_name,) + s, (in_name,) + s) for s in suffixes)\n return dsk, blockdims\n\n # Add in missing colons at the end as needed. x[5] -> x[5, :, :]\n not_none_count = sum(i is not None for i in index)\n missing = len(blockdims) - not_none_count\n index += (slice(None, None, None),) * missing\n\n # Pass down to next function\n dsk_out, bd_out = slice_with_newaxes(out_name, in_name, blockdims, index, itemsize)\n\n bd_out = tuple(map(tuple, bd_out))\n return dsk_out, bd_out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_with_newaxes_slice_with_newaxes.if_where_none_.else_.return.dsk_blockdims2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_with_newaxes_slice_with_newaxes.if_where_none_.else_.return.dsk_blockdims2", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 168, "end_line": 206, "span_ids": ["slice_with_newaxes"], "tokens": 408}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def slice_with_newaxes(out_name, in_name, blockdims, index, itemsize):\n \"\"\"\n Handle indexing with Nones\n\n Strips out Nones then hands off to slice_wrap_lists\n \"\"\"\n # Strip Nones from index\n index2 = tuple([ind for ind in index if ind is not None])\n where_none = [i for i, ind in enumerate(index) if ind is None]\n where_none_orig = list(where_none)\n for i, x in enumerate(where_none):\n n = sum(isinstance(ind, Integral) for ind in index[:x])\n if n:\n where_none[i] -= n\n\n # Pass down and do work\n dsk, blockdims2 = slice_wrap_lists(out_name, in_name, blockdims, index2, itemsize)\n\n if where_none:\n expand = expander(where_none)\n expand_orig = expander(where_none_orig)\n\n # Insert \",0\" into the key: ('x', 2, 3) -> ('x', 0, 2, 0, 3)\n dsk2 = {\n (out_name,) + expand(k[1:], 0): (v[:2] + (expand_orig(v[2], None),))\n for k, v in dsk.items()\n if k[0] == out_name\n }\n\n # Add back intermediate parts of the dask that weren't the output\n dsk3 = merge(dsk2, {k: v for k, v in dsk.items() if k[0] != out_name})\n\n # Insert (1,) into blockdims: ((2, 2), (3, 3)) -> ((2, 2), (1,), (3, 3))\n blockdims3 = 
expand(blockdims2, (1,))\n\n return dsk3, blockdims3\n\n else:\n return dsk, blockdims2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_wrap_lists_slice_wrap_lists.return.dsk3_blockdims2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_wrap_lists_slice_wrap_lists.return.dsk3_blockdims2", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 209, "end_line": 271, "span_ids": ["slice_wrap_lists"], "tokens": 629}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def slice_wrap_lists(out_name, in_name, blockdims, index, itemsize):\n \"\"\"\n Fancy indexing along blocked array dasks\n\n Handles index of type list. Calls slice_slices_and_integers for the rest\n\n See Also\n --------\n\n take : handle slicing with lists (\"fancy\" indexing)\n slice_slices_and_integers : handle slicing with slices and integers\n \"\"\"\n assert all(isinstance(i, (slice, list, Integral, np.ndarray)) for i in index)\n if not len(blockdims) == len(index):\n raise IndexError(\"Too many indices for array\")\n\n # Do we have more than one list in the index?\n where_list = [\n i for i, ind in enumerate(index) if isinstance(ind, np.ndarray) and ind.ndim > 0\n ]\n if len(where_list) > 1:\n raise NotImplementedError(\"Don't yet support nd fancy indexing\")\n # Is the single list an empty list? In this case just treat it as a zero\n # length slice\n if where_list and not index[where_list[0]].size:\n index = list(index)\n index[where_list.pop()] = slice(0, 0, 1)\n index = tuple(index)\n\n # No lists, hooray! just use slice_slices_and_integers\n if not where_list:\n return slice_slices_and_integers(out_name, in_name, blockdims, index)\n\n # Replace all lists with full slices [3, 1, 0] -> slice(None, None, None)\n index_without_list = tuple(\n slice(None, None, None) if isinstance(i, np.ndarray) else i for i in index\n )\n\n # lists and full slices. Just use take\n if all(isinstance(i, np.ndarray) or i == slice(None, None, None) for i in index):\n axis = where_list[0]\n blockdims2, dsk3 = take(\n out_name, in_name, blockdims, index[where_list[0]], itemsize, axis=axis\n )\n # Mixed case. Both slices/integers and lists. 
slice/integer then take\n else:\n # Do first pass without lists\n tmp = \"slice-\" + tokenize((out_name, in_name, blockdims, index))\n dsk, blockdims2 = slice_slices_and_integers(\n tmp, in_name, blockdims, index_without_list\n )\n\n # After collapsing some axes due to int indices, adjust axis parameter\n axis = where_list[0]\n axis2 = axis - sum(\n 1 for i, ind in enumerate(index) if i < axis and isinstance(ind, Integral)\n )\n\n # Do work\n blockdims2, dsk2 = take(out_name, tmp, blockdims2, index[axis], 8, axis=axis2)\n dsk3 = merge(dsk, dsk2)\n\n return dsk3, blockdims2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_slices_and_integers_slice_slices_and_integers.return.dsk_out_new_blockdims": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_slices_and_integers_slice_slices_and_integers.return.dsk_out_new_blockdims", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 274, "end_line": 328, "span_ids": ["slice_slices_and_integers"], "tokens": 493}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def slice_slices_and_integers(out_name, in_name, blockdims, index):\n \"\"\"\n Dask array indexing with slices and integers\n\n See Also\n --------\n\n _slice_1d\n \"\"\"\n from .core import unknown_chunk_message\n\n shape = tuple(cached_cumsum(dim, initial_zero=True)[-1] for dim in blockdims)\n\n for dim, ind in zip(shape, index):\n if np.isnan(dim) and ind != slice(None, None, None):\n raise ValueError(\n \"Arrays chunk sizes are unknown: %s%s\" % (shape, unknown_chunk_message)\n )\n\n assert all(isinstance(ind, (slice, Integral)) for ind in index)\n assert len(index) == len(blockdims)\n\n # Get a list (for each dimension) of dicts{blocknum: slice()}\n block_slices = list(map(_slice_1d, shape, blockdims, index))\n sorted_block_slices = [sorted(i.items()) for i in block_slices]\n\n # (in_name, 1, 1, 2), (in_name, 1, 1, 4), (in_name, 2, 1, 2), ...\n in_names = list(product([in_name], *[pluck(0, s) for s in sorted_block_slices]))\n\n # (out_name, 0, 0, 0), (out_name, 0, 0, 1), (out_name, 0, 1, 0), ...\n out_names = list(\n product(\n [out_name],\n *[\n range(len(d))[::-1] if i.step and i.step < 0 else range(len(d))\n for d, i in zip(block_slices, index)\n if not isinstance(i, Integral)\n ]\n )\n )\n\n all_slices = list(product(*[pluck(1, s) for s in sorted_block_slices]))\n\n dsk_out = {\n out_name: (getitem, in_name, slices)\n for out_name, in_name, slices in zip(out_names, in_names, all_slices)\n }\n\n new_blockdims = [\n new_blockdim(d, db, i)\n for d, i, db in zip(shape, index, blockdims)\n if not isinstance(i, Integral)\n ]\n\n return dsk_out, new_blockdims", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": 
"1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py__slice_1d__slice_1d._Returns_a_dict_of_blo": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py__slice_1d__slice_1d._Returns_a_dict_of_blo", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 331, "end_line": 403, "span_ids": ["_slice_1d"], "tokens": 883}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _slice_1d(dim_shape, lengths, index):\n \"\"\"Returns a dict of {blocknum: slice}\n\n This function figures out where each slice should start in each\n block for a single dimension. If the slice won't return any elements\n in the block, that block will not be in the output.\n\n Parameters\n ----------\n\n dim_shape - the number of elements in this dimension.\n This should be a positive, non-zero integer\n blocksize - the number of elements per block in this dimension\n This should be a positive, non-zero integer\n index - a description of the elements in this dimension that we want\n This might be an integer, a slice(), or an Ellipsis\n\n Returns\n -------\n\n dictionary where the keys are the integer index of the blocks that\n should be sliced and the values are the slices\n\n Examples\n --------\n\n Trivial slicing\n\n >>> _slice_1d(100, [60, 40], slice(None, None, None))\n {0: slice(None, None, None), 1: slice(None, None, None)}\n\n 100 length array cut into length 20 pieces, slice 0:35\n\n >>> _slice_1d(100, [20, 20, 20, 20, 20], slice(0, 35))\n {0: slice(None, None, None), 1: slice(0, 15, 1)}\n\n Support irregular blocks and various slices\n\n >>> _slice_1d(100, [20, 10, 10, 10, 25, 25], slice(10, 35))\n {0: slice(10, 20, 1), 1: slice(None, None, None), 2: slice(0, 5, 1)}\n\n Support step sizes\n\n >>> _slice_1d(100, [15, 14, 13], slice(10, 41, 3))\n {0: slice(10, 15, 3), 1: slice(1, 14, 3), 2: slice(2, 12, 3)}\n\n >>> _slice_1d(100, [20, 20, 20, 20, 20], slice(0, 100, 40)) # step > blocksize\n {0: slice(0, 20, 40), 2: slice(0, 20, 40), 4: slice(0, 20, 40)}\n\n Also support indexing single elements\n\n >>> _slice_1d(100, [20, 20, 20, 20, 20], 25)\n {1: 5}\n\n And negative slicing\n\n >>> _slice_1d(100, [20, 20, 20, 20, 20], slice(100, 0, -3)) # doctest: +NORMALIZE_WHITESPACE\n {4: slice(-1, -21, -3),\n 3: slice(-2, -21, -3),\n 2: slice(-3, -21, -3),\n 1: slice(-1, -21, -3),\n 0: slice(-2, -20, -3)}\n\n >>> _slice_1d(100, [20, 20, 20, 20, 20], slice(100, 12, -3)) # doctest: +NORMALIZE_WHITESPACE\n {4: slice(-1, -21, -3),\n 3: slice(-2, -21, -3),\n 2: slice(-3, -21, -3),\n 1: slice(-1, -21, -3),\n 0: slice(-2, -8, -3)}\n\n >>> _slice_1d(100, [20, 20, 20, 20, 20], slice(100, -12, -3))\n {4: slice(-1, -12, -3)}\n \"\"\"\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py__slice_1d.chunk_boundaries__slice_1d.return.d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py__slice_1d.chunk_boundaries__slice_1d.return.d", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 404, "end_line": 495, "span_ids": ["_slice_1d"], "tokens": 793}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _slice_1d(dim_shape, lengths, index):\n chunk_boundaries = cached_cumsum(lengths)\n\n if isinstance(index, Integral):\n # use right-side search to be consistent with previous result\n i = bisect.bisect_right(chunk_boundaries, index)\n if i > 0:\n # the very first chunk has no relative shift\n ind = index - chunk_boundaries[i - 1]\n else:\n ind = index\n return {int(i): int(ind)}\n\n assert isinstance(index, slice)\n\n if index == colon:\n return {k: colon for k in range(len(lengths))}\n\n step = index.step or 1\n if step > 0:\n start = index.start or 0\n stop = index.stop if index.stop is not None else dim_shape\n else:\n start = index.start if index.start is not None else dim_shape - 1\n start = dim_shape - 1 if start >= dim_shape else start\n stop = -(dim_shape + 1) if index.stop is None else index.stop\n\n # posify start and stop\n if start < 0:\n start += dim_shape\n if stop < 0:\n stop += dim_shape\n\n d = dict()\n if step > 0:\n istart = bisect.bisect_right(chunk_boundaries, start)\n istop = bisect.bisect_left(chunk_boundaries, stop)\n\n # the bound is not exactly tight; make it tighter?\n istop = min(istop + 1, len(lengths))\n\n # jump directly to istart\n if istart > 0:\n start = start - chunk_boundaries[istart - 1]\n stop = stop - chunk_boundaries[istart - 1]\n\n for i in range(istart, istop):\n length = lengths[i]\n if start < length and stop > 0:\n d[i] = slice(start, min(stop, length), step)\n start = (start - length) % step\n else:\n start = start - length\n stop -= length\n else:\n rstart = start # running start\n\n istart = bisect.bisect_left(chunk_boundaries, start)\n istop = bisect.bisect_right(chunk_boundaries, stop)\n\n # the bound is not exactly tight; make it tighter?\n istart = min(istart + 1, len(chunk_boundaries) - 1)\n istop = max(istop - 1, -1)\n\n for i in range(istart, istop, -1):\n chunk_stop = chunk_boundaries[i]\n # create a chunk start and stop\n if i == 0:\n chunk_start = 0\n else:\n chunk_start = chunk_boundaries[i - 1]\n\n # if our slice is in this chunk\n if (chunk_start <= rstart < chunk_stop) and (rstart > stop):\n d[i] = slice(\n rstart - chunk_stop,\n max(chunk_start - chunk_stop - 1, stop - chunk_stop),\n step,\n )\n\n # compute the next running start point,\n offset = (rstart - (chunk_start - 1)) % step\n rstart = chunk_start + offset - 1\n\n # replace 0:20:1 with : if appropriate\n for k, v in d.items():\n if v == slice(0, lengths[k], 1):\n d[k] = 
slice(None, None, None)\n\n if not d: # special case x[:0]\n d[0] = slice(0, 0, 1)\n\n return d", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_partition_by_size_issorted.return.np_all_seq_1_seq_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_partition_by_size_issorted.return.np_all_seq_1_seq_1_", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 498, "end_line": 525, "span_ids": ["issorted", "partition_by_size"], "tokens": 256}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def partition_by_size(sizes, seq):\n \"\"\"\n\n >>> partition_by_size([10, 20, 10], [1, 5, 9, 12, 29, 35])\n [array([1, 5, 9]), array([ 2, 19]), array([5])]\n \"\"\"\n seq = np.asanyarray(seq)\n left = np.empty(len(sizes) + 1, dtype=int)\n left[0] = 0\n\n right = np.cumsum(sizes, out=left[1:])\n locations = np.empty(len(sizes) + 1, dtype=int)\n locations[0] = 0\n locations[1:] = np.searchsorted(seq, right)\n return [(seq[j:k] - l) for j, k, l in zip(locations[:-1], locations[1:], left)]\n\n\ndef issorted(seq):\n \"\"\"Is sequence sorted?\n\n >>> issorted([1, 2, 3])\n True\n >>> issorted([3, 1, 2])\n False\n \"\"\"\n if len(seq) == 0:\n return True\n return np.all(seq[:-1] <= seq[1:])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slicing_plan_slicing_plan.return.out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slicing_plan_slicing_plan.return.out", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 528, "end_line": 558, "span_ids": ["slicing_plan"], "tokens": 233}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def slicing_plan(chunks, index):\n \"\"\"Construct a plan to slice chunks with the given index\n\n Parameters\n ----------\n chunks : Tuple[int]\n One dimensions worth of chunking information\n index : np.ndarray[int]\n The index passed to slice on that dimension\n\n Returns\n -------\n out : List[Tuple[int, np.ndarray]]\n A list of chunk/sub-index pairs corresponding to each output chunk\n \"\"\"\n index = np.asanyarray(index)\n cum_chunks = cached_cumsum(chunks)\n\n chunk_locations = np.searchsorted(cum_chunks, index, side=\"right\")\n 
where = np.where(np.diff(chunk_locations))[0] + 1\n where = np.concatenate([[0], where, [len(chunk_locations)]])\n\n out = []\n for i in range(len(where) - 1):\n sub_index = index[where[i] : where[i + 1]]\n chunk = chunk_locations[where[i]]\n if chunk > 0:\n sub_index = sub_index - cum_chunks[chunk - 1]\n out.append((chunk, sub_index))\n\n return out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_posify_index_posify_index.return.ind": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_posify_index_posify_index.return.ind", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 649, "end_line": 674, "span_ids": ["posify_index"], "tokens": 242}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def posify_index(shape, ind):\n \"\"\"Flip negative indices around to positive ones\n\n >>> posify_index(10, 3)\n 3\n >>> posify_index(10, -3)\n 7\n >>> posify_index(10, [3, -3])\n array([3, 7])\n\n >>> posify_index((10, 20), (3, -3))\n (3, 17)\n >>> posify_index((10, 20), (3, [3, 4, -3])) # doctest: +NORMALIZE_WHITESPACE\n (3, array([ 3, 4, 17]))\n \"\"\"\n if isinstance(ind, tuple):\n return tuple(map(posify_index, shape, ind))\n if isinstance(ind, Integral):\n if ind < 0 and not math.isnan(shape):\n return ind + shape\n else:\n return ind\n if isinstance(ind, (np.ndarray, list)) and not math.isnan(shape):\n ind = np.asanyarray(ind)\n return np.where(ind < 0, ind + shape, ind)\n return ind", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py__expander_expander.return._expander_tuple_where_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py__expander_expander.return._expander_tuple_where_", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 677, "end_line": 711, "span_ids": ["expander", "_expander"], "tokens": 233}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@memoize\ndef _expander(where):\n if not where:\n\n def expand(seq, val):\n return seq\n\n return expand\n else:\n decl = \"\"\"def expand(seq, val):\n return ({left}) + tuple({right})\n \"\"\"\n left = []\n j = 0\n for i in range(max(where) + 1):\n if i in where:\n left.append(\"val, \")\n else:\n 
left.append(\"seq[%d], \" % j)\n j += 1\n right = \"seq[%d:]\" % j\n left = \"\".join(left)\n decl = decl.format(**locals())\n ns = {}\n exec(compile(decl, \"\", \"exec\"), ns, ns)\n return ns[\"expand\"]\n\n\ndef expander(where):\n \"\"\"Create a function to insert value at many locations in sequence.\n\n >>> expander([0, 2])(['a', 'b', 'c'], 'z')\n ('z', 'a', 'z', 'b', 'c')\n \"\"\"\n return _expander(tuple(where))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_new_blockdim_new_blockdim.return._int_math_ceil_1_0_slc": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_new_blockdim_new_blockdim.return._int_math_ceil_1_0_slc", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 714, "end_line": 738, "span_ids": ["new_blockdim"], "tokens": 307}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def new_blockdim(dim_shape, lengths, index):\n \"\"\"\n\n >>> new_blockdim(100, [20, 10, 20, 10, 40], slice(0, 90, 2))\n [10, 5, 10, 5, 15]\n\n >>> new_blockdim(100, [20, 10, 20, 10, 40], [5, 1, 30, 22])\n [4]\n\n >>> new_blockdim(100, [20, 10, 20, 10, 40], slice(90, 10, -2))\n [16, 5, 10, 5, 4]\n \"\"\"\n if index == slice(None, None, None):\n return lengths\n if isinstance(index, list):\n return [len(index)]\n assert not isinstance(index, Integral)\n pairs = sorted(_slice_1d(dim_shape, lengths, index).items(), key=itemgetter(0))\n slices = [\n slice(0, lengths[i], 1) if slc == slice(None, None, None) else slc\n for i, slc in pairs\n ]\n if isinstance(index, slice) and index.step and index.step < 0:\n slices = slices[::-1]\n return [int(math.ceil((1.0 * slc.stop - slc.start) / slc.step)) for slc in slices]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_replace_ellipsis_replace_ellipsis.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_replace_ellipsis_replace_ellipsis.return._", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 741, "end_line": 759, "span_ids": ["replace_ellipsis"], "tokens": 201}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def replace_ellipsis(n, index):\n \"\"\"Replace ... 
with slices, :, : ,:\n\n >>> replace_ellipsis(4, (3, Ellipsis, 2))\n (3, slice(None, None, None), slice(None, None, None), 2)\n\n >>> replace_ellipsis(2, (Ellipsis, None))\n (slice(None, None, None), slice(None, None, None), None)\n \"\"\"\n # Careful about using in or index because index may contain arrays\n isellipsis = [i for i, ind in enumerate(index) if ind is Ellipsis]\n if not isellipsis:\n return index\n else:\n loc = isellipsis[0]\n extra_dimensions = n - (len(index) - sum(i is None for i in index) - 1)\n return (\n index[:loc] + (slice(None, None, None),) * extra_dimensions + index[loc + 1 :]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_normalize_slice_normalize_slice.return.idx": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_normalize_slice_normalize_slice.return.idx", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 762, "end_line": 795, "span_ids": ["normalize_slice"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def normalize_slice(idx, dim):\n \"\"\"Normalize slices to canonical form\n\n Parameters\n ----------\n idx: slice or other index\n dim: dimension length\n\n Examples\n --------\n >>> normalize_slice(slice(0, 10, 1), 10)\n slice(None, None, None)\n \"\"\"\n\n if isinstance(idx, slice):\n if math.isnan(dim):\n return idx\n start, stop, step = idx.indices(dim)\n if step > 0:\n if start == 0:\n start = None\n if stop >= dim:\n stop = None\n if step == 1:\n step = None\n if stop is not None and start is not None and stop < start:\n stop = start\n elif step < 0:\n if start >= dim - 1:\n start = None\n if stop < 0:\n stop = None\n return slice(start, stop, step)\n return idx", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_normalize_index_normalize_index.return.idx": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_normalize_index_normalize_index.return.idx", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 798, "end_line": 863, "span_ids": ["normalize_index"], "tokens": 595}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def normalize_index(idx, shape):\n \"\"\"Normalize slicing 
indexes\n\n 1. Replaces ellipses with many full slices\n 2. Adds full slices to end of index\n 3. Checks bounding conditions\n 4. Replace multidimensional numpy arrays with dask arrays\n 5. Replaces numpy arrays with lists\n 6. Posify's integers and lists\n 7. Normalizes slices to canonical form\n\n Examples\n --------\n >>> normalize_index(1, (10,))\n (1,)\n >>> normalize_index(-1, (10,))\n (9,)\n >>> normalize_index([-1], (10,))\n (array([9]),)\n >>> normalize_index(slice(-3, 10, 1), (10,))\n (slice(7, None, None),)\n >>> normalize_index((Ellipsis, None), (10,))\n (slice(None, None, None), None)\n >>> normalize_index(np.array([[True, False], [False, True], [True, True]]), (3, 2))\n (dask.array,)\n \"\"\"\n from .core import from_array\n\n if not isinstance(idx, tuple):\n idx = (idx,)\n\n # if a > 1D numpy.array is provided, cast it to a dask array\n if len(idx) > 0 and len(shape) > 1:\n i = idx[0]\n if isinstance(i, np.ndarray) and i.shape == shape:\n idx = (from_array(i), *idx[1:])\n\n idx = replace_ellipsis(len(shape), idx)\n n_sliced_dims = 0\n for i in idx:\n if hasattr(i, \"ndim\") and i.ndim >= 1:\n n_sliced_dims += i.ndim\n elif i is None:\n continue\n else:\n n_sliced_dims += 1\n idx = idx + (slice(None),) * (len(shape) - n_sliced_dims)\n if len([i for i in idx if i is not None]) > len(shape):\n raise IndexError(\"Too many indices for array\")\n\n none_shape = []\n i = 0\n for ind in idx:\n if ind is not None:\n none_shape.append(shape[i])\n i += 1\n else:\n none_shape.append(None)\n\n for i, d in zip(idx, none_shape):\n if d is not None:\n check_index(i, d)\n idx = tuple(map(sanitize_index, idx))\n idx = tuple(map(normalize_slice, idx, none_shape))\n idx = posify_index(none_shape, idx)\n return idx", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_check_index_check_index.if_np_isnan_dimension_.None_5.raise_IndexError_msg_i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_check_index_check_index.if_np_isnan_dimension_.None_5.raise_IndexError_msg_i", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 866, "end_line": 933, "span_ids": ["check_index"], "tokens": 517}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def check_index(ind, dimension):\n \"\"\"Check validity of index for a given dimension\n\n Examples\n --------\n >>> check_index(3, 5)\n >>> check_index(5, 5)\n Traceback (most recent call last):\n ...\n IndexError: Index is not smaller than dimension 5 >= 5\n\n >>> check_index(6, 5)\n Traceback (most recent call last):\n ...\n IndexError: Index is not smaller than dimension 6 >= 5\n\n >>> check_index(-1, 5)\n >>> check_index(-6, 5)\n Traceback (most recent call last):\n ...\n IndexError: Negative index is not greater than negative dimension -6 <= -5\n\n >>> check_index([1, 2], 5)\n >>> check_index([6, 3], 5)\n Traceback (most recent call 
last):\n ...\n IndexError: Index out of bounds 5\n\n >>> check_index(slice(0, 3), 5)\n\n >>> check_index([True], 1)\n >>> check_index([True, True], 3)\n Traceback (most recent call last):\n ...\n IndexError: Boolean array length 2 doesn't equal dimension 3\n >>> check_index([True, True, True], 1)\n Traceback (most recent call last):\n ...\n IndexError: Boolean array length 3 doesn't equal dimension 1\n \"\"\"\n # unknown dimension, assumed to be in bounds\n if np.isnan(dimension):\n return\n elif isinstance(ind, (list, np.ndarray)):\n x = np.asanyarray(ind)\n if x.dtype == bool:\n if x.size != dimension:\n raise IndexError(\n \"Boolean array length %s doesn't equal dimension %s\"\n % (x.size, dimension)\n )\n elif (x >= dimension).any() or (x < -dimension).any():\n raise IndexError(\"Index out of bounds %s\" % dimension)\n elif isinstance(ind, slice):\n return\n elif is_dask_collection(ind):\n return\n elif ind is None:\n return\n\n elif ind >= dimension:\n raise IndexError(\n \"Index is not smaller than dimension %d >= %d\" % (ind, dimension)\n )\n\n elif ind < -dimension:\n msg = \"Negative index is not greater than negative dimension %d <= -%d\"\n raise IndexError(msg % (ind, dimension))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_with_int_dask_array_slice_with_int_dask_array.return.x_tuple_out_index_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_with_int_dask_array_slice_with_int_dask_array.return.x_tuple_out_index_", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 936, "end_line": 986, "span_ids": ["slice_with_int_dask_array"], "tokens": 424}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def slice_with_int_dask_array(x, index):\n \"\"\"Slice x with at most one 1D dask arrays of ints.\n\n This is a helper function of :meth:`Array.__getitem__`.\n\n Parameters\n ----------\n x: Array\n index: tuple with as many elements as x.ndim, among which there are\n one or more Array's with dtype=int\n\n Returns\n -------\n tuple of (sliced x, new index)\n\n where the new index is the same as the input, but with slice(None)\n replaced to the original slicer where a 1D filter has been applied and\n one less element where a zero-dimensional filter has been applied.\n \"\"\"\n from .core import Array\n\n assert len(index) == x.ndim\n fancy_indexes = [\n isinstance(idx, (tuple, list))\n or (isinstance(idx, (np.ndarray, Array)) and idx.ndim > 0)\n for idx in index\n ]\n if sum(fancy_indexes) > 1:\n raise NotImplementedError(\"Don't yet support nd fancy indexing\")\n\n out_index = []\n dropped_axis_cnt = 0\n for in_axis, idx in enumerate(index):\n out_axis = in_axis - dropped_axis_cnt\n if isinstance(idx, Array) and idx.dtype.kind in \"iu\":\n if idx.ndim == 0:\n idx = idx[np.newaxis]\n x = slice_with_int_dask_array_on_axis(x, idx, out_axis)\n x = 
x[tuple(0 if i == out_axis else slice(None) for i in range(x.ndim))]\n dropped_axis_cnt += 1\n elif idx.ndim == 1:\n x = slice_with_int_dask_array_on_axis(x, idx, out_axis)\n out_index.append(slice(None))\n else:\n raise NotImplementedError(\n \"Slicing with dask.array of ints only permitted when \"\n \"the indexer has zero or one dimensions\"\n )\n else:\n out_index.append(idx)\n return x, tuple(out_index)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_with_int_dask_array_on_axis_slice_with_int_dask_array_on_axis.return.y": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_with_int_dask_array_on_axis_slice_with_int_dask_array_on_axis.return.y", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 989, "end_line": 1050, "span_ids": ["slice_with_int_dask_array_on_axis"], "tokens": 486}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def slice_with_int_dask_array_on_axis(x, idx, axis):\n \"\"\"Slice a ND dask array with a 1D dask arrays of ints along the given\n axis.\n\n This is a helper function of :func:`slice_with_int_dask_array`.\n \"\"\"\n from .core import Array, blockwise, from_array\n from . import chunk\n\n assert 0 <= axis < x.ndim\n\n if np.isnan(x.chunks[axis]).any():\n raise NotImplementedError(\n \"Slicing an array with unknown chunks with \"\n \"a dask.array of ints is not supported\"\n )\n\n # Calculate the offset at which each chunk starts along axis\n # e.g. chunks=(..., (5, 3, 4), ...) 
-> offset=[0, 5, 8]\n offset = np.roll(np.cumsum(x.chunks[axis]), 1)\n offset[0] = 0\n offset = from_array(offset, chunks=1)\n # Tamper with the declared chunks of offset to make blockwise align it with\n # x[axis]\n offset = Array(offset.dask, offset.name, (x.chunks[axis],), offset.dtype)\n\n # Define axis labels for blockwise\n x_axes = tuple(range(x.ndim))\n idx_axes = (x.ndim,) # arbitrary index not already in x_axes\n offset_axes = (axis,)\n p_axes = x_axes[: axis + 1] + idx_axes + x_axes[axis + 1 :]\n y_axes = x_axes[:axis] + idx_axes + x_axes[axis + 1 :]\n\n # Calculate the cartesian product of every chunk of x vs every chunk of idx\n p = blockwise(\n chunk.slice_with_int_dask_array,\n p_axes,\n x,\n x_axes,\n idx,\n idx_axes,\n offset,\n offset_axes,\n x_size=x.shape[axis],\n axis=axis,\n dtype=x.dtype,\n )\n\n # Aggregate on the chunks of x along axis\n y = blockwise(\n chunk.slice_with_int_dask_array_aggregate,\n y_axes,\n idx,\n idx_axes,\n p,\n p_axes,\n concatenate=True,\n x_chunks=x.chunks[axis],\n axis=axis,\n dtype=x.dtype,\n )\n return y", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_with_bool_dask_array_slice_with_bool_dask_array.return.out_tuple_out_index_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_with_bool_dask_array_slice_with_bool_dask_array.return.out_tuple_out_index_", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1053, "end_line": 1142, "span_ids": ["slice_with_bool_dask_array"], "tokens": 715}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def slice_with_bool_dask_array(x, index):\n \"\"\"Slice x with one or more dask arrays of bools\n\n This is a helper function of `Array.__getitem__`.\n\n Parameters\n ----------\n x: Array\n index: tuple with as many elements as x.ndim, among which there are\n one or more Array's with dtype=bool\n\n Returns\n -------\n tuple of (sliced x, new index)\n\n where the new index is the same as the input, but with slice(None)\n replaced to the original slicer when a filter has been applied.\n\n Note: The sliced x will have nan chunks on the sliced axes.\n \"\"\"\n from .core import Array, blockwise, elemwise\n\n out_index = [\n slice(None) if isinstance(ind, Array) and ind.dtype == bool else ind\n for ind in index\n ]\n\n if len(index) == 1 and index[0].ndim == x.ndim:\n if not np.isnan(x.shape).any() and not np.isnan(index[0].shape).any():\n x = x.ravel()\n index = tuple(i.ravel() for i in index)\n elif x.ndim > 1:\n warnings.warn(\n \"When slicing a Dask array of unknown chunks with a boolean mask \"\n \"Dask array, the output array may have a different ordering \"\n \"compared to the equivalent NumPy operation. 
This will raise an \"\n \"error in a future release of Dask.\",\n stacklevel=3,\n )\n y = elemwise(getitem, x, *index, dtype=x.dtype)\n name = \"getitem-\" + tokenize(x, index)\n dsk = {(name, i): k for i, k in enumerate(core.flatten(y.__dask_keys__()))}\n chunks = ((np.nan,) * y.npartitions,)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[y])\n return Array(graph, name, chunks, x.dtype), out_index\n\n if any(\n isinstance(ind, Array) and ind.dtype == bool and ind.ndim != 1 for ind in index\n ):\n raise NotImplementedError(\n \"Slicing with dask.array of bools only permitted when \"\n \"the indexer has only one dimension or when \"\n \"it has the same dimension as the sliced \"\n \"array\"\n )\n indexes = [\n ind if isinstance(ind, Array) and ind.dtype == bool else slice(None)\n for ind in index\n ]\n\n arginds = []\n i = 0\n for ind in indexes:\n if isinstance(ind, Array) and ind.dtype == bool:\n new = (ind, tuple(range(i, i + ind.ndim)))\n i += x.ndim\n else:\n new = (slice(None), None)\n i += 1\n arginds.append(new)\n\n arginds = list(concat(arginds))\n\n out = blockwise(\n getitem_variadic,\n tuple(range(x.ndim)),\n x,\n tuple(range(x.ndim)),\n *arginds,\n dtype=x.dtype\n )\n\n chunks = []\n for ind, chunk in zip(index, out.chunks):\n if isinstance(ind, Array) and ind.dtype == bool:\n chunks.append((np.nan,) * len(chunk))\n else:\n chunks.append(chunk)\n out._chunks = tuple(chunks)\n return out, tuple(out_index)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_getitem_variadic_make_block_sorted_slices.return.index2_index3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_getitem_variadic_make_block_sorted_slices.return.index2_index3", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1145, "end_line": 1200, "span_ids": ["make_block_sorted_slices", "getitem_variadic"], "tokens": 418}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def getitem_variadic(x, *index):\n return x[index]\n\n\ndef make_block_sorted_slices(index, chunks):\n \"\"\"Generate blockwise-sorted index pairs for shuffling an array.\n\n Parameters\n ----------\n index : ndarray\n An array of index positions.\n chunks : tuple\n Chunks from the original dask array\n\n Returns\n -------\n index2 : ndarray\n Same values as `index`, but each block has been sorted\n index3 : ndarray\n The location of the values of `index` in `index2`\n\n Examples\n --------\n >>> index = np.array([6, 0, 4, 2, 7, 1, 5, 3])\n >>> chunks = ((4, 4),)\n >>> a, b = make_block_sorted_slices(index, chunks)\n\n Notice that the first set of 4 items are sorted, and the\n second set of 4 items are sorted.\n\n >>> a\n array([0, 2, 4, 6, 1, 3, 5, 7])\n >>> b\n array([3, 0, 2, 1, 7, 4, 6, 5])\n \"\"\"\n from .core import slices_from_chunks\n\n slices = slices_from_chunks(chunks)\n\n if len(slices[0]) > 1:\n 
slices = [slice_[0] for slice_ in slices]\n\n offsets = np.roll(np.cumsum(chunks[0]), 1)\n offsets[0] = 0\n\n index2 = np.empty_like(index)\n index3 = np.empty_like(index)\n\n for slice_, offset in zip(slices, offsets):\n a = index[slice_]\n b = np.sort(a)\n c = offset + np.argsort(b.take(np.argsort(a)))\n index2[slice_] = b\n index3[slice_] = c\n\n return index2, index3", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_shuffle_slice_shuffle_slice.with_warnings_catch_warni.return.x_index2_rechunk_chunks2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_shuffle_slice_shuffle_slice.with_warnings_catch_warni.return.x_index2_rechunk_chunks2", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1203, "end_line": 1225, "span_ids": ["shuffle_slice"], "tokens": 163}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def shuffle_slice(x, index):\n \"\"\"A relatively efficient way to shuffle `x` according to `index`.\n\n Parameters\n ----------\n x : Array\n index : ndarray\n This should be an ndarray the same length as `x` containing\n each index position in ``range(0, len(x))``.\n\n Returns\n -------\n Array\n \"\"\"\n from .core import PerformanceWarning\n\n chunks1 = chunks2 = x.chunks\n if x.ndim > 1:\n chunks1 = (chunks1[0],)\n index2, index3 = make_block_sorted_slices(index, chunks1)\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", PerformanceWarning)\n return x[index2].rechunk(chunks2)[index3]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py__HashIdWrapper__cumsum.if_initial_zero_.else_.return.tuple_accumulate_add_seq": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py__HashIdWrapper__cumsum.if_initial_zero_.else_.return.tuple_accumulate_add_seq", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1228, "end_line": 1255, "span_ids": ["_HashIdWrapper.__eq__", "_HashIdWrapper", "_HashIdWrapper.__ne__", "_cumsum", "_HashIdWrapper.__hash__", "_HashIdWrapper.__init__"], "tokens": 183}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _HashIdWrapper(object):\n \"\"\"Hash and compare a wrapped object by identity instead of 
value\"\"\"\n\n def __init__(self, wrapped):\n self.wrapped = wrapped\n\n def __eq__(self, other):\n if not isinstance(other, _HashIdWrapper):\n return NotImplemented\n return self.wrapped is other.wrapped\n\n def __ne__(self, other):\n if not isinstance(other, _HashIdWrapper):\n return NotImplemented\n return self.wrapped is not other.wrapped\n\n def __hash__(self):\n return id(self.wrapped)\n\n\n@functools.lru_cache()\ndef _cumsum(seq, initial_zero):\n if isinstance(seq, _HashIdWrapper):\n seq = seq.wrapped\n if initial_zero:\n return tuple(accumulate(add, seq, 0))\n else:\n return tuple(accumulate(add, seq))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_cached_cumsum_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_cached_cumsum_", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1258, "end_line": 1285, "span_ids": ["cached_cumsum"], "tokens": 213}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def cached_cumsum(seq, initial_zero=False):\n \"\"\"Compute :meth:`toolz.accumulate` with caching.\n\n Caching is by the identify of `seq` rather than the value. 
It is thus\n important that `seq` is a tuple of immutable objects, and this function\n is intended for use where `seq` is a value that will persist (generally\n block sizes).\n\n Parameters\n ----------\n seq : tuple\n Values to cumulatively sum.\n initial_zero : bool, optional\n If true, the return value is prefixed with a zero.\n\n Returns\n -------\n tuple\n \"\"\"\n if isinstance(seq, tuple):\n # Look up by identity first, to avoid a linear-time __hash__\n # if we've seen this tuple object before.\n result = _cumsum(_HashIdWrapper(seq), initial_zero)\n else:\n # Construct a temporary tuple, and look up by value.\n result = _cumsum(tuple(seq), initial_zero)\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_____all__._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_____all__._", "embedding": null, "metadata": {"file_path": "dask/array/stats.py", "file_name": "stats.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 69, "span_ids": ["docstring"], "tokens": 281}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"\nStatistical functions and tests, following scipy.stats.\n\nSome differences\n\n- We don't handle missing values at all\n\n\"\"\"\n# This is lightly adapted from scipy.stats 0.19\n# https://github.com/scipy/scipy/blob/v0.19.0/scipy/stats/stats.py\nimport math\n\nimport numpy as np\nimport dask.array as da\nfrom dask.array.ufunc import wrap_elemwise\nfrom dask.utils import derived_from\nfrom dask import delayed\n\ntry:\n import scipy.stats\nexcept ImportError as e:\n raise ImportError(\"`dask.array.stats` requires `scipy` to be installed.\") from e\nfrom scipy.stats import distributions\nfrom scipy import special\nfrom scipy.stats.stats import (\n Ttest_indResult,\n Ttest_1sampResult,\n Ttest_relResult,\n Power_divergenceResult,\n NormaltestResult,\n SkewtestResult,\n KurtosistestResult,\n F_onewayResult,\n)\n\n\n__all__ = [\n \"ttest_ind\",\n \"ttest_1samp\",\n \"ttest_rel\",\n \"chisquare\",\n \"power_divergence\",\n \"skew\",\n \"skewtest\",\n \"kurtosis\",\n \"kurtosistest\",\n \"normaltest\",\n \"f_oneway\",\n \"moment\",\n]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_None_21_ttest_ind.return.delayed_Ttest_indResult_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_None_21_ttest_ind.return.delayed_Ttest_indResult_", "embedding": null, "metadata": {"file_path": "dask/array/stats.py", "file_name": "stats.py", "file_type": "text/x-python", "category": "implementation", "start_line": 71, "end_line": 90, "span_ids": ["ttest_ind", "docstring"], "tokens": 193}, 
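A short behavioural sketch of the cached_cumsum helper indexed just above, assuming it is importable from dask.array.slicing as shown in this docstore; the chunk tuple is illustrative:

from dask.array.slicing import cached_cumsum

chunks = (5, 3, 4)
print(cached_cumsum(chunks))                     # (5, 8, 12)
print(cached_cumsum(chunks, initial_zero=True))  # (0, 5, 8, 12)
# Reusing the same tuple object hits the identity-keyed lru_cache via
# _HashIdWrapper, avoiding a by-value hash of a potentially long tuple.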
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# -----------------\n# Statistical Tests\n# -----------------\n\n\n@derived_from(scipy.stats)\ndef ttest_ind(a, b, axis=0, equal_var=True):\n v1 = da.var(a, axis, ddof=1) # XXX: np -> da\n v2 = da.var(b, axis, ddof=1) # XXX: np -> da\n n1 = a.shape[axis]\n n2 = b.shape[axis]\n\n if equal_var:\n df, denom = _equal_var_ttest_denom(v1, n1, v2, n2)\n else:\n df, denom = _unequal_var_ttest_denom(v1, n1, v2, n2)\n\n res = _ttest_ind_from_stats(da.mean(a, axis), da.mean(b, axis), denom, df)\n\n return delayed(Ttest_indResult, nout=2)(*res)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_ttest_1samp_ttest_1samp.return.delayed_Ttest_1sampResult": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_ttest_1samp_ttest_1samp.return.delayed_Ttest_1sampResult", "embedding": null, "metadata": {"file_path": "dask/array/stats.py", "file_name": "stats.py", "file_type": "text/x-python", "category": "implementation", "start_line": 93, "end_line": 109, "span_ids": ["ttest_1samp"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(scipy.stats)\ndef ttest_1samp(a, popmean, axis=0, nan_policy=\"propagate\"):\n if nan_policy != \"propagate\":\n raise NotImplementedError(\n \"`nan_policy` other than 'propagate' have not been implemented.\"\n )\n n = a.shape[axis]\n df = n - 1\n\n d = da.mean(a, axis) - popmean\n v = da.var(a, axis, ddof=1)\n denom = da.sqrt(v / float(n))\n\n with np.errstate(divide=\"ignore\", invalid=\"ignore\"):\n t = da.divide(d, denom)\n t, prob = _ttest_finish(df, t)\n return delayed(Ttest_1sampResult, nout=2)(t, prob)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_ttest_rel_chisquare.return.power_divergence_f_obs_f": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_ttest_rel_chisquare.return.power_divergence_f_obs_f", "embedding": null, "metadata": {"file_path": "dask/array/stats.py", "file_name": "stats.py", "file_type": "text/x-python", "category": "implementation", "start_line": 112, "end_line": 136, "span_ids": ["chisquare", "ttest_rel"], "tokens": 228}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(scipy.stats)\ndef ttest_rel(a, b, axis=0, nan_policy=\"propagate\"):\n if nan_policy != \"propagate\":\n raise NotImplementedError(\n \"`nan_policy` other than 'propagate' have not been implemented.\"\n )\n\n n = a.shape[axis]\n df = float(n - 1)\n\n d = (a - b).astype(np.float64)\n v = da.var(d, axis, ddof=1)\n dm = da.mean(d, axis)\n denom = da.sqrt(v / float(n))\n\n with np.errstate(divide=\"ignore\", invalid=\"ignore\"):\n t = da.divide(dm, denom)\n t, prob = _ttest_finish(df, t)\n\n return delayed(Ttest_relResult, nout=2)(t, prob)\n\n\n@derived_from(scipy.stats)\ndef chisquare(f_obs, f_exp=None, ddof=0, axis=0):\n return power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis, lambda_=\"pearson\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_power_divergence_power_divergence.return.delayed_Power_divergenceR": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_power_divergence_power_divergence.return.delayed_Power_divergenceR", "embedding": null, "metadata": {"file_path": "dask/array/stats.py", "file_name": "stats.py", "file_type": "text/x-python", "category": "implementation", "start_line": 139, "end_line": 183, "span_ids": ["power_divergence"], "tokens": 472}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(scipy.stats)\ndef power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):\n\n if isinstance(lambda_, str):\n # TODO: public api\n if lambda_ not in scipy.stats.stats._power_div_lambda_names:\n names = repr(list(scipy.stats.stats._power_div_lambda_names.keys()))[1:-1]\n raise ValueError(\n \"invalid string for lambda_: {0!r}. Valid strings \"\n \"are {1}\".format(lambda_, names)\n )\n lambda_ = scipy.stats.stats._power_div_lambda_names[lambda_]\n elif lambda_ is None:\n lambda_ = 1\n\n if f_exp is not None:\n # f_exp = np.atleast_1d(np.asanyarray(f_exp))\n pass\n else:\n f_exp = f_obs.mean(axis=axis, keepdims=True)\n\n # `terms` is the array of terms that are summed along `axis` to create\n # the test statistic. We use some specialized code for a few special\n # cases of lambda_.\n if lambda_ == 1:\n # Pearson's chi-squared statistic\n terms = (f_obs - f_exp) ** 2 / f_exp\n elif lambda_ == 0:\n # Log-likelihood ratio (i.e. 
G-test)\n terms = 2.0 * _xlogy(f_obs, f_obs / f_exp)\n elif lambda_ == -1:\n # Modified log-likelihood ratio\n terms = 2.0 * _xlogy(f_exp, f_exp / f_obs)\n else:\n # General Cressie-Read power divergence.\n terms = f_obs * ((f_obs / f_exp) ** lambda_ - 1)\n terms /= 0.5 * lambda_ * (lambda_ + 1)\n\n stat = terms.sum(axis=axis)\n\n num_obs = _count(terms, axis=axis)\n # ddof = asarray(ddof)\n p = delayed(distributions.chi2.sf)(stat, num_obs - 1 - ddof)\n\n return delayed(Power_divergenceResult, nout=2)(stat, p)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_skew_skew.return.vals": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_skew_skew.return.vals", "embedding": null, "metadata": {"file_path": "dask/array/stats.py", "file_name": "stats.py", "file_type": "text/x-python", "category": "implementation", "start_line": 186, "end_line": 210, "span_ids": ["skew"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(scipy.stats)\ndef skew(a, axis=0, bias=True, nan_policy=\"propagate\"):\n if nan_policy != \"propagate\":\n raise NotImplementedError(\n \"`nan_policy` other than 'propagate' have not been implemented.\"\n )\n\n n = a.shape[axis] # noqa; for bias\n m2 = moment(a, 2, axis)\n m3 = moment(a, 3, axis)\n zero = m2 == 0\n vals = da.where(~zero, m3 / m2 ** 1.5, 0.0)\n # vals = da.where(~zero, (m2, m3),\n # lambda m2, m3: m3 / m2**1.5,\n # 0.)\n if not bias:\n # Need a version of np.place\n raise NotImplementedError(\"bias=False is not implemented.\")\n\n if vals.ndim == 0:\n return vals\n # TODO: scalar\n # return vals.item()\n\n return vals", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_skewtest_skewtest.return.delayed_SkewtestResult_n": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_skewtest_skewtest.return.delayed_SkewtestResult_n", "embedding": null, "metadata": {"file_path": "dask/array/stats.py", "file_name": "stats.py", "file_type": "text/x-python", "category": "implementation", "start_line": 213, "end_line": 241, "span_ids": ["skewtest"], "tokens": 340}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(scipy.stats)\ndef skewtest(a, axis=0, nan_policy=\"propagate\"):\n if nan_policy != \"propagate\":\n raise NotImplementedError(\n \"`nan_policy` other than 'propagate' have not been implemented.\"\n )\n\n b2 = skew(a, axis)\n n = 
float(a.shape[axis])\n if n < 8:\n raise ValueError(\n \"skewtest is not valid with less than 8 samples; %i samples\"\n \" were given.\" % int(n)\n )\n y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))\n beta2 = (\n 3.0\n * (n ** 2 + 27 * n - 70)\n * (n + 1)\n * (n + 3)\n / ((n - 2.0) * (n + 5) * (n + 7) * (n + 9))\n )\n W2 = -1 + math.sqrt(2 * (beta2 - 1))\n delta = 1 / math.sqrt(0.5 * math.log(W2))\n alpha = math.sqrt(2.0 / (W2 - 1))\n y = np.where(y == 0, 1, y)\n Z = delta * np.log(y / alpha + np.sqrt((y / alpha) ** 2 + 1))\n\n return delayed(SkewtestResult, nout=2)(Z, 2 * distributions.norm.sf(np.abs(Z)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_kurtosis_kurtosis.if_fisher_.else_._TODO_scalar_vals_va": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_kurtosis_kurtosis.if_fisher_.else_._TODO_scalar_vals_va", "embedding": null, "metadata": {"file_path": "dask/array/stats.py", "file_name": "stats.py", "file_type": "text/x-python", "category": "implementation", "start_line": 244, "end_line": 268, "span_ids": ["kurtosis"], "tokens": 213}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(scipy.stats)\ndef kurtosis(a, axis=0, fisher=True, bias=True, nan_policy=\"propagate\"):\n if nan_policy != \"propagate\":\n raise NotImplementedError(\n \"`nan_policy` other than 'propagate' have not been implemented.\"\n )\n n = a.shape[axis] # noqa; for bias\n m2 = moment(a, 2, axis)\n m4 = moment(a, 4, axis)\n zero = m2 == 0\n olderr = np.seterr(all=\"ignore\")\n try:\n vals = da.where(zero, 0, m4 / m2 ** 2.0)\n finally:\n np.seterr(**olderr)\n\n if not bias:\n # need a version of np.place\n raise NotImplementedError(\"bias=False is not implemented.\")\n\n if fisher:\n return vals - 3\n else:\n return vals\n # TODO: scalar; vals = vals.item() # array scalar", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_kurtosistest_kurtosistest.return.delayed_KurtosistestResul": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_kurtosistest_kurtosistest.return.delayed_KurtosistestResul", "embedding": null, "metadata": {"file_path": "dask/array/stats.py", "file_name": "stats.py", "file_type": "text/x-python", "category": "implementation", "start_line": 271, "end_line": 305, "span_ids": ["kurtosistest"], "tokens": 547}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": 
"@derived_from(scipy.stats)\ndef kurtosistest(a, axis=0, nan_policy=\"propagate\"):\n if nan_policy != \"propagate\":\n raise NotImplementedError(\n \"`nan_policy` other than 'propagate' have not been implemented.\"\n )\n\n n = float(a.shape[axis])\n b2 = kurtosis(a, axis, fisher=False)\n\n E = 3.0 * (n - 1) / (n + 1)\n varb2 = (\n 24.0 * n * (n - 2) * (n - 3) / ((n + 1) * (n + 1.0) * (n + 3) * (n + 5))\n ) # [1]_ Eq. 1\n x = (b2 - E) / np.sqrt(varb2) # [1]_ Eq. 4\n # [1]_ Eq. 2:\n sqrtbeta1 = (\n 6.0\n * (n * n - 5 * n + 2)\n / ((n + 7) * (n + 9))\n * np.sqrt((6.0 * (n + 3) * (n + 5)) / (n * (n - 2) * (n - 3)))\n )\n # [1]_ Eq. 3:\n A = 6.0 + 8.0 / sqrtbeta1 * (2.0 / sqrtbeta1 + np.sqrt(1 + 4.0 / (sqrtbeta1 ** 2)))\n term1 = 1 - 2 / (9.0 * A)\n denom = 1 + x * np.sqrt(2 / (A - 4.0))\n denom = np.where(denom < 0, 99, denom)\n term2 = np.where(denom < 0, term1, np.power((1 - 2.0 / A) / denom, 1 / 3.0))\n Z = (term1 - term2) / np.sqrt(2 / (9.0 * A)) # [1]_ Eq. 5\n Z = np.where(denom == 99, 0, Z)\n if Z.ndim == 0:\n Z = Z[()]\n\n # zprob uses upper tail, so Z needs to be positive\n return delayed(KurtosistestResult, nout=2)(Z, 2 * distributions.norm.sf(np.abs(Z)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_normaltest_normaltest.return.delayed_NormaltestResult_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_normaltest_normaltest.return.delayed_NormaltestResult_", "embedding": null, "metadata": {"file_path": "dask/array/stats.py", "file_name": "stats.py", "file_type": "text/x-python", "category": "implementation", "start_line": 308, "end_line": 318, "span_ids": ["normaltest"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(scipy.stats)\ndef normaltest(a, axis=0, nan_policy=\"propagate\"):\n if nan_policy != \"propagate\":\n raise NotImplementedError(\n \"`nan_policy` other than 'propagate' have not been implemented.\"\n )\n\n s, _ = skewtest(a, axis)\n k, _ = kurtosistest(a, axis)\n k2 = s * s + k * k\n return delayed(NormaltestResult, nout=2)(k2, delayed(distributions.chi2.sf)(k2, 2))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_f_oneway_f_oneway.return.delayed_F_onewayResult_n": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_f_oneway_f_oneway.return.delayed_F_onewayResult_n", "embedding": null, "metadata": {"file_path": "dask/array/stats.py", "file_name": "stats.py", "file_type": "text/x-python", "category": "implementation", "start_line": 321, "end_line": 353, "span_ids": ["f_oneway"], "tokens": 351}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(scipy.stats)\ndef f_oneway(*args):\n # args = [np.asarray(arg, dtype=float) for arg in args]\n # ANOVA on N groups, each in its own array\n num_groups = len(args)\n alldata = da.concatenate(args)\n bign = len(alldata)\n\n # Determine the mean of the data, and subtract that from all inputs to a\n # variance (via sum_of_sq / sq_of_sum) calculation. Variance is invariance\n # to a shift in location, and centering all data around zero vastly\n # improves numerical stability.\n offset = alldata.mean()\n alldata -= offset\n\n sstot = _sum_of_squares(alldata) - (_square_of_sums(alldata) / float(bign))\n ssbn = 0\n for a in args:\n ssbn += _square_of_sums(a - offset) / float(len(a))\n\n # Naming: variables ending in bn/b are for \"between treatments\", wn/w are\n # for \"within treatments\"\n ssbn -= _square_of_sums(alldata) / float(bign)\n sswn = sstot - ssbn\n dfbn = num_groups - 1\n dfwn = bign - num_groups\n msb = ssbn / float(dfbn)\n msw = sswn / float(dfwn)\n f = msb / msw\n\n prob = _fdtrc(dfbn, dfwn, f) # equivalent to stats.f.sf\n\n return delayed(F_onewayResult, nout=2)(f, prob)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_moment__equal_var_ttest_denom.return.df_denom": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_moment__equal_var_ttest_denom.return.df_denom", "embedding": null, "metadata": {"file_path": "dask/array/stats.py", "file_name": "stats.py", "file_type": "text/x-python", "category": "implementation", "start_line": 356, "end_line": 378, "span_ids": ["moment", "_equal_var_ttest_denom", "impl:6"], "tokens": 224}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(scipy.stats)\ndef moment(a, moment=1, axis=0, nan_policy=\"propagate\"):\n if nan_policy != \"propagate\":\n raise NotImplementedError(\n \"`nan_policy` other than 'propagate' have not been implemented.\"\n )\n return da.moment(a, moment, axis=axis)\n\n\n# -------\n# Helpers\n# -------\n# Don't really want to do all of scipy.special (or do we?)\n\n_xlogy = wrap_elemwise(special.xlogy, source=special)\n_fdtrc = wrap_elemwise(special.fdtrc, source=special)\n\n\ndef _equal_var_ttest_denom(v1, n1, v2, n2):\n df = n1 + n2 - 2.0\n svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / df\n denom = da.sqrt(svar * (1.0 / n1 + 1.0 / n2)) # XXX: np -> da\n return df, denom", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py__unequal_var_ttest_denom__unequal_var_ttest_denom.return.df_denom": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py__unequal_var_ttest_denom__unequal_var_ttest_denom.return.df_denom", "embedding": null, "metadata": {"file_path": "dask/array/stats.py", "file_name": "stats.py", "file_type": "text/x-python", "category": "implementation", "start_line": 381, "end_line": 391, "span_ids": ["_unequal_var_ttest_denom"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _unequal_var_ttest_denom(v1, n1, v2, n2):\n vn1 = v1 / n1\n vn2 = v2 / n2\n with np.errstate(divide=\"ignore\", invalid=\"ignore\"):\n df = (vn1 + vn2) ** 2 / (vn1 ** 2 / (n1 - 1) + vn2 ** 2 / (n2 - 1))\n\n # If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).\n # Hence it doesn't matter what df is as long as it's not NaN.\n df = da.where(da.isnan(df), 1, df) # XXX: np -> da\n denom = da.sqrt(vn1 + vn2)\n return df, denom", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py__ttest_ind_from_stats__count.if_axis_is_None_.else_.return.x_shape_axis_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py__ttest_ind_from_stats__count.if_axis_is_None_.else_.return.x_shape_axis_", "embedding": null, "metadata": {"file_path": "dask/array/stats.py", "file_name": "stats.py", "file_type": "text/x-python", "category": "implementation", "start_line": 394, "end_line": 421, "span_ids": ["_ttest_finish", "_count", "_ttest_ind_from_stats"], "tokens": 191}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _ttest_ind_from_stats(mean1, mean2, denom, df):\n\n d = mean1 - mean2\n with np.errstate(divide=\"ignore\", invalid=\"ignore\"):\n t = da.divide(d, denom)\n t, prob = _ttest_finish(df, t)\n\n return (t, prob)\n\n\ndef _ttest_finish(df, t):\n \"\"\"Common code between all 3 t-test functions.\"\"\"\n # XXX: np.abs -> da.absolute\n # XXX: delayed(distributions.t.sf)\n prob = (\n delayed(distributions.t.sf)(da.absolute(t), df) * 2\n ) # use np.abs to get upper tail\n if t.ndim == 0:\n t = t[()]\n\n return t, prob\n\n\ndef _count(x, axis=None):\n if axis is None:\n return x.size\n else:\n return x.shape[axis]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py__sum_of_squares__sum_of_squares.return.da_sum_a_a_axis_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py__sum_of_squares__sum_of_squares.return.da_sum_a_a_axis_", "embedding": 
null, "metadata": {"file_path": "dask/array/stats.py", "file_name": "stats.py", "file_type": "text/x-python", "category": "implementation", "start_line": 424, "end_line": 443, "span_ids": ["_sum_of_squares"], "tokens": 155}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _sum_of_squares(a, axis=0):\n \"\"\"\n Squares each element of the input array, and returns the sum(s) of that.\n Parameters\n ----------\n a : array_like\n Input array.\n axis : int or None, optional\n Axis along which to calculate. Default is 0. If None, compute over\n the whole array `a`.\n Returns\n -------\n sum_of_squares : ndarray\n The sum along the given axis for (a**2).\n See also\n --------\n _square_of_sums : The square(s) of the sum(s) (the opposite of\n `_sum_of_squares`).\n \"\"\"\n return da.sum(a * a, axis)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py__square_of_sums_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py__square_of_sums_", "embedding": null, "metadata": {"file_path": "dask/array/stats.py", "file_name": "stats.py", "file_type": "text/x-python", "category": "implementation", "start_line": 446, "end_line": 466, "span_ids": ["_square_of_sums"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _square_of_sums(a, axis=0):\n \"\"\"\n Sums elements of the input array, and returns the square(s) of that sum.\n Parameters\n ----------\n a : array_like\n Input array.\n axis : int or None, optional\n Axis along which to calculate. Default is 0. 
If None, compute over\n the whole array `a`.\n Returns\n -------\n square_of_sums : float or ndarray\n The square of the sum over `axis`.\n See also\n --------\n _sum_of_squares : The sum of squares (the opposite of `square_of_sums`).\n \"\"\"\n s = da.sum(a, axis)\n return s * s", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_math_text_style._font_size_1_0rem_font_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_math_text_style._font_size_1_0rem_font_", "embedding": null, "metadata": {"file_path": "dask/array/svg.py", "file_name": "svg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 40, "span_ids": ["imports", "svg", "impl"], "tokens": 293}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import math\nimport re\n\nimport numpy as np\n\n\ndef svg(chunks, size=200, **kwargs):\n \"\"\"Convert chunks from Dask Array into an SVG Image\n\n Parameters\n ----------\n chunks: tuple\n size: int\n Rough size of the image\n\n Returns\n -------\n text: An svg string depicting the array as a grid of chunks\n \"\"\"\n shape = tuple(map(sum, chunks))\n if np.isnan(shape).any(): # don't support unknown sizes\n raise NotImplementedError(\n \"Can't generate SVG with unknown chunk sizes.\\n\\n\"\n \" A possible solution is with x.compute_chunk_sizes()\"\n )\n if not all(shape):\n raise NotImplementedError(\"Can't generate SVG with 0-length dimensions\")\n if len(chunks) == 0:\n raise NotImplementedError(\"Can't generate SVG with 0 dimensions\")\n if len(chunks) == 1:\n return svg_1d(chunks, size=size, **kwargs)\n elif len(chunks) == 2:\n return svg_2d(chunks, size=size, **kwargs)\n elif len(chunks) == 3:\n return svg_3d(chunks, size=size, **kwargs)\n else:\n return svg_nd(chunks, size=size, **kwargs)\n\n\ntext_style = 'font-size=\"1.0rem\" font-weight=\"100\" text-anchor=\"middle\"'", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_svg_2d_svg_2d.return.header_n_join_lines_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_svg_2d_svg_2d.return.header_n_join_lines_", "embedding": null, "metadata": {"file_path": "dask/array/svg.py", "file_name": "svg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 43, "end_line": 72, "span_ids": ["svg_2d"], "tokens": 326}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def svg_2d(chunks, 
offset=(0, 0), skew=(0, 0), size=200, sizes=None):\n shape = tuple(map(sum, chunks))\n sizes = sizes or draw_sizes(shape, size=size)\n y, x = grid_points(chunks, sizes)\n\n lines, (min_x, max_x, min_y, max_y) = svg_grid(\n x, y, offset=offset, skew=skew, size=size\n )\n\n header = (\n '\\n'\n % (max_x + 50, max_y + 50)\n )\n footer = \"\\n\"\n\n if shape[0] >= 100:\n rotate = -90\n else:\n rotate = 0\n\n text = [\n \"\",\n \" \",\n ' %d'\n % (max_x / 2, max_y + 20, text_style, shape[1]),\n ' %d'\n % (max_x + 20, max_y / 2, text_style, rotate, max_x + 20, max_y / 2, shape[0]),\n ]\n\n return header + \"\\n\".join(lines + text) + footer", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_svg_3d_svg_3d.return.header_n_join_xy_z": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_svg_3d_svg_3d.return.header_n_join_xy_z", "embedding": null, "metadata": {"file_path": "dask/array/svg.py", "file_name": "svg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 75, "end_line": 129, "span_ids": ["svg_3d"], "tokens": 581}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def svg_3d(chunks, size=200, sizes=None, offset=(0, 0)):\n shape = tuple(map(sum, chunks))\n sizes = sizes or draw_sizes(shape, size=size)\n x, y, z = grid_points(chunks, sizes)\n ox, oy = offset\n\n xy, (mnx, mxx, mny, mxy) = svg_grid(\n x / 1.7, y, offset=(ox + 10, oy + 0), skew=(1, 0), size=size\n )\n\n zx, (_, _, _, max_x) = svg_grid(\n z, x / 1.7, offset=(ox + 10, oy + 0), skew=(0, 1), size=size\n )\n zy, (min_z, max_z, min_y, max_y) = svg_grid(\n z, y, offset=(ox + max_x + 10, oy + max_x), skew=(0, 0), size=size\n )\n\n header = (\n '\\n'\n % (max_z + 50, max_y + 50)\n )\n footer = \"\\n\"\n\n if shape[1] >= 100:\n rotate = -90\n else:\n rotate = 0\n\n text = [\n \"\",\n \" \",\n ' %d'\n % ((min_z + max_z) / 2, max_y + 20, text_style, shape[2]),\n ' %d'\n % (\n max_z + 20,\n (min_y + max_y) / 2,\n text_style,\n rotate,\n max_z + 20,\n (min_y + max_y) / 2,\n shape[1],\n ),\n ' %d'\n % (\n (mnx + mxx) / 2 - 10,\n mxy - (mxx - mnx) / 2 + 20,\n text_style,\n (mnx + mxx) / 2 - 10,\n mxy - (mxx - mnx) / 2 + 20,\n shape[0],\n ),\n ]\n\n return header + \"\\n\".join(xy + zx + zy + text) + footer", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_svg_nd_svg_nd.return.header_n_n_join_out_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_svg_nd_svg_nd.return.header_n_n_join_out_", "embedding": null, "metadata": {"file_path": "dask/array/svg.py", "file_name": "svg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 128, "end_line": 160, "span_ids": ["svg_nd"], "tokens": 
314}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def svg_nd(chunks, size=200):\n if len(chunks) % 3 == 1:\n chunks = ((1,),) + chunks\n shape = tuple(map(sum, chunks))\n sizes = draw_sizes(shape, size=size)\n\n chunks2 = chunks\n sizes2 = sizes\n out = []\n left = 0\n total_height = 0\n while chunks2:\n n = len(chunks2) % 3 or 3\n o = svg(chunks2[:n], sizes=sizes2[:n], offset=(left, 0))\n chunks2 = chunks2[n:]\n sizes2 = sizes2[n:]\n\n lines = o.split(\"\\n\")\n header = lines[0]\n height = float(re.search(r'height=\"(\\d*\\.?\\d*)\"', header).groups()[0])\n total_height = max(total_height, height)\n width = float(re.search(r'width=\"(\\d*\\.?\\d*)\"', header).groups()[0])\n left += width + 10\n o = \"\\n\".join(lines[1:-1]) # remove header and footer\n\n out.append(o)\n\n header = (\n '\\n'\n % (left, total_height)\n )\n footer = \"\\n\"\n return header + \"\\n\\n\".join(out) + footer", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_svg_lines_svg_lines.return.lines": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_svg_lines_svg_lines.return.lines", "embedding": null, "metadata": {"file_path": "dask/array/svg.py", "file_name": "svg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 167, "end_line": 190, "span_ids": ["svg_lines"], "tokens": 298}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def svg_lines(x1, y1, x2, y2, max_n=20):\n \"\"\"Convert points into lines of text for an SVG plot\n\n Examples\n --------\n >>> svg_lines([0, 1], [0, 0], [10, 11], [1, 1]) # doctest: +NORMALIZE_WHITESPACE\n [' ',\n ' ']\n \"\"\"\n n = len(x1)\n\n if n > max_n:\n indices = np.linspace(0, n - 1, max_n, dtype=\"int\")\n else:\n indices = range(n)\n\n lines = [\n ' ' % (x1[i], y1[i], x2[i], y2[i])\n for i in indices\n ]\n\n lines[0] = lines[0].replace(\" /\", ' style=\"stroke-width:2\" /')\n lines[-1] = lines[-1].replace(\" /\", ' style=\"stroke-width:2\" /')\n return lines", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_svg_grid_svg_grid.return.h_lines_v_lines_rect_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_svg_grid_svg_grid.return.h_lines_v_lines_rect_", "embedding": null, "metadata": {"file_path": "dask/array/svg.py", "file_name": "svg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 193, "end_line": 246, "span_ids": 
["svg_grid"], "tokens": 554}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def svg_grid(x, y, offset=(0, 0), skew=(0, 0), size=200):\n \"\"\"Create lines of SVG text that show a grid\n\n Parameters\n ----------\n x: numpy.ndarray\n y: numpy.ndarray\n offset: tuple\n translational displacement of the grid in SVG coordinates\n skew: tuple\n \"\"\"\n # Horizontal lines\n x1 = np.zeros_like(y) + offset[0]\n y1 = y + offset[1]\n x2 = np.full_like(y, x[-1]) + offset[0]\n y2 = y + offset[1]\n\n if skew[0]:\n y2 += x.max() * skew[0]\n if skew[1]:\n x1 += skew[1] * y\n x2 += skew[1] * y\n\n min_x = min(x1.min(), x2.min())\n min_y = min(y1.min(), y2.min())\n max_x = max(x1.max(), x2.max())\n max_y = max(y1.max(), y2.max())\n max_n = size // 6\n\n h_lines = [\"\", \" \"] + svg_lines(x1, y1, x2, y2, max_n)\n\n # Vertical lines\n x1 = x + offset[0]\n y1 = np.zeros_like(x) + offset[1]\n x2 = x + offset[0]\n y2 = np.full_like(x, y[-1]) + offset[1]\n\n if skew[0]:\n y1 += skew[0] * x\n y2 += skew[0] * x\n if skew[1]:\n x2 += skew[1] * y.max()\n\n v_lines = [\"\", \" \"] + svg_lines(x1, y1, x2, y2, max_n)\n\n color = \"ECB172\" if len(x) < max_n and len(y) < max_n else \"8B4903\"\n corners = f\"{x1[0]},{y1[0]} {x1[-1]},{y1[-1]} {x2[-1]},{y2[-1]} {x2[0]},{y2[0]}\"\n rect = [\n \"\",\n \" \",\n f' ',\n ]\n\n return h_lines + v_lines + rect, (min_x, max_x, min_y, max_y)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_svg_1d_draw_sizes.return.tuple_size_r_for_r_in_r": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_svg_1d_draw_sizes.return.tuple_size_r_for_r_in_r", "embedding": null, "metadata": {"file_path": "dask/array/svg.py", "file_name": "svg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 237, "end_line": 252, "span_ids": ["grid_points", "svg_1d", "draw_sizes"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def svg_1d(chunks, sizes=None, **kwargs):\n return svg_2d(((1,),) + chunks, **kwargs)\n\n\ndef grid_points(chunks, sizes):\n cumchunks = [np.cumsum((0,) + c) for c in chunks]\n points = [x * size / x[-1] for x, size in zip(cumchunks, sizes)]\n return points\n\n\ndef draw_sizes(shape, size=200):\n \"\"\" Get size in pixels for all dimensions \"\"\"\n mx = max(shape)\n ratios = [mx / max(0.1, d) for d in shape]\n ratios = [ratio_response(r) for r in ratios]\n return tuple(size / r for r in ratios)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_ratio_response_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_ratio_response_", "embedding": null, "metadata": {"file_path": "dask/array/svg.py", "file_name": "svg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 255, "end_line": 270, "span_ids": ["ratio_response"], "tokens": 126}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def ratio_response(x):\n \"\"\"How we display actual size ratios\n\n Common ratios in sizes span several orders of magnitude,\n which is hard for us to perceive.\n\n We keep ratios in the 1-3 range accurate, and then apply a logarithm to\n values up until about 100 or so, at which point we stop scaling.\n \"\"\"\n if x < math.e:\n return x\n elif x <= 100:\n return math.log(x + 12.4) # f(e) == e\n else:\n return math.log(100 + 12.4)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_copy_from_numpy_import_nancums": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_copy_from_numpy_import_nancums", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 56, "span_ids": ["imports"], "tokens": 286}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import copy\n\nimport pytest\n\nnp = pytest.importorskip(\"numpy\")\n\nimport os\nimport time\nfrom io import StringIO\nfrom distutils.version import LooseVersion\nimport operator\nfrom operator import add, sub, getitem\nfrom threading import Lock\nimport warnings\n\nfrom tlz import merge, countby, concat\nfrom tlz.curried import identity\n\nimport dask\nimport dask.array as da\nimport dask.dataframe\nfrom dask.base import tokenize, compute_as_if_collection\nfrom dask.delayed import Delayed, delayed\nfrom dask.utils import ignoring, tmpfile, tmpdir, key_split, apply\nfrom dask.utils_test import inc, dec\n\nfrom dask.array.core import (\n getem,\n getter,\n dotmany,\n concatenate3,\n Array,\n stack,\n concatenate,\n from_array,\n broadcast_shapes,\n broadcast_to,\n blockdims_from_blockshape,\n store,\n optimize,\n from_func,\n normalize_chunks,\n broadcast_chunks,\n from_delayed,\n common_blockdim,\n concatenate_axes,\n)\nfrom dask.blockwise import (\n make_blockwise_graph as top,\n broadcast_dimensions,\n optimize_blockwise,\n)\nfrom dask.array.utils import assert_eq, same_keys\nfrom dask.array.numpy_compat import _numpy_120\n\nfrom numpy import nancumsum, nancumprod", 
"start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_getem_test_getem.assert_getem_X_2_3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_getem_test_getem.assert_getem_X_2_3_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 59, "end_line": 66, "span_ids": ["test_getem"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_getem():\n sol = {\n (\"X\", 0, 0): (getter, \"X\", (slice(0, 2), slice(0, 3))),\n (\"X\", 1, 0): (getter, \"X\", (slice(2, 4), slice(0, 3))),\n (\"X\", 1, 1): (getter, \"X\", (slice(2, 4), slice(3, 6))),\n (\"X\", 0, 1): (getter, \"X\", (slice(0, 2), slice(3, 6))),\n }\n assert getem(\"X\", (2, 3), shape=(4, 6)) == sol", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_top_test_top.assert_top_identity_z_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_top_test_top.assert_top_identity_z_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 69, "end_line": 97, "span_ids": ["test_top"], "tokens": 632}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_top():\n assert top(inc, \"z\", \"ij\", \"x\", \"ij\", numblocks={\"x\": (2, 2)}) == {\n (\"z\", 0, 0): (inc, (\"x\", 0, 0)),\n (\"z\", 0, 1): (inc, (\"x\", 0, 1)),\n (\"z\", 1, 0): (inc, (\"x\", 1, 0)),\n (\"z\", 1, 1): (inc, (\"x\", 1, 1)),\n }\n\n assert top(\n add, \"z\", \"ij\", \"x\", \"ij\", \"y\", \"ij\", numblocks={\"x\": (2, 2), \"y\": (2, 2)}\n ) == {\n (\"z\", 0, 0): (add, (\"x\", 0, 0), (\"y\", 0, 0)),\n (\"z\", 0, 1): (add, (\"x\", 0, 1), (\"y\", 0, 1)),\n (\"z\", 1, 0): (add, (\"x\", 1, 0), (\"y\", 1, 0)),\n (\"z\", 1, 1): (add, (\"x\", 1, 1), (\"y\", 1, 1)),\n }\n\n assert top(\n dotmany, \"z\", \"ik\", \"x\", \"ij\", \"y\", \"jk\", numblocks={\"x\": (2, 2), \"y\": (2, 2)}\n ) == {\n (\"z\", 0, 0): (dotmany, [(\"x\", 0, 0), (\"x\", 0, 1)], [(\"y\", 0, 0), (\"y\", 1, 0)]),\n (\"z\", 0, 1): (dotmany, [(\"x\", 0, 0), (\"x\", 0, 1)], [(\"y\", 0, 1), (\"y\", 1, 1)]),\n (\"z\", 1, 0): (dotmany, [(\"x\", 1, 0), (\"x\", 1, 1)], [(\"y\", 
0, 0), (\"y\", 1, 0)]),\n (\"z\", 1, 1): (dotmany, [(\"x\", 1, 0), (\"x\", 1, 1)], [(\"y\", 0, 1), (\"y\", 1, 1)]),\n }\n\n assert top(identity, \"z\", \"\", \"x\", \"ij\", numblocks={\"x\": (2, 2)}) == {\n (\"z\",): (identity, [[(\"x\", 0, 0), (\"x\", 0, 1)], [(\"x\", 1, 0), (\"x\", 1, 1)]])\n }", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_top_with_kwargs_test_top_supports_broadcasting_rules.assert_top_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_top_with_kwargs_test_top_supports_broadcasting_rules.assert_top_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 100, "end_line": 115, "span_ids": ["test_top_supports_broadcasting_rules", "test_top_with_kwargs"], "tokens": 274}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_top_with_kwargs():\n assert top(add, \"z\", \"i\", \"x\", \"i\", numblocks={\"x\": (2, 0)}, b=100) == {\n (\"z\", 0): (apply, add, [(\"x\", 0)], {\"b\": 100}),\n (\"z\", 1): (apply, add, [(\"x\", 1)], {\"b\": 100}),\n }\n\n\ndef test_top_supports_broadcasting_rules():\n assert top(\n add, \"z\", \"ij\", \"x\", \"ij\", \"y\", \"ij\", numblocks={\"x\": (1, 2), \"y\": (2, 1)}\n ) == {\n (\"z\", 0, 0): (add, (\"x\", 0, 0), (\"y\", 0, 0)),\n (\"z\", 0, 1): (add, (\"x\", 0, 1), (\"y\", 0, 0)),\n (\"z\", 1, 0): (add, (\"x\", 0, 0), (\"y\", 1, 0)),\n (\"z\", 1, 1): (add, (\"x\", 0, 1), (\"y\", 1, 0)),\n }", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_top_literals_test_top_literals.assert_top_add_z_ij_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_top_literals_test_top_literals.assert_top_add_z_ij_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 118, "end_line": 124, "span_ids": ["test_top_literals"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_top_literals():\n assert top(add, \"z\", \"ij\", \"x\", \"ij\", 123, None, numblocks={\"x\": (2, 2)}) == {\n (\"z\", 0, 0): (add, (\"x\", 0, 0), 123),\n (\"z\", 0, 1): (add, (\"x\", 0, 1), 123),\n (\"z\", 1, 0): (add, (\"x\", 
1, 0), 123),\n (\"z\", 1, 1): (add, (\"x\", 1, 1), 123),\n }", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_blockwise_literals_test_blockwise_literals.assert_eq_z_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_blockwise_literals_test_blockwise_literals.assert_eq_z_x_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 127, "end_line": 138, "span_ids": ["test_blockwise_literals"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_blockwise_literals():\n x = da.ones((10, 10), chunks=(5, 5))\n z = da.blockwise(add, \"ij\", x, \"ij\", 100, None, dtype=x.dtype)\n assert_eq(z, x + 100)\n\n z = da.blockwise(\n lambda x, y, z: x * y + z, \"ij\", 2, None, x, \"ij\", 100, None, dtype=x.dtype\n )\n assert_eq(z, 2 * x + 100)\n\n z = da.blockwise(getitem, \"ij\", x, \"ij\", slice(None), None, dtype=x.dtype)\n assert_eq(z, x)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_blockwise_1_in_shape_I_test_blockwise_1_in_shape_I.da_blockwise_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_blockwise_1_in_shape_I_test_blockwise_1_in_shape_I.da_blockwise_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 141, "end_line": 155, "span_ids": ["test_blockwise_1_in_shape_I"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_blockwise_1_in_shape_I():\n def test_f(a, b):\n assert 1 in b.shape\n\n p, k, N = 7, 2, 5\n da.blockwise(\n test_f,\n \"x\",\n da.zeros((2 * p, 9, k * N), chunks=(p, 3, k)),\n \"xzt\",\n da.zeros((2 * p, 9, 1), chunks=(p, 3, -1)),\n \"xzt\",\n concatenate=True,\n dtype=float,\n ).compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_blockwise_1_in_shape_II_test_blockwise_1_in_shape_II.da_blockwise_": 
{"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_blockwise_1_in_shape_II_test_blockwise_1_in_shape_II.da_blockwise_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 158, "end_line": 172, "span_ids": ["test_blockwise_1_in_shape_II"], "tokens": 140}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_blockwise_1_in_shape_II():\n def test_f(a, b):\n assert 1 in b.shape\n\n p, k, N = 7, 2, 5\n da.blockwise(\n test_f,\n \"x\",\n da.zeros((2 * p, 9, k * N, 8), chunks=(p, 9, k, 4)),\n \"xztu\",\n da.zeros((2 * p, 9, 1, 8), chunks=(p, 9, -1, 4)),\n \"xztu\",\n concatenate=True,\n dtype=float,\n ).compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_blockwise_1_in_shape_III_test_concatenate3_on_scalars.assert_eq_concatenate3_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_blockwise_1_in_shape_III_test_concatenate3_on_scalars.assert_eq_concatenate3_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 175, "end_line": 193, "span_ids": ["test_concatenate3_on_scalars", "test_blockwise_1_in_shape_III"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_blockwise_1_in_shape_III():\n def test_f(a, b):\n assert 1 in b.shape\n\n k, N = 2, 5\n da.blockwise(\n test_f,\n \"x\",\n da.zeros((k * N, 9, 8), chunks=(k, 3, 4)),\n \"xtu\",\n da.zeros((1, 9, 8), chunks=(-1, 3, 4)),\n \"xtu\",\n concatenate=True,\n dtype=float,\n ).compute()\n\n\ndef test_concatenate3_on_scalars():\n assert_eq(concatenate3([1, 2]), np.array([1, 2]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_chunked_dot_product_test_chunked_dot_product.assert_eq_np_dot_x_o_c": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_chunked_dot_product_test_chunked_dot_product.assert_eq_np_dot_x_o_c", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": 
"test", "start_line": 196, "end_line": 212, "span_ids": ["test_chunked_dot_product"], "tokens": 198}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_chunked_dot_product():\n x = np.arange(400).reshape((20, 20))\n o = np.ones((20, 20))\n\n d = {\"x\": x, \"o\": o}\n\n getx = getem(\"x\", (5, 5), shape=(20, 20))\n geto = getem(\"o\", (5, 5), shape=(20, 20))\n\n result = top(\n dotmany, \"out\", \"ik\", \"x\", \"ij\", \"o\", \"jk\", numblocks={\"x\": (4, 4), \"o\": (4, 4)}\n )\n\n dsk = merge(d, getx, geto, result)\n out = dask.get(dsk, [[(\"out\", i, j) for j in range(4)] for i in range(4)])\n\n assert_eq(np.dot(x, o), concatenate3(out))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_chunked_transpose_plus_one_test_chunked_transpose_plus_one.assert_eq_concatenate3_ou": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_chunked_transpose_plus_one_test_chunked_transpose_plus_one.assert_eq_concatenate3_ou", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 215, "end_line": 228, "span_ids": ["test_chunked_transpose_plus_one"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_chunked_transpose_plus_one():\n x = np.arange(400).reshape((20, 20))\n\n d = {\"x\": x}\n\n getx = getem(\"x\", (5, 5), shape=(20, 20))\n\n f = lambda x: x.T + 1\n comp = top(f, \"out\", \"ij\", \"x\", \"ji\", numblocks={\"x\": (4, 4)})\n\n dsk = merge(d, getx, comp)\n out = dask.get(dsk, [[(\"out\", i, j) for j in range(4)] for i in range(4)])\n\n assert_eq(concatenate3(out), x.T + 1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_broadcast_dimensions_works_with_singleton_dimensions_test_broadcast_dimensions.assert_broadcast_dimensio": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_broadcast_dimensions_works_with_singleton_dimensions_test_broadcast_dimensions.assert_broadcast_dimensio", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 231, "end_line": 240, "span_ids": ["test_broadcast_dimensions", 
"test_broadcast_dimensions_works_with_singleton_dimensions"], "tokens": 119}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_broadcast_dimensions_works_with_singleton_dimensions():\n argpairs = [(\"x\", \"i\")]\n numblocks = {\"x\": ((1,),)}\n assert broadcast_dimensions(argpairs, numblocks) == {\"i\": (1,)}\n\n\ndef test_broadcast_dimensions():\n argpairs = [(\"x\", \"ij\"), (\"y\", \"ij\")]\n d = {\"x\": (\"Hello\", 1), \"y\": (1, (2, 3))}\n assert broadcast_dimensions(argpairs, d) == {\"i\": \"Hello\", \"j\": (2, 3)}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_Array_test_Array.with_pytest_raises_TypeEr.Array_dsk_name_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_Array_test_Array.with_pytest_raises_TypeEr.Array_dsk_name_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 243, "end_line": 263, "span_ids": ["test_Array"], "tokens": 205}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_Array():\n shape = (1000, 1000)\n chunks = (100, 100)\n name = \"x\"\n dsk = merge({name: \"some-array\"}, getem(name, chunks, shape=shape))\n a = Array(dsk, name, chunks, shape=shape, dtype=\"f8\")\n\n assert a.numblocks == (10, 10)\n\n assert a.__dask_keys__() == [[(\"x\", i, j) for j in range(10)] for i in range(10)]\n\n assert a.chunks == ((100,) * 10, (100,) * 10)\n\n assert a.shape == shape\n\n assert len(a) == shape[0]\n\n with pytest.raises(ValueError):\n Array(dsk, name, chunks, shape=shape)\n with pytest.raises(TypeError):\n Array(dsk, name, chunks, shape=shape, dtype=\"f8\", meta=np.empty(0, 0))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_uneven_chunks_test_numblocks_suppoorts_singleton_block_dims.assert_set_concat_a___das": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_uneven_chunks_test_numblocks_suppoorts_singleton_block_dims.assert_set_concat_a___das", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 266, "end_line": 278, "span_ids": ["test_numblocks_suppoorts_singleton_block_dims", 
"test_uneven_chunks"], "tokens": 172}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_uneven_chunks():\n a = Array({}, \"x\", chunks=(3, 3), shape=(10, 10), dtype=\"f8\")\n assert a.chunks == ((3, 3, 3, 1), (3, 3, 3, 1))\n\n\ndef test_numblocks_suppoorts_singleton_block_dims():\n shape = (100, 10)\n chunks = (10, 10)\n name = \"x\"\n dsk = merge({name: \"some-array\"}, getem(name, shape=shape, chunks=chunks))\n a = Array(dsk, name, chunks, shape=shape, dtype=\"f8\")\n\n assert set(concat(a.__dask_keys__())) == {(\"x\", i, 0) for i in range(10)}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_keys_test_keys.assert_d___dask_keys___": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_keys_test_keys.assert_d___dask_keys___", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 281, "end_line": 292, "span_ids": ["test_keys"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_keys():\n dsk = dict(((\"x\", i, j), ()) for i in range(5) for j in range(6))\n dx = Array(dsk, \"x\", chunks=(10, 10), shape=(50, 60), dtype=\"f8\")\n assert dx.__dask_keys__() == [[(dx.name, i, j) for j in range(6)] for i in range(5)]\n # Cache works\n assert dx.__dask_keys__() is dx.__dask_keys__()\n # Test mutating names clears key cache\n dx.dask = {(\"y\", i, j): () for i in range(5) for j in range(6)}\n dx.name = \"y\"\n assert dx.__dask_keys__() == [[(dx.name, i, j) for j in range(6)] for i in range(5)]\n d = Array({}, \"x\", (), shape=(), dtype=\"f8\")\n assert d.__dask_keys__() == [(\"x\",)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_Array_computation_test_Array_numpy_gufunc_call__array_ufunc__01.assert_eq_ny_vy_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_Array_computation_test_Array_numpy_gufunc_call__array_ufunc__01.assert_eq_ny_vy_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 295, "end_line": 312, "span_ids": ["test_Array_computation", "test_Array_numpy_gufunc_call__array_ufunc__01"], "tokens": 215}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_Array_computation():\n a = Array({(\"x\", 0, 0): np.eye(3)}, \"x\", shape=(3, 3), chunks=(3, 3), dtype=\"f8\")\n assert_eq(np.array(a), np.eye(3))\n assert isinstance(a.compute(), np.ndarray)\n assert float(a[0, 0]) == 1\n\n\n@pytest.mark.skipif(\n LooseVersion(np.__version__) < \"1.14.0\",\n reason=\"NumPy doesn't have `np.linalg._umath_linalg` yet\",\n)\ndef test_Array_numpy_gufunc_call__array_ufunc__01():\n x = da.random.normal(size=(3, 10, 10), chunks=(2, 10, 10))\n nx = x.compute()\n ny = np.linalg._umath_linalg.inv(nx)\n y = np.linalg._umath_linalg.inv(x, output_dtypes=float)\n vy = y.compute()\n assert_eq(ny, vy)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_Array_numpy_gufunc_call__array_ufunc__02_test_Array_numpy_gufunc_call__array_ufunc__02.assert_eq_nv_vv_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_Array_numpy_gufunc_call__array_ufunc__02_test_Array_numpy_gufunc_call__array_ufunc__02.assert_eq_nv_vv_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 315, "end_line": 327, "span_ids": ["test_Array_numpy_gufunc_call__array_ufunc__02"], "tokens": 156}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n LooseVersion(np.__version__) < \"1.14.0\",\n reason=\"NumPy doesn't have `np.linalg._umath_linalg` yet\",\n)\ndef test_Array_numpy_gufunc_call__array_ufunc__02():\n x = da.random.normal(size=(3, 10, 10), chunks=(2, 10, 10))\n nx = x.compute()\n nw, nv = np.linalg._umath_linalg.eig(nx)\n w, v = np.linalg._umath_linalg.eig(x, output_dtypes=(float, float))\n vw = w.compute()\n vv = v.compute()\n assert_eq(nw, vw)\n assert_eq(nv, vv)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_stack_test_stack.assert_stack_a_b_c_a": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_stack_test_stack.assert_stack_a_b_c_a", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 330, "end_line": 374, "span_ids": ["test_stack"], "tokens": 668}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_stack():\n a, b, c = [\n Array(\n getem(name, chunks=(2, 3), shape=(4, 6)),\n name,\n chunks=(2, 3),\n dtype=\"f8\",\n shape=(4, 6),\n )\n for name in \"ABC\"\n ]\n\n s = stack([a, b, c], axis=0)\n\n colon = slice(None, None, None)\n\n assert s.shape == (3, 4, 6)\n assert s.chunks == ((1, 1, 1), (2, 2), (3, 3))\n assert s.chunksize == (1, 2, 3)\n assert s.dask[(s.name, 0, 1, 0)] == (getitem, (\"A\", 1, 0), (None, colon, colon))\n assert s.dask[(s.name, 2, 1, 0)] == (getitem, (\"C\", 1, 0), (None, colon, colon))\n assert same_keys(s, stack([a, b, c], axis=0))\n\n s2 = stack([a, b, c], axis=1)\n assert s2.shape == (4, 3, 6)\n assert s2.chunks == ((2, 2), (1, 1, 1), (3, 3))\n assert s2.chunksize == (2, 1, 3)\n assert s2.dask[(s2.name, 0, 1, 0)] == (getitem, (\"B\", 0, 0), (colon, None, colon))\n assert s2.dask[(s2.name, 1, 1, 0)] == (getitem, (\"B\", 1, 0), (colon, None, colon))\n assert same_keys(s2, stack([a, b, c], axis=1))\n\n s2 = stack([a, b, c], axis=2)\n assert s2.shape == (4, 6, 3)\n assert s2.chunks == ((2, 2), (3, 3), (1, 1, 1))\n assert s2.chunksize == (2, 3, 1)\n assert s2.dask[(s2.name, 0, 1, 0)] == (getitem, (\"A\", 0, 1), (colon, colon, None))\n assert s2.dask[(s2.name, 1, 1, 2)] == (getitem, (\"C\", 1, 1), (colon, colon, None))\n assert same_keys(s2, stack([a, b, c], axis=2))\n\n pytest.raises(ValueError, lambda: stack([]))\n pytest.raises(ValueError, lambda: stack([a, b, c], axis=3))\n\n assert set(b.dask.keys()).issubset(s2.dask.keys())\n\n assert stack([a, b, c], axis=-1).chunks == stack([a, b, c], axis=2).chunks", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_stack_zero_size_test_stack_rechunk.assert_eq_z_np_stack_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_stack_zero_size_test_stack_rechunk.assert_eq_z_np_stack_x_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 377, "end_line": 421, "span_ids": ["test_short_stack", "test_stack_rechunk", "test_stack_zero_size", "test_stack_scalars", "test_stack_promote_type"], "tokens": 377}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_stack_zero_size():\n x = np.empty((2, 0, 3))\n y = da.from_array(x, chunks=1)\n\n result_np = np.concatenate([x, x])\n result_da = da.concatenate([y, y])\n\n assert_eq(result_np, result_da)\n\n\ndef test_short_stack():\n x = np.array([1])\n d = da.from_array(x, chunks=(1,))\n s = da.stack([d])\n assert s.shape == (1, 1)\n chunks = compute_as_if_collection(Array, 
s.dask, s.__dask_keys__())\n assert chunks[0][0].shape == (1, 1)\n\n\ndef test_stack_scalars():\n d = da.arange(4, chunks=2)\n\n s = da.stack([d.mean(), d.sum()])\n\n assert s.compute().tolist() == [np.arange(4).mean(), np.arange(4).sum()]\n\n\ndef test_stack_promote_type():\n i = np.arange(10, dtype=\"i4\")\n f = np.arange(10, dtype=\"f4\")\n di = da.from_array(i, chunks=5)\n df = da.from_array(f, chunks=5)\n res = da.stack([di, df])\n assert_eq(res, np.stack([i, f]))\n\n\ndef test_stack_rechunk():\n x = da.random.random(10, chunks=5)\n y = da.random.random(10, chunks=4)\n\n z = da.stack([x, y], axis=0)\n assert z.shape == (2, 10)\n assert z.chunks == ((1, 1), (4, 1, 3, 2))\n\n assert_eq(z, np.stack([x.compute(), y.compute()], axis=0))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_stack_unknown_chunksizes_test_stack_unknown_chunksizes.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_stack_unknown_chunksizes_test_stack_unknown_chunksizes.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 424, "end_line": 458, "span_ids": ["test_stack_unknown_chunksizes"], "tokens": 323}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_stack_unknown_chunksizes():\n dd = pytest.importorskip(\"dask.dataframe\")\n pd = pytest.importorskip(\"pandas\")\n\n a_df = pd.DataFrame({\"x\": np.arange(12)})\n b_df = pd.DataFrame({\"y\": np.arange(12) * 10})\n\n a_ddf = dd.from_pandas(a_df, sort=False, npartitions=3)\n b_ddf = dd.from_pandas(b_df, sort=False, npartitions=3)\n\n a_x = a_ddf.values\n b_x = b_ddf.values\n\n assert np.isnan(a_x.shape[0])\n assert np.isnan(b_x.shape[0])\n\n with pytest.raises(ValueError) as exc_info:\n da.stack([a_x, b_x], axis=0)\n\n assert \"shape\" in str(exc_info.value)\n assert \"nan\" in str(exc_info.value)\n\n c_x = da.stack([a_x, b_x], axis=0, allow_unknown_chunksizes=True)\n\n assert_eq(c_x, np.stack([a_df.values, b_df.values], axis=0))\n\n with pytest.raises(ValueError) as exc_info:\n da.stack([a_x, b_x], axis=1)\n\n assert \"shape\" in str(exc_info.value)\n assert \"nan\" in str(exc_info.value)\n\n c_x = da.stack([a_x, b_x], axis=1, allow_unknown_chunksizes=True)\n\n assert_eq(c_x, np.stack([a_df.values, b_df.values], axis=1))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_concatenate_test_concatenate.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_concatenate_test_concatenate.None_1", 
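The stack tests above all hinge on one rule: da.stack inserts a new axis chunked as one block per input array, while the remaining axes keep (or are rechunked to) a common chunking. A compact sketch of the basic case (shapes chosen arbitrarily):

import numpy as np
import dask.array as da

x = da.ones((4, 6), chunks=(2, 3))
y = da.ones((4, 6), chunks=(2, 3))
s = da.stack([x, y], axis=0)
# New leading axis: one chunk of size 1 per stacked array.
assert s.shape == (2, 4, 6)
assert s.chunks == ((1, 1), (2, 2), (3, 3))
np.testing.assert_array_equal(s.compute(), np.ones((2, 4, 6)))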
"embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 461, "end_line": 503, "span_ids": ["test_concatenate"], "tokens": 443}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_concatenate():\n a, b, c = [\n Array(\n getem(name, chunks=(2, 3), shape=(4, 6)),\n name,\n chunks=(2, 3),\n dtype=\"f8\",\n shape=(4, 6),\n )\n for name in \"ABC\"\n ]\n\n x = concatenate([a, b, c], axis=0)\n\n assert x.shape == (12, 6)\n assert x.chunks == ((2, 2, 2, 2, 2, 2), (3, 3))\n assert x.dask[(x.name, 0, 1)] == (\"A\", 0, 1)\n assert x.dask[(x.name, 5, 0)] == (\"C\", 1, 0)\n assert same_keys(x, concatenate([a, b, c], axis=0))\n\n y = concatenate([a, b, c], axis=1)\n\n assert y.shape == (4, 18)\n assert y.chunks == ((2, 2), (3, 3, 3, 3, 3, 3))\n assert y.dask[(y.name, 1, 0)] == (\"A\", 1, 0)\n assert y.dask[(y.name, 1, 5)] == (\"C\", 1, 1)\n assert same_keys(y, concatenate([a, b, c], axis=1))\n\n assert set(b.dask.keys()).issubset(y.dask.keys())\n\n z = concatenate([a], axis=0)\n\n assert z.shape == a.shape\n assert z.chunks == a.chunks\n assert z.dask == a.dask\n assert z is a\n\n assert (\n concatenate([a, b, c], axis=-1).chunks == concatenate([a, b, c], axis=1).chunks\n )\n\n pytest.raises(ValueError, lambda: concatenate([]))\n pytest.raises(ValueError, lambda: concatenate([a, b, c], axis=2))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_concatenate_types_test_concatenate_types.assert_x_dtype_dt_out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_concatenate_types_test_concatenate_types.assert_x_dtype_dt_out", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 506, "end_line": 515, "span_ids": ["test_concatenate_types"], "tokens": 116}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"dtypes\", [((\">f8\", \">f8\"), \"float64\"), ((\" 5\n with pytest.warns(None): # ZeroDivisionWarning\n assert_eq(expr, (3 / x * y) ** 2 > 5)\n\n with pytest.warns(None): # OverflowWarning\n c = da.exp(a)\n assert_eq(c, np.exp(x))\n\n assert_eq(abs(-a), a)\n assert_eq(a, +x)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_operator_dtype_promotion_test_field_access.assert_same_keys_y_b_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_operator_dtype_promotion_test_field_access.assert_same_keys_y_b_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 931, "end_line": 946, "span_ids": ["test_operator_dtype_promotion", "test_field_access"], "tokens": 207}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_operator_dtype_promotion():\n x = np.arange(10, dtype=np.float32)\n y = np.array([1])\n a = from_array(x, chunks=(5,))\n\n assert_eq(x + 1, a + 1) # still float32\n assert_eq(x + 1e50, a + 1e50) # now float64\n assert_eq(x + y, a + y) # also float64\n\n\ndef test_field_access():\n x = np.array([(1, 1.0), (2, 2.0)], dtype=[(\"a\", \"i4\"), (\"b\", \"f4\")])\n y = from_array(x, chunks=(1,))\n assert_eq(y[\"a\"], x[\"a\"])\n assert_eq(y[[\"b\", \"a\"]], x[[\"b\", \"a\"]])\n assert same_keys(y[[\"b\", \"a\"]], y[[\"b\", \"a\"]])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_field_access_with_shape_test_field_access_with_shape.assert_eq_x_col1_col": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_field_access_with_shape_test_field_access_with_shape.assert_eq_x_col1_col", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 949, "end_line": 956, "span_ids": ["test_field_access_with_shape"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_field_access_with_shape():\n dtype = [(\"col1\", (\"f4\", (3, 2))), (\"col2\", (\"f4\", 3))]\n data = np.ones((100, 50), dtype=dtype)\n x = da.from_array(data, 10)\n assert_eq(x[\"col1\"], data[\"col1\"])\n assert_eq(x[[\"col1\"]], data[[\"col1\"]])\n assert_eq(x[\"col2\"], data[\"col2\"])\n assert_eq(x[[\"col1\", \"col2\"]], data[[\"col1\", \"col2\"]])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_matmul_test_matmul.None_6": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_matmul_test_matmul.None_6", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 959, "end_line": 973, "span_ids": ["test_matmul"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_matmul():\n x = np.random.random((5, 5))\n y = np.random.random((5, 2))\n a = from_array(x, chunks=(1, 5))\n b = from_array(y, chunks=(5, 1))\n assert_eq(operator.matmul(a, b), a.dot(b))\n assert_eq(operator.matmul(a, b), operator.matmul(x, y))\n assert_eq(operator.matmul(a, y), operator.matmul(x, b))\n list_vec = list(range(1, 6))\n assert_eq(operator.matmul(list_vec, b), operator.matmul(list_vec, y))\n assert_eq(operator.matmul(x, list_vec), operator.matmul(a, list_vec))\n z = np.random.random((5, 5, 5))\n c = from_array(z, chunks=(1, 5, 1))\n assert_eq(operator.matmul(a, z), operator.matmul(x, c))\n assert_eq(operator.matmul(z, a), operator.matmul(c, x))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_matmul_array_ufunc_test_T.assert_eq_x_T_a_T_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_matmul_array_ufunc_test_T.assert_eq_x_T_a_T_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 976, "end_line": 990, "span_ids": ["test_matmul_array_ufunc", "test_T"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_matmul_array_ufunc():\n # regression test for https://github.com/dask/dask/issues/4353\n x = np.random.random((5, 5))\n y = np.random.random((5, 2))\n a = from_array(x, chunks=(1, 5))\n b = from_array(y, chunks=(5, 1))\n result = b.__array_ufunc__(np.matmul, \"__call__\", a, b)\n assert_eq(result, x.dot(y))\n\n\ndef test_T():\n x = np.arange(400).reshape((20, 20))\n a = from_array(x, chunks=(5, 5))\n\n assert_eq(x.T, a.T)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_broadcast_to_test_broadcast_to.None_1": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_broadcast_to_test_broadcast_to.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 993, "end_line": 1007, "span_ids": ["test_broadcast_to"], "tokens": 171}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_broadcast_to():\n x = np.random.randint(10, size=(5, 1, 6))\n a = from_array(x, chunks=(3, 1, 3))\n\n for shape in [a.shape, (5, 0, 6), (5, 4, 6), (2, 5, 1, 6), (3, 4, 5, 4, 6)]:\n xb = np.broadcast_to(x, shape)\n ab = broadcast_to(a, shape)\n\n assert_eq(xb, ab)\n\n if a.shape == ab.shape:\n assert a is ab\n\n pytest.raises(ValueError, lambda: broadcast_to(a, (2, 1, 6)))\n pytest.raises(ValueError, lambda: broadcast_to(a, (3,)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_broadcast_to_array_test_broadcast_to_scalar.for_shape_in_tuple_0.assert_eq_a_d_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_broadcast_to_array_test_broadcast_to_scalar.for_shape_in_tuple_0.assert_eq_a_d_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1010, "end_line": 1027, "span_ids": ["test_broadcast_to_scalar", "test_broadcast_to_array"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_broadcast_to_array():\n x = np.random.randint(10, size=(5, 1, 6))\n\n for shape in [(5, 0, 6), (5, 4, 6), (2, 5, 1, 6), (3, 4, 5, 4, 6)]:\n a = np.broadcast_to(x, shape)\n d = broadcast_to(x, shape)\n\n assert_eq(a, d)\n\n\ndef test_broadcast_to_scalar():\n x = 5\n\n for shape in [tuple(), (0,), (2, 3), (5, 4, 6), (2, 5, 1, 6), (3, 4, 5, 4, 6)]:\n a = np.broadcast_to(x, shape)\n d = broadcast_to(x, shape)\n\n assert_eq(a, d)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_broadcast_to_chunks_test_broadcast_to_chunks.None_3.broadcast_to_a_5_2_6_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_broadcast_to_chunks_test_broadcast_to_chunks.None_3.broadcast_to_a_5_2_6_", "embedding": null, "metadata": 
{"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1030, "end_line": 1049, "span_ids": ["test_broadcast_to_chunks"], "tokens": 317}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_broadcast_to_chunks():\n x = np.random.randint(10, size=(5, 1, 6))\n a = from_array(x, chunks=(3, 1, 3))\n\n for shape, chunks, expected_chunks in [\n ((5, 3, 6), (3, -1, 3), ((3, 2), (3,), (3, 3))),\n ((5, 3, 6), (3, 1, 3), ((3, 2), (1, 1, 1), (3, 3))),\n ((2, 5, 3, 6), (1, 3, 1, 3), ((1, 1), (3, 2), (1, 1, 1), (3, 3))),\n ]:\n xb = np.broadcast_to(x, shape)\n ab = broadcast_to(a, shape, chunks=chunks)\n assert_eq(xb, ab)\n assert ab.chunks == expected_chunks\n\n with pytest.raises(ValueError):\n broadcast_to(a, a.shape, chunks=((2, 3), (1,), (3, 3)))\n with pytest.raises(ValueError):\n broadcast_to(a, a.shape, chunks=((3, 2), (3,), (3, 3)))\n with pytest.raises(ValueError):\n broadcast_to(a, (5, 2, 6), chunks=((3, 2), (3,), (3, 3)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_broadcast_arrays_test_broadcast_arrays_uneven_chunks.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_broadcast_arrays_test_broadcast_arrays_uneven_chunks.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1052, "end_line": 1085, "span_ids": ["test_broadcast_arrays", "test_broadcast_arrays_uneven_chunks"], "tokens": 271}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_broadcast_arrays():\n assert np.broadcast_arrays() == da.broadcast_arrays()\n\n a = np.arange(4)\n d_a = da.from_array(a, chunks=tuple(s // 2 for s in a.shape))\n\n a_0 = np.arange(4)[None, :]\n a_1 = np.arange(4)[:, None]\n\n d_a_0 = d_a[None, :]\n d_a_1 = d_a[:, None]\n\n a_r = np.broadcast_arrays(a_0, a_1)\n d_r = da.broadcast_arrays(d_a_0, d_a_1)\n\n assert isinstance(d_r, list)\n assert len(a_r) == len(d_r)\n\n for e_a_r, e_d_r in zip(a_r, d_r):\n assert_eq(e_a_r, e_d_r)\n\n\ndef test_broadcast_arrays_uneven_chunks():\n x = da.ones(30, chunks=(3,))\n y = da.ones(30, chunks=(5,))\n z = np.broadcast_arrays(x, y)\n\n assert_eq(z, z)\n\n x = da.ones((1, 30), chunks=(1, 3))\n y = da.ones(30, chunks=(5,))\n z = np.broadcast_arrays(x, y)\n\n assert_eq(z, z)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_broadcast_operator_test_broadcast_operator.assert_eq_w_d_w_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_broadcast_operator_test_broadcast_operator.assert_eq_w_d_w_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1088, "end_line": 1110, "span_ids": ["test_broadcast_operator"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"u_shape, v_shape\",\n [\n [tuple(), (2, 3)],\n [(1,), (2, 3)],\n [(1, 1), (2, 3)],\n [(0, 3), (1, 3)],\n [(2, 0), (2, 1)],\n [(1, 0), (2, 1)],\n [(0, 1), (1, 3)],\n ],\n)\ndef test_broadcast_operator(u_shape, v_shape):\n u = np.random.random(u_shape)\n v = np.random.random(v_shape)\n\n d_u = from_array(u, chunks=1)\n d_v = from_array(v, chunks=1)\n\n w = u * v\n d_w = d_u * d_v\n\n assert_eq(w, d_w)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_reshape_test_reshape.assert_eq_xr_ar_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_reshape_test_reshape.assert_eq_xr_ar_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1113, "end_line": 1163, "span_ids": ["test_reshape"], "tokens": 777}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"original_shape,new_shape,chunks\",\n [\n ((10,), (10,), (3, 3, 4)),\n ((10,), (10, 1, 1), 5),\n ((10,), (1, 10), 5),\n ((24,), (2, 3, 4), 12),\n ((1, 24), (2, 3, 4), 12),\n ((2, 3, 4), (24,), (1, 3, 4)),\n ((2, 3, 4), (24,), 4),\n ((2, 3, 4), (24, 1), 4),\n ((2, 3, 4), (1, 24), 4),\n ((4, 4, 1), (4, 4), 2),\n ((4, 4), (4, 4, 1), 2),\n ((1, 4, 4), (4, 4), 2),\n ((1, 4, 4), (4, 4, 1), 2),\n ((1, 4, 4), (1, 1, 4, 4), 2),\n ((4, 4), (1, 4, 4, 1), 2),\n ((4, 4), (1, 4, 4), 2),\n ((2, 3), (2, 3), (1, 2)),\n ((2, 3), (3, 2), 3),\n ((4, 2, 3), (4, 6), 4),\n ((3, 4, 5, 6), (3, 4, 5, 6), (2, 3, 4, 5)),\n ((), (1,), 1),\n ((1,), (), 1),\n ((24,), (3, 8), 24),\n ((24,), (4, 6), 6),\n ((24,), (4, 3, 2), 6),\n ((24,), (4, 6, 1), 6),\n ((24,), (4, 6), (6, 12, 6)),\n ((64, 4), (8, 8, 4), (16, 2)),\n ((4, 64), (4, 8, 4, 2), (2, 16)),\n ((4, 8, 4, 2), (2, 1, 2, 32, 2), (2, 4, 2, 2)),\n ((4, 1, 4), (4, 4), (2, 1, 2)),\n ((0, 10), (0, 5, 2), (5, 5)),\n ((5, 0, 2), (0, 10), 
(5, 2, 2)),\n ((0,), (2, 0, 2), (4,)),\n ((2, 0, 2), (0,), (4, 4, 4)),\n ],\n)\ndef test_reshape(original_shape, new_shape, chunks):\n x = np.random.randint(10, size=original_shape)\n a = from_array(x, chunks=chunks)\n\n xr = x.reshape(new_shape)\n ar = a.reshape(new_shape)\n\n if a.shape == new_shape:\n assert a is ar\n\n assert_eq(xr, ar)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_reshape_exceptions_test_reshape_fails_for_dask_only.for_original_shape_new_s.with_pytest_raises_ValueE.da_reshape_a_new_shape_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_reshape_exceptions_test_reshape_fails_for_dask_only.for_original_shape_new_s.with_pytest_raises_ValueE.da_reshape_a_new_shape_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1166, "end_line": 1185, "span_ids": ["test_reshape_fails_for_dask_only", "test_reshape_splat", "test_reshape_exceptions"], "tokens": 177}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_reshape_exceptions():\n x = np.random.randint(10, size=(5,))\n a = from_array(x, chunks=(2,))\n with pytest.raises(ValueError):\n da.reshape(a, (100,))\n\n\ndef test_reshape_splat():\n x = da.ones((5, 5), chunks=(2, 2))\n assert_eq(x.reshape((25,)), x.reshape(25))\n\n\ndef test_reshape_fails_for_dask_only():\n cases = [((3, 4), (4, 3), 2)]\n for original_shape, new_shape, chunks in cases:\n x = np.random.randint(10, size=original_shape)\n a = from_array(x, chunks=chunks)\n assert x.reshape(new_shape).shape == new_shape\n with pytest.raises(ValueError):\n da.reshape(a, new_shape)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_reshape_unknown_dimensions_test_full.assert_eq_d_np_full_3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_reshape_unknown_dimensions_test_full.assert_eq_d_np_full_3_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1188, "end_line": 1201, "span_ids": ["test_reshape_unknown_dimensions", "test_full"], "tokens": 175}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def 
test_reshape_unknown_dimensions():\n for original_shape in [(24,), (2, 12), (2, 3, 4)]:\n for new_shape in [(-1,), (2, -1), (-1, 3, 4)]:\n x = np.random.randint(10, size=original_shape)\n a = from_array(x, 24)\n assert_eq(x.reshape(new_shape), a.reshape(new_shape))\n\n pytest.raises(ValueError, lambda: da.reshape(a, (-1, -1)))\n\n\ndef test_full():\n d = da.full((3, 4), 2, chunks=((2, 1), (2, 2)))\n assert d.chunks == ((2, 1), (2, 2))\n assert_eq(d, np.full((3, 4), 2))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_test_map_blocks.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_test_map_blocks.None_2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1204, "end_line": 1229, "span_ids": ["test_map_blocks"], "tokens": 258}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_blocks():\n x = np.arange(400).reshape((20, 20))\n d = from_array(x, chunks=(7, 7))\n\n e = d.map_blocks(inc, dtype=d.dtype)\n\n assert d.chunks == e.chunks\n assert_eq(e, x + 1)\n\n e = d.map_blocks(inc, name=\"increment\")\n assert e.name.startswith(\"increment-\")\n\n assert d.map_blocks(inc, name=\"foo\").name != d.map_blocks(dec, name=\"foo\").name\n\n d = from_array(x, chunks=(10, 10))\n e = d.map_blocks(lambda x: x[::2, ::2], chunks=(5, 5), dtype=d.dtype)\n\n assert e.chunks == ((5, 5), (5, 5))\n assert_eq(e, x[::2, ::2])\n\n d = from_array(x, chunks=(8, 8))\n e = d.map_blocks(\n lambda x: x[::2, ::2], chunks=((4, 4, 2), (4, 4, 2)), dtype=d.dtype\n )\n\n assert_eq(e, x[::2, ::2])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks2_test_map_blocks2.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks2_test_map_blocks2.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1232, "end_line": 1249, "span_ids": ["test_map_blocks2"], "tokens": 187}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_blocks2():\n x = np.arange(10, dtype=\"i8\")\n d = from_array(x, chunks=(2,))\n\n def 
func(block, block_id=None, c=0):\n return np.ones_like(block) * sum(block_id) + c\n\n out = d.map_blocks(func, dtype=\"i8\")\n expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4], dtype=\"i8\")\n\n assert_eq(out, expected)\n assert same_keys(d.map_blocks(func, dtype=\"i8\"), out)\n\n out = d.map_blocks(func, dtype=\"i8\", c=1)\n expected = expected + 1\n\n assert_eq(out, expected)\n assert same_keys(d.map_blocks(func, dtype=\"i8\", c=1), out)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_block_info_test_map_blocks_block_info.assert_eq_z_x_x_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_block_info_test_map_blocks_block_info.assert_eq_z_x_x_1_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1252, "end_line": 1271, "span_ids": ["test_map_blocks_block_info"], "tokens": 210}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_blocks_block_info():\n x = da.arange(50, chunks=10)\n\n def func(a, b, c, block_info=None):\n for idx in [0, 2, None]: # positions in args\n assert block_info[idx][\"shape\"] == (50,)\n assert block_info[idx][\"num-chunks\"] == (5,)\n start, stop = block_info[idx][\"array-location\"][0]\n assert stop - start == 10\n assert 0 <= start <= 40\n assert 10 <= stop <= 50\n\n assert 0 <= block_info[idx][\"chunk-location\"][0] <= 4\n assert block_info[None][\"chunk-shape\"] == (10,)\n assert block_info[None][\"dtype\"] == x.dtype\n\n return a + b + c\n\n z = da.map_blocks(func, x, 100, x + 1, dtype=x.dtype)\n assert_eq(z, x + x + 1 + 100)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_block_info_with_new_axis_test_map_blocks_block_info_with_new_axis.assert_eq_z_np_ones_4_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_block_info_with_new_axis_test_map_blocks_block_info_with_new_axis.assert_eq_z_np_ones_4_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1274, "end_line": 1301, "span_ids": ["test_map_blocks_block_info_with_new_axis"], "tokens": 385}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "def test_map_blocks_block_info_with_new_axis():\n # https://github.com/dask/dask/issues/4298\n values = da.from_array(np.array([\"a\", \"a\", \"b\", \"c\"]), 2)\n\n def func(x, block_info=None):\n assert set(block_info.keys()) == {0, None}\n assert block_info[0][\"shape\"] == (4,)\n assert block_info[0][\"num-chunks\"] == (2,)\n assert block_info[None][\"shape\"] == (4, 3)\n assert block_info[None][\"num-chunks\"] == (2, 1)\n assert block_info[None][\"chunk-shape\"] == (2, 3)\n assert block_info[None][\"dtype\"] == np.dtype(\"f8\")\n\n assert block_info[0][\"chunk-location\"] in {(0,), (1,)}\n\n if block_info[0][\"chunk-location\"] == (0,):\n assert block_info[0][\"array-location\"] == [(0, 2)]\n assert block_info[None][\"chunk-location\"] == (0, 0)\n assert block_info[None][\"array-location\"] == [(0, 2), (0, 3)]\n elif block_info[0][\"chunk-location\"] == (1,):\n assert block_info[0][\"array-location\"] == [(2, 4)]\n assert block_info[None][\"chunk-location\"] == (1, 0)\n assert block_info[None][\"array-location\"] == [(2, 4), (0, 3)]\n\n return np.ones((len(x), 3))\n\n z = values.map_blocks(func, chunks=((2, 2), 3), new_axis=1, dtype=\"f8\")\n assert_eq(z, np.ones((4, 3), dtype=\"f8\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_block_info_with_drop_axis_test_map_blocks_block_info_with_drop_axis.assert_eq_z_np_array_7_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_block_info_with_drop_axis_test_map_blocks_block_info_with_drop_axis.assert_eq_z_np_array_7_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1304, "end_line": 1337, "span_ids": ["test_map_blocks_block_info_with_drop_axis"], "tokens": 449}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_blocks_block_info_with_drop_axis():\n # https://github.com/dask/dask/issues/4584\n values = da.from_array(\n np.array(\n [[1, 2, 4], [8, 16, 32], [64, 128, 256], [1024, 2048, 4096]], dtype=\"u4\"\n ),\n (2, 1),\n )\n\n def func(x, block_info=None):\n assert set(block_info.keys()) == {0, None}\n assert block_info[0][\"shape\"] == (4, 3)\n # drop_axis concatenates along the dropped dimension, hence not (2, 3)\n assert block_info[0][\"num-chunks\"] == (2, 1)\n assert block_info[None][\"shape\"] == (4,)\n assert block_info[None][\"num-chunks\"] == (2,)\n assert block_info[None][\"chunk-shape\"] == (2,)\n assert block_info[None][\"dtype\"] == np.dtype(\"u4\")\n\n assert block_info[0][\"chunk-location\"] in {(0, 0), (1, 0)}\n\n if block_info[0][\"chunk-location\"] == (0, 0):\n assert block_info[0][\"array-location\"] == [(0, 2), (0, 3)]\n assert block_info[None][\"chunk-location\"] == (0,)\n assert block_info[None][\"array-location\"] == [(0, 2)]\n elif 
block_info[0][\"chunk-location\"] == (1, 0):\n assert block_info[0][\"array-location\"] == [(2, 4), (0, 3)]\n assert block_info[None][\"chunk-location\"] == (1,)\n assert block_info[None][\"array-location\"] == [(2, 4)]\n\n return np.sum(x, axis=1, dtype=\"u4\")\n\n z = values.map_blocks(func, drop_axis=1, dtype=\"u4\")\n assert_eq(z, np.array([7, 56, 448, 7168], dtype=\"u4\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_block_info_with_broadcast_test_map_blocks_block_info_with_broadcast.expected2._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_block_info_with_broadcast_test_map_blocks_block_info_with_broadcast.expected2._", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1340, "end_line": 1382, "span_ids": ["test_map_blocks_block_info_with_broadcast"], "tokens": 329}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_blocks_block_info_with_broadcast():\n expected0 = [\n {\n \"shape\": (3, 4),\n \"num-chunks\": (1, 2),\n \"array-location\": [(0, 3), (0, 2)],\n \"chunk-location\": (0, 0),\n },\n {\n \"shape\": (3, 4),\n \"num-chunks\": (1, 2),\n \"array-location\": [(0, 3), (2, 4)],\n \"chunk-location\": (0, 1),\n },\n ]\n expected1 = [\n {\n \"shape\": (6, 2),\n \"num-chunks\": (2, 1),\n \"array-location\": [(0, 3), (0, 2)],\n \"chunk-location\": (0, 0),\n },\n {\n \"shape\": (6, 2),\n \"num-chunks\": (2, 1),\n \"array-location\": [(3, 6), (0, 2)],\n \"chunk-location\": (1, 0),\n },\n ]\n expected2 = [\n {\n \"shape\": (4,),\n \"num-chunks\": (2,),\n \"array-location\": [(0, 2)],\n \"chunk-location\": (0,),\n },\n {\n \"shape\": (4,),\n \"num-chunks\": (2,),\n \"array-location\": [(2, 4)],\n \"chunk-location\": (1,),\n },\n ]\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_block_info_with_broadcast.expected_test_map_blocks_block_info_with_broadcast.assert_eq_d_3_np_ones_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_block_info_with_broadcast.expected_test_map_blocks_block_info_with_broadcast.assert_eq_d_3_np_ones_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1383, "end_line": 1452, "span_ids": ["test_map_blocks_block_info_with_broadcast"], "tokens": 620}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_blocks_block_info_with_broadcast():\n # ... other code\n expected = [\n {\n 0: expected0[0],\n 1: expected1[0],\n 2: expected2[0],\n None: {\n \"shape\": (6, 4),\n \"num-chunks\": (2, 2),\n \"dtype\": np.float_,\n \"chunk-shape\": (3, 2),\n \"array-location\": [(0, 3), (0, 2)],\n \"chunk-location\": (0, 0),\n },\n },\n {\n 0: expected0[1],\n 1: expected1[0],\n 2: expected2[1],\n None: {\n \"shape\": (6, 4),\n \"num-chunks\": (2, 2),\n \"dtype\": np.float_,\n \"chunk-shape\": (3, 2),\n \"array-location\": [(0, 3), (2, 4)],\n \"chunk-location\": (0, 1),\n },\n },\n {\n 0: expected0[0],\n 1: expected1[1],\n 2: expected2[0],\n None: {\n \"shape\": (6, 4),\n \"num-chunks\": (2, 2),\n \"dtype\": np.float_,\n \"chunk-shape\": (3, 2),\n \"array-location\": [(3, 6), (0, 2)],\n \"chunk-location\": (1, 0),\n },\n },\n {\n 0: expected0[1],\n 1: expected1[1],\n 2: expected2[1],\n None: {\n \"shape\": (6, 4),\n \"num-chunks\": (2, 2),\n \"dtype\": np.float_,\n \"chunk-shape\": (3, 2),\n \"array-location\": [(3, 6), (2, 4)],\n \"chunk-location\": (1, 1),\n },\n },\n ]\n\n def func(x, y, z, block_info=None):\n for info in expected:\n if block_info[None][\"chunk-location\"] == info[None][\"chunk-location\"]:\n assert block_info == info\n break\n else:\n assert False\n return x + y + z\n\n a = da.ones((3, 4), chunks=(3, 2))\n b = da.ones((6, 2), chunks=(3, 2))\n c = da.ones((4,), chunks=(2,))\n d = da.map_blocks(func, a, b, c, chunks=((3, 3), (2, 2)), dtype=a.dtype)\n assert d.chunks == ((3, 3), (2, 2))\n assert_eq(d, 3 * np.ones((6, 4)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_with_constants_test_map_blocks_with_kwargs.assert_eq_result_np_arra": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_with_constants_test_map_blocks_with_kwargs.assert_eq_result_np_arra", "embedding": null, "metadata": 
{"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1455, "end_line": 1470, "span_ids": ["test_map_blocks_with_constants", "test_map_blocks_with_kwargs"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_blocks_with_constants():\n d = da.arange(10, chunks=3)\n e = d.map_blocks(add, 100, dtype=d.dtype)\n\n assert_eq(e, np.arange(10) + 100)\n\n assert_eq(da.map_blocks(sub, d, 10, dtype=d.dtype), np.arange(10) - 10)\n assert_eq(da.map_blocks(sub, 10, d, dtype=d.dtype), 10 - np.arange(10))\n\n\ndef test_map_blocks_with_kwargs():\n d = da.arange(10, chunks=5)\n\n result = d.map_blocks(np.max, axis=0, keepdims=True, dtype=d.dtype, chunks=(1,))\n\n assert_eq(result, np.array([4, 9]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_infer_chunks_broadcast_test_map_blocks_with_chunks.assert_eq_dz_np_ones_5_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_infer_chunks_broadcast_test_map_blocks_with_chunks.assert_eq_dz_np_ones_5_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1473, "end_line": 1485, "span_ids": ["test_map_blocks_infer_chunks_broadcast", "test_map_blocks_with_chunks"], "tokens": 215}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_blocks_infer_chunks_broadcast():\n dx = da.from_array([[1, 2, 3, 4]], chunks=((1,), (2, 2)))\n dy = da.from_array([[10, 20], [30, 40]], chunks=((1, 1), (2,)))\n result = da.map_blocks(lambda x, y: x + y, dx, dy)\n assert result.chunks == ((1, 1), (2, 2))\n assert_eq(result, np.array([[11, 22, 13, 24], [31, 42, 33, 44]]))\n\n\ndef test_map_blocks_with_chunks():\n dx = da.ones((5, 3), chunks=(2, 2))\n dy = da.ones((5, 3), chunks=(2, 2))\n dz = da.map_blocks(np.add, dx, dy, chunks=dx.chunks)\n assert_eq(dz, np.ones((5, 3)) * 2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_infer_newaxis_test_map_blocks_no_array_args.assert_eq_x_np_arange_8_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_infer_newaxis_test_map_blocks_no_array_args.assert_eq_x_np_arange_8_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1516, "end_line": 1529, "span_ids": ["test_map_blocks_infer_newaxis", "test_map_blocks_no_array_args"], "tokens": 178}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_blocks_infer_newaxis():\n x = da.ones((5, 3), chunks=(2, 2))\n y = da.map_blocks(lambda x: x[None], x, chunks=((1,), (2, 2, 1), (2, 1)))\n assert_eq(y, da.ones((1, 5, 3)))\n\n\ndef test_map_blocks_no_array_args():\n def func(dtype, block_info=None):\n loc = block_info[None][\"array-location\"]\n return np.arange(loc[0][0], loc[0][1], dtype=dtype)\n\n x = da.map_blocks(func, np.float32, chunks=((5, 3),), dtype=np.float32)\n assert x.chunks == ((5, 3),)\n assert_eq(x, np.arange(8, dtype=np.float32))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_optimize_blockwise_test_map_blocks_optimize_blockwise.assert_len_optimized_laye": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_optimize_blockwise_test_map_blocks_optimize_blockwise.assert_len_optimized_laye", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1532, "end_line": 1542, "span_ids": ["test_map_blocks_optimize_blockwise"], "tokens": 163}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"func\", [lambda x, y: x + y, lambda x, y, block_info: x + y])\ndef test_map_blocks_optimize_blockwise(func):\n # Check that map_blocks layers can merge with elementwise layers\n base = [da.full((1,), i, chunks=1) for i in range(4)]\n a = base[0] + base[1]\n b = da.map_blocks(func, a, base[2], dtype=np.int8)\n c = b + base[3]\n dsk = c.__dask_graph__()\n optimized = optimize_blockwise(dsk)\n # The two additions and the map_blocks should be fused together\n assert len(optimized.layers) == len(dsk.layers) - 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_repr_test_dtype._no_shape": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_repr_test_dtype._no_shape", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1545, "end_line": 1594, "span_ids": ["test_repr_meta", "test_dtype", "test_repr", "test_slicing_with_ellipsis", "test_slicing_flexible_type", "test_slicing_with_ndarray"], "tokens": 474}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_repr():\n d = da.ones((4, 4), chunks=(2, 2))\n assert key_split(d.name) in repr(d)\n assert str(d.shape) in repr(d)\n assert str(d.dtype) in repr(d)\n d = da.ones((4000, 4), chunks=(4, 2))\n assert len(str(d)) < 1000\n\n\ndef test_repr_meta():\n d = da.ones((4, 4), chunks=(2, 2))\n assert \"chunktype=numpy.ndarray\" in repr(d)\n\n # Test non-numpy meta\n sparse = pytest.importorskip(\"sparse\")\n s = d.map_blocks(sparse.COO)\n assert \"chunktype=sparse.COO\" in repr(s)\n\n\ndef test_slicing_with_ellipsis():\n x = np.arange(256).reshape((4, 4, 4, 4))\n d = da.from_array(x, chunks=((2, 2, 2, 2)))\n\n assert_eq(d[..., 1], x[..., 1])\n assert_eq(d[0, ..., 1], x[0, ..., 1])\n\n\ndef test_slicing_with_ndarray():\n x = np.arange(64).reshape((8, 8))\n d = da.from_array(x, chunks=((4, 4)))\n\n assert_eq(d[np.arange(8)], x)\n assert_eq(d[np.ones(8, dtype=bool)], x)\n assert_eq(d[np.array([1])], x[[1]])\n assert_eq(d[np.array([True, False, True] + [False] * 5)], x[[0, 2]])\n\n\ndef test_slicing_flexible_type():\n a = np.array([[\"a\", \"b\"], [\"c\", \"d\"]])\n b = da.from_array(a, 2)\n\n assert_eq(a[:, 0], b[:, 0])\n\n\ndef test_dtype():\n d = da.ones((4, 4), chunks=(2, 2))\n\n assert d.dtype == d.compute().dtype\n assert (d * 1.0).dtype == (d + 1.0).compute().dtype\n assert d.sum().dtype == d.sum().compute().dtype # no shape", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_blockdims_from_blockshape_test_blockdims_from_blockshape.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_blockdims_from_blockshape_test_blockdims_from_blockshape.None_2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1597, "end_line": 1601, "span_ids": ["test_blockdims_from_blockshape"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_blockdims_from_blockshape():\n assert blockdims_from_blockshape((10, 10), (4, 3)) == ((4, 4, 2), (3, 3, 3, 1))\n pytest.raises(TypeError, 
lambda: blockdims_from_blockshape((10,), None))\n assert blockdims_from_blockshape((1e2, 3), [1e1, 3]) == ((10,) * 10, (3,))\n assert blockdims_from_blockshape((np.int8(10),), (5,)) == ((5, 5),)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_coerce_test_bool.with_pytest_raises_ValueE.bool_darr_darr_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_coerce_test_bool.with_pytest_raises_ValueE.bool_darr_darr_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1604, "end_line": 1625, "span_ids": ["test_bool", "test_coerce"], "tokens": 195}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_coerce():\n d0 = da.from_array(np.array(1), chunks=(1,))\n d1 = da.from_array(np.array([1]), chunks=(1,))\n with dask.config.set(scheduler=\"sync\"):\n for d in d0, d1:\n assert bool(d) is True\n assert int(d) == 1\n assert float(d) == 1.0\n assert complex(d) == complex(1)\n\n a2 = np.arange(2)\n d2 = da.from_array(a2, chunks=(2,))\n for func in (int, float, complex):\n pytest.raises(TypeError, lambda: func(d2))\n\n\ndef test_bool():\n arr = np.arange(100).reshape((10, 10))\n darr = da.from_array(arr, chunks=(10, 10))\n with pytest.raises(ValueError):\n bool(darr)\n bool(darr == darr)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_store_kwargs_test_store_kwargs.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_store_kwargs_test_store_kwargs.None_5", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1628, "end_line": 1653, "span_ids": ["test_store_kwargs"], "tokens": 218}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_store_kwargs():\n d = da.ones((10, 10), chunks=(2, 2))\n a = d + 1\n\n called = [False]\n\n def get_func(*args, **kwargs):\n assert kwargs.pop(\"foo\") == \"test kwarg\"\n r = dask.get(*args, **kwargs)\n called[0] = True\n return r\n\n called[0] = False\n at = np.zeros(shape=(10, 10))\n store([a], [at], scheduler=get_func, foo=\"test kwarg\")\n assert called[0]\n\n called[0] = False\n at = np.zeros(shape=(10, 
10))\n a.store(at, scheduler=get_func, foo=\"test kwarg\")\n assert called[0]\n\n called[0] = False\n at = np.zeros(shape=(10, 10))\n store([a], [at], scheduler=get_func, return_stored=True, foo=\"test kwarg\")\n assert called[0]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_store_delayed_target_test_store_delayed_target.for_st_compute_in_False_.None_7": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_store_delayed_target_test_store_delayed_target.for_st_compute_in_False_.None_7", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1656, "end_line": 1707, "span_ids": ["test_store_delayed_target"], "tokens": 388}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_store_delayed_target():\n from dask.delayed import delayed\n\n d = da.ones((4, 4), chunks=(2, 2))\n a, b = d + 1, d + 2\n\n # empty buffers to be used as targets\n targs = {}\n\n def make_target(key):\n a = np.empty((4, 4))\n targs[key] = a\n return a\n\n # delayed calls to these targets\n atd = delayed(make_target)(\"at\")\n btd = delayed(make_target)(\"bt\")\n\n # test not keeping result\n st = store([a, b], [atd, btd])\n\n at = targs[\"at\"]\n bt = targs[\"bt\"]\n\n assert st is None\n assert_eq(at, a)\n assert_eq(bt, b)\n\n # test keeping result\n for st_compute in [False, True]:\n targs.clear()\n\n st = store([a, b], [atd, btd], return_stored=True, compute=st_compute)\n if st_compute:\n assert all(not any(dask.core.get_deps(e.dask)[0].values()) for e in st)\n\n st = dask.compute(*st)\n\n at = targs[\"at\"]\n bt = targs[\"bt\"]\n\n assert st is not None\n assert isinstance(st, tuple)\n assert all([isinstance(v, np.ndarray) for v in st])\n assert_eq(at, a)\n assert_eq(bt, b)\n assert_eq(st[0], a)\n assert_eq(st[1], b)\n\n pytest.raises(ValueError, lambda: store([a], [at, bt]))\n pytest.raises(ValueError, lambda: store(at, at))\n pytest.raises(ValueError, lambda: store([at, bt], [at, bt]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_store_test_store.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_store_test_store.None_2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1710, "end_line": 1724, "span_ids": ["test_store"], "tokens": 147}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_store():\n d = da.ones((4, 4), chunks=(2, 2))\n a, b = d + 1, d + 2\n\n at = np.empty(shape=(4, 4))\n bt = np.empty(shape=(4, 4))\n\n st = store([a, b], [at, bt])\n assert st is None\n assert (at == 2).all()\n assert (bt == 3).all()\n\n pytest.raises(ValueError, lambda: store([a], [at, bt]))\n pytest.raises(ValueError, lambda: store(at, at))\n pytest.raises(ValueError, lambda: store([at, bt], [at, bt]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_store_regions_test_store_regions._Multiple_regions_keep_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_store_regions_test_store_regions._Multiple_regions_keep_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1727, "end_line": 1785, "span_ids": ["test_store_regions"], "tokens": 747}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_store_regions():\n d = da.ones((4, 4, 4), dtype=int, chunks=(2, 2, 2))\n a, b = d + 1, d + 2\n a = a[:, 1:, :].astype(float)\n\n region = (slice(None, None, 2), slice(None), [1, 2, 4, 5])\n\n # Single region:\n at = np.zeros(shape=(8, 3, 6))\n bt = np.zeros(shape=(8, 4, 6))\n v = store([a, b], [at, bt], regions=region, compute=False)\n assert isinstance(v, Delayed)\n assert (at == 0).all() and (bt[region] == 0).all()\n assert all([ev is None for ev in v.compute()])\n assert (at[region] == 2).all() and (bt[region] == 3).all()\n assert not (bt == 3).all() and not (bt == 0).all()\n assert not (at == 2).all() and not (at == 0).all()\n\n # Multiple regions:\n at = np.zeros(shape=(8, 3, 6))\n bt = np.zeros(shape=(8, 4, 6))\n v = store([a, b], [at, bt], regions=[region, region], compute=False)\n assert isinstance(v, Delayed)\n assert (at == 0).all() and (bt[region] == 0).all()\n assert all([ev is None for ev in v.compute()])\n assert (at[region] == 2).all() and (bt[region] == 3).all()\n assert not (bt == 3).all() and not (bt == 0).all()\n assert not (at == 2).all() and not (at == 0).all()\n\n # Single region (keep result):\n for st_compute in [False, True]:\n at = np.zeros(shape=(8, 3, 6))\n bt = np.zeros(shape=(8, 4, 6))\n v = store(\n [a, b], [at, bt], regions=region, compute=st_compute, return_stored=True\n )\n assert isinstance(v, tuple)\n assert all([isinstance(e, da.Array) for e in v])\n if st_compute:\n assert all(not any(dask.core.get_deps(e.dask)[0].values()) for e in v)\n else:\n assert (at == 0).all() and (bt[region] == 0).all()\n\n ar, br = v\n assert ar.dtype == a.dtype\n assert br.dtype == b.dtype\n assert ar.shape == a.shape\n assert br.shape == b.shape\n assert 
ar.chunks == a.chunks\n assert br.chunks == b.chunks\n\n ar, br = da.compute(ar, br)\n assert (at[region] == 2).all() and (bt[region] == 3).all()\n assert not (bt == 3).all() and not (bt == 0).all()\n assert not (at == 2).all() and not (at == 0).all()\n assert (br == 3).all()\n assert (ar == 2).all()\n\n # Multiple regions (keep result):\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_store_regions.None_1_test_store_regions.None_1.assert_ar_2_all_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_store_regions.None_1_test_store_regions.None_1.assert_ar_2_all_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1786, "end_line": 1816, "span_ids": ["test_store_regions"], "tokens": 322}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_store_regions():\n # ... other code\n for st_compute in [False, True]:\n at = np.zeros(shape=(8, 3, 6))\n bt = np.zeros(shape=(8, 4, 6))\n v = store(\n [a, b],\n [at, bt],\n regions=[region, region],\n compute=st_compute,\n return_stored=True,\n )\n assert isinstance(v, tuple)\n assert all([isinstance(e, da.Array) for e in v])\n if st_compute:\n assert all(not any(dask.core.get_deps(e.dask)[0].values()) for e in v)\n else:\n assert (at == 0).all() and (bt[region] == 0).all()\n\n ar, br = v\n assert ar.dtype == a.dtype\n assert br.dtype == b.dtype\n assert ar.shape == a.shape\n assert br.shape == b.shape\n assert ar.chunks == a.chunks\n assert br.chunks == b.chunks\n\n ar, br = da.compute(ar, br)\n assert (at[region] == 2).all() and (bt[region] == 3).all()\n assert not (bt == 3).all() and not (bt == 0).all()\n assert not (at == 2).all() and not (at == 0).all()\n assert (br == 3).all()\n assert (ar == 2).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_store_compute_false_test_store_compute_false.None_9": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_store_compute_false_test_store_compute_false.None_9", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1819, "end_line": 1839, "span_ids": ["test_store_compute_false"], "tokens": 262}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_store_compute_false():\n d = da.ones((4, 4), chunks=(2, 2))\n a, b = d + 1, d + 2\n\n at = np.zeros(shape=(4, 4))\n bt = np.zeros(shape=(4, 4))\n\n v = store([a, b], [at, bt], compute=False)\n assert isinstance(v, Delayed)\n assert (at == 0).all() and (bt == 0).all()\n assert all([ev is None for ev in v.compute()])\n assert (at == 2).all() and (bt == 3).all()\n\n at = np.zeros(shape=(4, 4))\n bt = np.zeros(shape=(4, 4))\n\n dat, dbt = store([a, b], [at, bt], compute=False, return_stored=True)\n assert isinstance(dat, Array) and isinstance(dbt, Array)\n assert (at == 0).all() and (bt == 0).all()\n assert (dat.compute() == at).all() and (dbt.compute() == bt).all()\n assert (at == 2).all() and (bt == 3).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_store_nocompute_regions_CounterLock.release.return.self_lock_release_args_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_store_nocompute_regions_CounterLock.release.return.self_lock_release_args_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1842, "end_line": 1891, "span_ids": ["NonthreadSafeStore.__init__", "CounterLock.acquire", "NonthreadSafeStore.__setitem__", "ThreadSafeStore.__setitem__", "NonthreadSafeStore", "test_store_nocompute_regions", "ThreadSafeStore.__init__", "CounterLock", "CounterLock.release", "ThreadSafetyError", "CounterLock.__init__", "ThreadSafeStore"], "tokens": 348}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_store_nocompute_regions():\n x = da.ones(10, chunks=1)\n y = np.zeros((2, 10))\n d1 = da.store(x, y, regions=(0,), compute=False)\n d2 = da.store(x, y, regions=(1,), compute=False)\n assert d1.key != d2.key\n\n\nclass ThreadSafetyError(Exception):\n pass\n\n\nclass NonthreadSafeStore(object):\n def __init__(self):\n self.in_use = False\n\n def __setitem__(self, key, value):\n if self.in_use:\n raise ThreadSafetyError()\n self.in_use = True\n time.sleep(0.001)\n self.in_use = False\n\n\nclass ThreadSafeStore(object):\n def __init__(self):\n self.concurrent_uses = 0\n self.max_concurrent_uses = 0\n\n def __setitem__(self, key, value):\n self.concurrent_uses += 1\n self.max_concurrent_uses = max(self.concurrent_uses, self.max_concurrent_uses)\n time.sleep(0.01)\n self.concurrent_uses -= 1\n\n\nclass CounterLock(object):\n def __init__(self, *args, **kwargs):\n self.lock = Lock(*args, **kwargs)\n\n self.acquire_count = 0\n self.release_count = 0\n\n def acquire(self, *args, **kwargs):\n self.acquire_count += 1\n return self.lock.acquire(*args, **kwargs)\n\n def release(self, *args, **kwargs):\n self.release_count += 1\n return self.lock.release(*args, **kwargs)", "start_char_idx": null, 
"end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_store_locks_test_store_locks.for_c_in_False_True_.if_c_.else_.assert_lock_acquire_count": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_store_locks_test_store_locks.for_c_in_False_True_.if_c_.else_.assert_lock_acquire_count", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1894, "end_line": 1948, "span_ids": ["test_store_locks"], "tokens": 522}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_store_locks():\n _Lock = type(Lock())\n d = da.ones((10, 10), chunks=(2, 2))\n a, b = d + 1, d + 2\n\n at = np.zeros(shape=(10, 10))\n bt = np.zeros(shape=(10, 10))\n\n lock = Lock()\n v = store([a, b], [at, bt], compute=False, lock=lock)\n assert isinstance(v, Delayed)\n dsk = v.dask\n locks = set(vv for v in dsk.values() for vv in v if isinstance(vv, _Lock))\n assert locks == set([lock])\n\n # Ensure same lock applies over multiple stores\n at = NonthreadSafeStore()\n v = store([a, b], [at, at], lock=lock, scheduler=\"threads\", num_workers=10)\n assert v is None\n\n # Don't assume thread safety by default\n at = NonthreadSafeStore()\n assert store(a, at, scheduler=\"threads\", num_workers=10) is None\n assert a.store(at, scheduler=\"threads\", num_workers=10) is None\n\n # Ensure locks can be removed\n at = ThreadSafeStore()\n for i in range(10):\n st = a.store(at, lock=False, scheduler=\"threads\", num_workers=10)\n assert st is None\n if at.max_concurrent_uses > 1:\n break\n if i == 9:\n assert False\n\n # Verify number of lock calls\n nchunks = np.sum([np.prod([len(c) for c in e.chunks]) for e in [a, b]])\n for c in (False, True):\n at = np.zeros(shape=(10, 10))\n bt = np.zeros(shape=(10, 10))\n lock = CounterLock()\n\n v = store([a, b], [at, bt], lock=lock, compute=c, return_stored=True)\n assert all(isinstance(e, Array) for e in v)\n\n da.compute(v)\n\n # When `return_stored=True` and `compute=False`,\n # the lock should be acquired only once for store and load steps\n # as they are fused together into one step.\n assert lock.acquire_count == lock.release_count\n if c:\n assert lock.acquire_count == 2 * nchunks\n else:\n assert lock.acquire_count == nchunks", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_store_method_return_test_store_multiprocessing_lock.assert_st_is_None": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_store_method_return_test_store_multiprocessing_lock.assert_st_is_None", "embedding": null, 
"metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1951, "end_line": 1977, "span_ids": ["test_store_multiprocessing_lock", "test_store_method_return"], "tokens": 204}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_store_method_return():\n d = da.ones((10, 10), chunks=(2, 2))\n a = d + 1\n\n for compute in [False, True]:\n for return_stored in [False, True]:\n at = np.zeros(shape=(10, 10))\n r = a.store(\n at, scheduler=\"threads\", compute=compute, return_stored=return_stored\n )\n\n if return_stored:\n assert isinstance(r, Array)\n elif compute:\n assert r is None\n else:\n assert isinstance(r, Delayed)\n\n\n@pytest.mark.xfail(reason=\"can't lock with multiprocessing\")\ndef test_store_multiprocessing_lock():\n d = da.ones((10, 10), chunks=(2, 2))\n a = d + 1\n\n at = np.zeros(shape=(10, 10))\n st = a.store(at, scheduler=\"processes\", num_workers=10)\n assert st is None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_to_hdf5_test_to_hdf5.None_3.with_h5py_File_fn_mode_.assert_f_y_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_to_hdf5_test_to_hdf5.None_3.with_h5py_File_fn_mode_.assert_f_y_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1980, "end_line": 2016, "span_ids": ["test_to_hdf5"], "tokens": 357}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_hdf5():\n h5py = pytest.importorskip(\"h5py\")\n x = da.ones((4, 4), chunks=(2, 2))\n y = da.ones(4, chunks=2, dtype=\"i4\")\n\n with tmpfile(\".hdf5\") as fn:\n x.to_hdf5(fn, \"/x\")\n with h5py.File(fn, mode=\"r+\") as f:\n d = f[\"/x\"]\n\n assert_eq(d[:], x)\n assert d.chunks == (2, 2)\n\n with tmpfile(\".hdf5\") as fn:\n x.to_hdf5(fn, \"/x\", chunks=None)\n with h5py.File(fn, mode=\"r+\") as f:\n d = f[\"/x\"]\n\n assert_eq(d[:], x)\n assert d.chunks is None\n\n with tmpfile(\".hdf5\") as fn:\n x.to_hdf5(fn, \"/x\", chunks=(1, 1))\n with h5py.File(fn, mode=\"r+\") as f:\n d = f[\"/x\"]\n\n assert_eq(d[:], x)\n assert d.chunks == (1, 1)\n\n with tmpfile(\".hdf5\") as fn:\n da.to_hdf5(fn, {\"/x\": x, \"/y\": y})\n\n with h5py.File(fn, mode=\"r+\") as f:\n assert_eq(f[\"/x\"][:], x)\n assert f[\"/x\"].chunks == (2, 2)\n assert_eq(f[\"/y\"][:], y)\n assert f[\"/y\"].chunks == (2,)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", 
"metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_to_dask_dataframe_test_np_array_with_zero_dimensions.assert_eq_np_array_d_sum_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_to_dask_dataframe_test_np_array_with_zero_dimensions.assert_eq_np_array_d_sum_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2019, "end_line": 2032, "span_ids": ["test_np_array_with_zero_dimensions", "test_to_dask_dataframe"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_dask_dataframe():\n dd = pytest.importorskip(\"dask.dataframe\")\n a = da.ones((4,), chunks=(2,))\n d = a.to_dask_dataframe()\n assert isinstance(d, dd.Series)\n\n a = da.ones((4, 4), chunks=(2, 2))\n d = a.to_dask_dataframe()\n assert isinstance(d, dd.DataFrame)\n\n\ndef test_np_array_with_zero_dimensions():\n d = da.ones((4, 4), chunks=(2, 2))\n assert_eq(np.array(d.sum()), np.array(d.compute().sum()))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_dtype_complex_test_dtype_complex.assert_eq_d_numbers_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_dtype_complex_test_dtype_complex.assert_eq_d_numbers_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2035, "end_line": 2077, "span_ids": ["test_dtype_complex"], "tokens": 527}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dtype_complex():\n x = np.arange(24).reshape((4, 6)).astype(\"f4\")\n y = np.arange(24).reshape((4, 6)).astype(\"i8\")\n z = np.arange(24).reshape((4, 6)).astype(\"i2\")\n\n a = da.from_array(x, chunks=(2, 3))\n b = da.from_array(y, chunks=(2, 3))\n c = da.from_array(z, chunks=(2, 3))\n\n def assert_eq(a, b):\n return isinstance(a, np.dtype) and isinstance(b, np.dtype) and str(a) == str(b)\n\n assert_eq(a.dtype, x.dtype)\n assert_eq(b.dtype, y.dtype)\n\n assert_eq((a + 1).dtype, (x + 1).dtype)\n assert_eq((a + b).dtype, (x + y).dtype)\n assert_eq(a.T.dtype, x.T.dtype)\n assert_eq(a[:3].dtype, x[:3].dtype)\n assert_eq((a.dot(b.T)).dtype, (x.dot(y.T)).dtype)\n\n assert_eq(stack([a, b]).dtype, np.vstack([x, y]).dtype)\n assert_eq(concatenate([a, b]).dtype, np.concatenate([x, y]).dtype)\n\n 
assert_eq(b.std().dtype, y.std().dtype)\n assert_eq(c.sum().dtype, z.sum().dtype)\n assert_eq(a.min().dtype, a.min().dtype)\n assert_eq(b.std().dtype, b.std().dtype)\n assert_eq(a.argmin(axis=0).dtype, a.argmin(axis=0).dtype)\n\n assert_eq(da.sin(c).dtype, np.sin(z).dtype)\n assert_eq(da.exp(b).dtype, np.exp(y).dtype)\n assert_eq(da.floor(a).dtype, np.floor(x).dtype)\n assert_eq(da.isnan(b).dtype, np.isnan(y).dtype)\n with ignoring(ImportError):\n assert da.isnull(b).dtype == \"bool\"\n assert da.notnull(b).dtype == \"bool\"\n\n x = np.array([(\"a\", 1)], dtype=[(\"text\", \"S1\"), (\"numbers\", \"i4\")])\n d = da.from_array(x, chunks=(1,))\n\n assert_eq(d[\"text\"].dtype, x[\"text\"].dtype)\n assert_eq(d[[\"numbers\", \"text\"]].dtype, x[[\"numbers\", \"text\"]].dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_astype_test_astype.assert_d_astype_f8_is_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_astype_test_astype.assert_d_astype_f8_is_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2080, "end_line": 2098, "span_ids": ["test_astype"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_astype():\n x = np.ones((5, 5), dtype=\"f8\")\n d = da.from_array(x, chunks=(2, 2))\n\n assert d.astype(\"i8\").dtype == \"i8\"\n assert_eq(d.astype(\"i8\"), x.astype(\"i8\"))\n assert same_keys(d.astype(\"i8\"), d.astype(\"i8\"))\n\n with pytest.raises(TypeError):\n d.astype(\"i8\", casting=\"safe\")\n\n with pytest.raises(TypeError):\n d.astype(\"i8\", not_a_real_kwarg=\"foo\")\n\n # smoketest with kwargs\n assert_eq(d.astype(\"i8\", copy=False), x.astype(\"i8\", copy=False))\n\n # Check it's a noop\n assert d.astype(\"f8\") is d", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_arithmetic_test_arithmetic.assert_eq_da_log10_a_np": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_arithmetic_test_arithmetic.assert_eq_da_log10_a_np", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2101, "end_line": 2169, "span_ids": ["test_arithmetic"], "tokens": 882}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_arithmetic():\n x = np.arange(5).astype(\"f4\") + 2\n y = np.arange(5).astype(\"i8\") + 2\n z = np.arange(5).astype(\"i4\") + 2\n a = da.from_array(x, chunks=(2,))\n b = da.from_array(y, chunks=(2,))\n c = da.from_array(z, chunks=(2,))\n assert_eq(a + b, x + y)\n assert_eq(a * b, x * y)\n assert_eq(a - b, x - y)\n assert_eq(a / b, x / y)\n assert_eq(b & b, y & y)\n assert_eq(b | b, y | y)\n assert_eq(b ^ b, y ^ y)\n assert_eq(a // b, x // y)\n assert_eq(a ** b, x ** y)\n assert_eq(a % b, x % y)\n assert_eq(a > b, x > y)\n assert_eq(a < b, x < y)\n assert_eq(a >= b, x >= y)\n assert_eq(a <= b, x <= y)\n assert_eq(a == b, x == y)\n assert_eq(a != b, x != y)\n\n assert_eq(a + 2, x + 2)\n assert_eq(a * 2, x * 2)\n assert_eq(a - 2, x - 2)\n assert_eq(a / 2, x / 2)\n assert_eq(b & True, y & True)\n assert_eq(b | True, y | True)\n assert_eq(b ^ True, y ^ True)\n assert_eq(a // 2, x // 2)\n assert_eq(a ** 2, x ** 2)\n assert_eq(a % 2, x % 2)\n assert_eq(a > 2, x > 2)\n assert_eq(a < 2, x < 2)\n assert_eq(a >= 2, x >= 2)\n assert_eq(a <= 2, x <= 2)\n assert_eq(a == 2, x == 2)\n assert_eq(a != 2, x != 2)\n\n assert_eq(2 + b, 2 + y)\n assert_eq(2 * b, 2 * y)\n assert_eq(2 - b, 2 - y)\n assert_eq(2 / b, 2 / y)\n assert_eq(True & b, True & y)\n assert_eq(True | b, True | y)\n assert_eq(True ^ b, True ^ y)\n assert_eq(2 // b, 2 // y)\n assert_eq(2 ** b, 2 ** y)\n assert_eq(2 % b, 2 % y)\n assert_eq(2 > b, 2 > y)\n assert_eq(2 < b, 2 < y)\n assert_eq(2 >= b, 2 >= y)\n assert_eq(2 <= b, 2 <= y)\n assert_eq(2 == b, 2 == y)\n assert_eq(2 != b, 2 != y)\n\n assert_eq(-a, -x)\n assert_eq(abs(a), abs(x))\n assert_eq(~(a == b), ~(x == y))\n assert_eq(~(a == b), ~(x == y))\n\n assert_eq(da.logaddexp(a, b), np.logaddexp(x, y))\n assert_eq(da.logaddexp2(a, b), np.logaddexp2(x, y))\n with pytest.warns(None): # Overflow warning\n assert_eq(da.exp(b), np.exp(y))\n assert_eq(da.log(a), np.log(x))\n assert_eq(da.log10(a), np.log10(x))\n # ... other code\n with pytest.warns(None): # Overflow warning\n # ... other code\n # ... other code\n with pytest.warns(None): # Overflow warning\n # ... other code\n # ... other code\n with pytest.warns(None): # overflow warning\n # ... other code\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_arithmetic.assert_eq_da_log1p_a_np_test_arithmetic.assert_eq_da_ceil_a_np_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_arithmetic.assert_eq_da_log1p_a_np_test_arithmetic.assert_eq_da_ceil_a_np_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2170, "end_line": 2215, "span_ids": ["test_arithmetic"], "tokens": 841}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_arithmetic():\n # ... other code\n assert_eq(a * b, x * y)\n assert_eq(a - b, x - y)\n assert_eq(a / b, x / y)\n # ... other code\n assert_eq(da.log1p(a), np.log1p(x))\n with pytest.warns(None): # Overflow warning\n assert_eq(da.expm1(b), np.expm1(y))\n assert_eq(da.sqrt(a), np.sqrt(x))\n assert_eq(da.square(a), np.square(x))\n\n assert_eq(da.sin(a), np.sin(x))\n assert_eq(da.cos(b), np.cos(y))\n assert_eq(da.tan(a), np.tan(x))\n assert_eq(da.arcsin(b / 10), np.arcsin(y / 10))\n assert_eq(da.arccos(b / 10), np.arccos(y / 10))\n assert_eq(da.arctan(b / 10), np.arctan(y / 10))\n assert_eq(da.arctan2(b * 10, a), np.arctan2(y * 10, x))\n assert_eq(da.hypot(b, a), np.hypot(y, x))\n assert_eq(da.sinh(a), np.sinh(x))\n with pytest.warns(None): # Overflow warning\n assert_eq(da.cosh(b), np.cosh(y))\n assert_eq(da.tanh(a), np.tanh(x))\n assert_eq(da.arcsinh(b * 10), np.arcsinh(y * 10))\n assert_eq(da.arccosh(b * 10), np.arccosh(y * 10))\n assert_eq(da.arctanh(b / 10), np.arctanh(y / 10))\n assert_eq(da.deg2rad(a), np.deg2rad(x))\n assert_eq(da.rad2deg(a), np.rad2deg(x))\n\n assert_eq(da.logical_and(a < 1, b < 4), np.logical_and(x < 1, y < 4))\n assert_eq(da.logical_or(a < 1, b < 4), np.logical_or(x < 1, y < 4))\n assert_eq(da.logical_xor(a < 1, b < 4), np.logical_xor(x < 1, y < 4))\n assert_eq(da.logical_not(a < 1), np.logical_not(x < 1))\n assert_eq(da.maximum(a, 5 - a), np.maximum(a, 5 - a))\n assert_eq(da.minimum(a, 5 - a), np.minimum(a, 5 - a))\n assert_eq(da.fmax(a, 5 - a), np.fmax(a, 5 - a))\n assert_eq(da.fmin(a, 5 - a), np.fmin(a, 5 - a))\n\n assert_eq(da.isreal(a + 1j * b), np.isreal(x + 1j * y))\n assert_eq(da.iscomplex(a + 1j * b), np.iscomplex(x + 1j * y))\n assert_eq(da.isfinite(a), np.isfinite(x))\n assert_eq(da.isinf(a), np.isinf(x))\n assert_eq(da.isnan(a), np.isnan(x))\n assert_eq(da.signbit(a - 3), np.signbit(x - 3))\n assert_eq(da.copysign(a - 3, b), np.copysign(x - 3, y))\n assert_eq(da.nextafter(a - 3, b), np.nextafter(x - 3, y))\n with pytest.warns(None): # overflow warning\n assert_eq(da.ldexp(c, c), np.ldexp(z, z))\n assert_eq(da.fmod(a * 12, b), np.fmod(x * 12, y))\n assert_eq(da.floor(a * 0.5), np.floor(x * 0.5))\n assert_eq(da.ceil(a), np.ceil(x))\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_arithmetic.assert_eq_da_trunc_a_2__test_arithmetic.assert_eq_da_around_a_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_arithmetic.assert_eq_da_trunc_a_2__test_arithmetic.assert_eq_da_around_a_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2216, "end_line": 2249, "span_ids": ["test_arithmetic"], "tokens": 462}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_arithmetic():\n # ... other code\n assert_eq(da.trunc(a / 2), np.trunc(x / 2))\n\n assert_eq(da.degrees(b), np.degrees(y))\n assert_eq(da.radians(a), np.radians(x))\n\n assert_eq(da.rint(a + 0.3), np.rint(x + 0.3))\n assert_eq(da.fix(a - 2.5), np.fix(x - 2.5))\n\n assert_eq(da.angle(a + 1j), np.angle(x + 1j))\n assert_eq(da.real(a + 1j), np.real(x + 1j))\n assert_eq((a + 1j).real, np.real(x + 1j))\n assert_eq(da.imag(a + 1j), np.imag(x + 1j))\n assert_eq((a + 1j).imag, np.imag(x + 1j))\n assert_eq(da.conj(a + 1j * b), np.conj(x + 1j * y))\n assert_eq((a + 1j * b).conj(), (x + 1j * y).conj())\n\n assert_eq(da.clip(b, 1, 4), np.clip(y, 1, 4))\n assert_eq(b.clip(1, 4), y.clip(1, 4))\n assert_eq(da.fabs(b), np.fabs(y))\n assert_eq(da.sign(b - 2), np.sign(y - 2))\n assert_eq(da.absolute(b - 2), np.absolute(y - 2))\n assert_eq(da.absolute(b - 2 + 1j), np.absolute(y - 2 + 1j))\n\n l1, l2 = da.frexp(a)\n r1, r2 = np.frexp(x)\n assert_eq(l1, r1)\n assert_eq(l2, r2)\n\n l1, l2 = da.modf(a)\n r1, r2 = np.modf(x)\n assert_eq(l1, r1)\n assert_eq(l2, r2)\n\n assert_eq(da.around(a, -1), np.around(x, -1))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_elemwise_consistent_names_test_optimize.assert_all_key_in_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_elemwise_consistent_names_test_optimize.assert_all_key_in_result_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2252, "end_line": 2268, "span_ids": ["test_elemwise_consistent_names", "test_optimize"], "tokens": 205}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": "def test_elemwise_consistent_names():\n a = da.from_array(np.arange(5, dtype=\"f4\"), chunks=(2,))\n b = da.from_array(np.arange(5, dtype=\"f4\"), chunks=(2,))\n assert same_keys(a + b, a + b)\n assert same_keys(a + 2, a + 2)\n assert same_keys(da.exp(a), da.exp(a))\n assert same_keys(da.exp(a, dtype=\"f8\"), da.exp(a, dtype=\"f8\"))\n assert same_keys(da.maximum(a, b), da.maximum(a, b))\n\n\ndef test_optimize():\n x = np.arange(5).astype(\"f4\")\n a = da.from_array(x, chunks=(2,))\n expr = a[1:4] + 1\n result = optimize(expr.dask, expr.__dask_keys__())\n assert isinstance(result, dict)\n assert all(key in result for key in expr.__dask_keys__())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_slicing_with_non_ndarrays_test_slicing_with_non_ndarrays.assert_eq_x_1_sum_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_slicing_with_non_ndarrays_test_slicing_with_non_ndarrays.assert_eq_x_1_sum_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2271, "end_line": 2299, "span_ids": ["test_slicing_with_non_ndarrays"], "tokens": 199}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_slicing_with_non_ndarrays():\n class ARangeSlice(object):\n dtype = np.dtype(\"i8\")\n ndim = 1\n\n def __init__(self, start, stop):\n self.start = start\n self.stop = stop\n\n def __array__(self):\n return np.arange(self.start, self.stop)\n\n class ARangeSlicable(object):\n dtype = np.dtype(\"i8\")\n ndim = 1\n\n def __init__(self, n):\n self.n = n\n\n @property\n def shape(self):\n return (self.n,)\n\n def __getitem__(self, key):\n return ARangeSlice(key[0].start, key[0].stop)\n\n x = da.from_array(ARangeSlicable(10), chunks=(4,))\n\n assert_eq((x + 1).sum(), (np.arange(10, dtype=x.dtype) + 1).sum())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_getter_test_getter.assert_eq_getter_np_arang": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_getter_test_getter.assert_eq_getter_np_arang", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2302, "end_line": 2308, "span_ids": ["test_getter"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.filterwarnings(\"ignore:the matrix subclass\")\ndef test_getter():\n assert type(getter(np.matrix([[1]]), 0)) is np.ndarray\n assert type(getter(np.matrix([[1]]), 0, asarray=False)) is np.matrix\n assert_eq(getter([1, 2, 3, 4, 5], slice(1, 4)), np.array([2, 3, 4]))\n\n assert_eq(getter(np.arange(5), (None, slice(None, None))), np.arange(5)[None, :])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_size_test_Array_normalizes_dtype.assert_isinstance_x_dtype": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_size_test_Array_normalizes_dtype.assert_isinstance_x_dtype", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2311, "end_line": 2329, "span_ids": ["test_itemsize", "test_size", "test_Array_normalizes_dtype", "test_nbytes"], "tokens": 140}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_size():\n x = da.ones((10, 2), chunks=(3, 1))\n assert x.size == np.array(x).size\n assert isinstance(x.size, int)\n\n\ndef test_nbytes():\n x = da.ones((10, 2), chunks=(3, 1))\n assert x.nbytes == np.array(x).nbytes\n\n\ndef test_itemsize():\n x = da.ones((10, 2), chunks=(3, 1))\n assert x.itemsize == 8\n\n\ndef test_Array_normalizes_dtype():\n x = da.ones((3,), chunks=(1,), dtype=int)\n assert isinstance(x.dtype, np.dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_from_array_with_lock_test_from_array_with_lock.assert_eq_e_f_x_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_from_array_with_lock_test_from_array_with_lock.assert_eq_e_f_x_x_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2332, "end_line": 2347, "span_ids": ["test_from_array_with_lock"], "tokens": 139}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_array_with_lock():\n x = np.arange(10)\n d = da.from_array(x, chunks=5, lock=True)\n\n tasks = [v for k, v in d.dask.items() if k[0] == d.name]\n\n assert 
hasattr(tasks[0][4], \"acquire\")\n assert len(set(task[4] for task in tasks)) == 1\n\n assert_eq(d, x)\n\n lock = Lock()\n e = da.from_array(x, chunks=5, lock=lock)\n f = da.from_array(x, chunks=5, lock=lock)\n\n assert_eq(e + f, x + x)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_MyArray_test_from_array_tasks_always_call_getter.assert_eq_x_dx_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_MyArray_test_from_array_tasks_always_call_getter.assert_eq_x_dx_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2350, "end_line": 2372, "span_ids": ["MyArray", "test_from_array_tasks_always_call_getter", "MyArray.__getitem__", "MyArray.__init__"], "tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class MyArray(object):\n def __init__(self, x):\n self.x = x\n self.dtype = x.dtype\n self.shape = x.shape\n self.ndim = len(x.shape)\n\n def __getitem__(self, i):\n return self.x[i]\n\n\n@pytest.mark.parametrize(\n \"x,chunks\",\n [\n (np.arange(25).reshape((5, 5)), (5, 5)),\n (np.arange(25).reshape((5, 5)), -1),\n (np.array([[1]]), 1),\n (np.array(1), 1),\n ],\n)\ndef test_from_array_tasks_always_call_getter(x, chunks):\n dx = da.from_array(MyArray(x), chunks=chunks, asarray=False)\n assert_eq(x, dx)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_from_array_ndarray_onechunk_test_from_array_ndarray_getitem.assert_dx_dask_dx_name_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_from_array_ndarray_onechunk_test_from_array_ndarray_getitem.assert_dx_dask_dx_name_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2375, "end_line": 2390, "span_ids": ["test_from_array_ndarray_onechunk", "test_from_array_ndarray_getitem"], "tokens": 189}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_array_ndarray_onechunk():\n \"\"\"ndarray with a single chunk produces a minimal single key dict\"\"\"\n x = np.array([[1, 2], [3, 4]])\n dx = da.from_array(x, chunks=-1)\n assert_eq(x, dx)\n assert len(dx.dask) == 1\n assert dx.dask[dx.name, 0, 
0] is x\n\n\ndef test_from_array_ndarray_getitem():\n \"\"\"For ndarray, don't use getter / getter_nofancy; use the cleaner\n operator.getitem\"\"\"\n x = np.array([[1, 2], [3, 4]])\n dx = da.from_array(x, chunks=(1, 2))\n assert_eq(x, dx)\n assert (dx.dask[dx.name, 0, 0] == np.array([[1, 2]])).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_from_array_list_test_from_array_list.assert_dx_dask_dx_name_0": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_from_array_list_test_from_array_list.assert_dx_dask_dx_name_0", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2393, "end_line": 2402, "span_ids": ["test_from_array_list"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"x\", [[1, 2], (1, 2), memoryview(b\"abc\")])\ndef test_from_array_list(x):\n \"\"\"Lists, tuples, and memoryviews are automatically converted to ndarray\"\"\"\n dx = da.from_array(x, chunks=-1)\n assert_eq(np.array(x), dx)\n assert isinstance(dx.dask[dx.name, 0], np.ndarray)\n\n dx = da.from_array(x, chunks=1)\n assert_eq(np.array(x), dx)\n assert dx.dask[dx.name, 0][0] == x[0]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_from_array_scalar_test_from_array_scalar.assert_isinstance_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_from_array_scalar_test_from_array_scalar.assert_isinstance_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2405, "end_line": 2420, "span_ids": ["test_from_array_scalar"], "tokens": 118}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"type_\", [t for t in np.ScalarType if t is not memoryview])\ndef test_from_array_scalar(type_):\n \"\"\"Python and numpy scalars are automatically converted to ndarray\"\"\"\n if type_ == np.datetime64:\n x = np.datetime64(\"2000-01-01\")\n else:\n x = type_(1)\n\n dx = da.from_array(x, chunks=-1)\n assert_eq(np.array(x), dx)\n assert isinstance(\n dx.dask[\n dx.name,\n ],\n np.ndarray,\n )", "start_char_idx": null, 
"end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_from_array_no_asarray_test_from_array_no_asarray.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_from_array_no_asarray_test_from_array_no_asarray.None_2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2423, "end_line": 2435, "span_ids": ["test_from_array_no_asarray"], "tokens": 163}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"asarray,cls\", [(True, np.ndarray), (False, np.matrix)])\n@pytest.mark.filterwarnings(\"ignore:the matrix subclass\")\ndef test_from_array_no_asarray(asarray, cls):\n def assert_chunks_are_of_type(x):\n chunks = compute_as_if_collection(Array, x.dask, x.__dask_keys__())\n for c in concat(chunks):\n assert type(c) is cls\n\n x = np.matrix(np.arange(100).reshape((10, 10)))\n dx = da.from_array(x, chunks=(5, 5), asarray=asarray)\n assert_chunks_are_of_type(dx)\n assert_chunks_are_of_type(dx[0:5])\n assert_chunks_are_of_type(dx[0:5][:, 0])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_from_array_getitem_test_asarray.assert_eq_asarray_y_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_from_array_getitem_test_asarray.assert_eq_asarray_y_x_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2438, "end_line": 2499, "span_ids": ["test_from_array_minus_one", "test_asarray", "test_from_array_dask_collection_warns", "test_from_array_dask_array", "test_from_array_copy", "test_from_array_getitem"], "tokens": 435}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_array_getitem():\n x = np.arange(10)\n\n def my_getitem(x, ind):\n return x[ind]\n\n y = da.from_array(x, chunks=(5,), getitem=my_getitem)\n\n for k, v in y.dask.items():\n if isinstance(v, tuple):\n assert v[0] is my_getitem\n\n assert_eq(x, y)\n\n\ndef test_from_array_minus_one():\n x = np.arange(10)\n y = da.from_array(x, -1)\n assert y.chunks == ((10,),)\n assert_eq(x, y)\n\n\ndef test_from_array_copy():\n # Regression test for https://github.com/dask/dask/issues/3751\n x = 
np.arange(10)\n y = da.from_array(x, -1)\n assert y.npartitions == 1\n y_c = y.copy()\n assert y is not y_c\n assert y.compute() is not y_c.compute()\n\n\ndef test_from_array_dask_array():\n x = np.array([[1, 2], [3, 4]])\n dx = da.from_array(x, chunks=(1, 2))\n with pytest.raises(ValueError):\n da.from_array(dx)\n\n\ndef test_from_array_dask_collection_warns():\n class CustomCollection(np.ndarray):\n def __dask_graph__(self):\n return {\"bar\": 1}\n\n x = CustomCollection([1, 2, 3])\n with pytest.warns(UserWarning):\n da.from_array(x)\n\n # Ensure da.array warns too\n with pytest.warns(UserWarning):\n da.array(x)\n\n\n@pytest.mark.parametrize(\"asarray\", [da.asarray, da.asanyarray])\ndef test_asarray(asarray):\n assert_eq(asarray([1, 2, 3]), np.asarray([1, 2, 3]))\n\n x = asarray([1, 2, 3])\n assert asarray(x) is x\n\n y = [x[0], 2, x[2]]\n assert_eq(asarray(y), x)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_asarray_dask_dataframe_test_asarray_dask_dataframe.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_asarray_dask_dataframe_test_asarray_dask_dataframe.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2502, "end_line": 2516, "span_ids": ["test_asarray_dask_dataframe"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"asarray\", [da.asarray, da.asanyarray])\ndef test_asarray_dask_dataframe(asarray):\n # https://github.com/dask/dask/issues/3885\n dd = pytest.importorskip(\"dask.dataframe\")\n import pandas as pd\n\n s = dd.from_pandas(pd.Series([1, 2, 3, 4]), 2)\n result = asarray(s)\n expected = s.values\n assert_eq(result, expected)\n\n df = s.to_frame(name=\"s\")\n result = asarray(df)\n expected = df.values\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_asarray_h5py_test_asarray_h5py.with_tmpfile_hdf5_as_.with_h5py_File_fn_mode_.assert_not_any_isinstance": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_asarray_h5py_test_asarray_h5py.with_tmpfile_hdf5_as_.with_h5py_File_fn_mode_.assert_not_any_isinstance", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2519, "end_line": 2528, "span_ids": ["test_asarray_h5py"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"asarray\", [da.asarray, da.asanyarray])\ndef test_asarray_h5py(asarray):\n h5py = pytest.importorskip(\"h5py\")\n\n with tmpfile(\".hdf5\") as fn:\n with h5py.File(fn, mode=\"a\") as f:\n d = f.create_dataset(\"/x\", shape=(2, 2), dtype=float)\n x = asarray(d)\n assert d in x.dask.values()\n assert not any(isinstance(v, np.ndarray) for v in x.dask.values())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_asarray_chunks_test_asanyarray.assert_da_asanyarray_dx_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_asarray_chunks_test_asanyarray.assert_da_asanyarray_dx_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2531, "end_line": 2545, "span_ids": ["test_asanyarray", "test_asarray_chunks"], "tokens": 144}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_asarray_chunks():\n with dask.config.set({\"array.chunk-size\": \"100 B\"}):\n x = np.ones(1000)\n d = da.asarray(x)\n assert d.npartitions > 1\n\n\n@pytest.mark.filterwarnings(\"ignore:the matrix subclass\")\ndef test_asanyarray():\n x = np.matrix([1, 2, 3])\n dx = da.asanyarray(x)\n assert dx.numblocks == (1, 1)\n chunks = compute_as_if_collection(Array, dx.dask, dx.__dask_keys__())\n assert isinstance(chunks[0][0], np.matrix)\n assert da.asanyarray(dx) is dx", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_asanyarray_dataframe_test_asanyarray_dataframe.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_asanyarray_dataframe_test_asanyarray_dataframe.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2548, "end_line": 2565, "span_ids": ["test_asanyarray_dataframe"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_asanyarray_dataframe():\n pd = 
pytest.importorskip(\"pandas\")\n dd = pytest.importorskip(\"dask.dataframe\")\n\n df = pd.DataFrame({\"x\": [1, 2, 3]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n x = np.asanyarray(df)\n dx = da.asanyarray(ddf)\n assert isinstance(dx, da.Array)\n\n assert_eq(x, dx)\n\n x = np.asanyarray(df.x)\n dx = da.asanyarray(ddf.x)\n assert isinstance(dx, da.Array)\n\n assert_eq(x, dx)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_asanyarray_datetime64_test_from_func.assert_same_keys_d_from_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_asanyarray_datetime64_test_from_func.assert_same_keys_d_from_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2568, "end_line": 2583, "span_ids": ["test_asanyarray_datetime64", "test_from_func"], "tokens": 143}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_asanyarray_datetime64():\n x = np.array([\"2000-01-01\"], dtype=\"datetime64\")\n dx = da.asanyarray(x)\n assert isinstance(dx, da.Array)\n assert_eq(x, dx)\n\n\ndef test_from_func():\n x = np.arange(10)\n f = lambda n: n * x\n d = from_func(f, (10,), x.dtype, kwargs={\"n\": 2})\n\n assert d.shape == x.shape\n assert d.dtype == x.dtype\n assert_eq(d.compute(), 2 * x)\n assert same_keys(d, from_func(f, (10,), x.dtype, kwargs={\"n\": 2}))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_concatenate3_2_test_concatenate3_2.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_concatenate3_2_test_concatenate3_2.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2586, "end_line": 2632, "span_ids": ["test_concatenate3_2"], "tokens": 723}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_concatenate3_2():\n x = np.array([1, 2])\n assert_eq(concatenate3([x, x, x]), np.array([1, 2, 1, 2, 1, 2]))\n\n x = np.array([[1, 2]])\n assert (\n concatenate3([[x, x, x], [x, x, x]])\n == np.array([[1, 2, 1, 2, 1, 2], [1, 2, 1, 2, 1, 2]])\n ).all()\n\n assert (\n concatenate3([[x, x], [x, x], [x, x]])\n == np.array([[1, 2, 1, 2], 
[1, 2, 1, 2], [1, 2, 1, 2]])\n ).all()\n\n x = np.arange(12).reshape((2, 2, 3))\n assert_eq(\n concatenate3([[[x, x, x], [x, x, x]], [[x, x, x], [x, x, x]]]),\n np.array(\n [\n [\n [0, 1, 2, 0, 1, 2, 0, 1, 2],\n [3, 4, 5, 3, 4, 5, 3, 4, 5],\n [0, 1, 2, 0, 1, 2, 0, 1, 2],\n [3, 4, 5, 3, 4, 5, 3, 4, 5],\n ],\n [\n [6, 7, 8, 6, 7, 8, 6, 7, 8],\n [9, 10, 11, 9, 10, 11, 9, 10, 11],\n [6, 7, 8, 6, 7, 8, 6, 7, 8],\n [9, 10, 11, 9, 10, 11, 9, 10, 11],\n ],\n [\n [0, 1, 2, 0, 1, 2, 0, 1, 2],\n [3, 4, 5, 3, 4, 5, 3, 4, 5],\n [0, 1, 2, 0, 1, 2, 0, 1, 2],\n [3, 4, 5, 3, 4, 5, 3, 4, 5],\n ],\n [\n [6, 7, 8, 6, 7, 8, 6, 7, 8],\n [9, 10, 11, 9, 10, 11, 9, 10, 11],\n [6, 7, 8, 6, 7, 8, 6, 7, 8],\n [9, 10, 11, 9, 10, 11, 9, 10, 11],\n ],\n ]\n ),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks3_test_from_array_with_missing_chunks.assert_d_chunks_da_fro": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks3_test_from_array_with_missing_chunks.assert_d_chunks_da_fro", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2635, "end_line": 2660, "span_ids": ["test_from_array_with_missing_chunks", "test_map_blocks3"], "tokens": 264}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_blocks3():\n x = np.arange(10)\n y = np.arange(10) * 2\n\n d = da.from_array(x, chunks=5)\n e = da.from_array(y, chunks=5)\n\n assert_eq(\n da.core.map_blocks(lambda a, b: a + 2 * b, d, e, dtype=d.dtype), x + 2 * y\n )\n\n z = np.arange(100).reshape((10, 10))\n f = da.from_array(z, chunks=5)\n\n func = lambda a, b: a + 2 * b\n res = da.core.map_blocks(func, d, f, dtype=d.dtype)\n assert_eq(res, x + 2 * z)\n assert same_keys(da.core.map_blocks(func, d, f, dtype=d.dtype), res)\n\n assert_eq(da.map_blocks(func, f, d, dtype=d.dtype), z + 2 * x)\n\n\ndef test_from_array_with_missing_chunks():\n x = np.random.randn(2, 4, 3)\n d = da.from_array(x, chunks=(None, 2, None))\n assert d.chunks == da.from_array(x, chunks=(2, 2, 3)).chunks", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_normalize_chunks_test_normalize_chunks.None_1.normalize_chunks_5_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_normalize_chunks_test_normalize_chunks.None_1.normalize_chunks_5_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2663, "end_line": 
2684, "span_ids": ["test_normalize_chunks"], "tokens": 405}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_normalize_chunks():\n assert normalize_chunks(3, (4, 6)) == ((3, 1), (3, 3))\n assert normalize_chunks(((3, 3), (8,)), (6, 8)) == ((3, 3), (8,))\n assert normalize_chunks((4, 5), (9,)) == ((4, 5),)\n assert normalize_chunks((4, 5), (9, 9)) == ((4, 4, 1), (5, 4))\n assert normalize_chunks(-1, (5, 5)) == ((5,), (5,))\n assert normalize_chunks((3, -1), (5, 5)) == ((3, 2), (5,))\n assert normalize_chunks((3, None), (5, 5)) == ((3, 2), (5,))\n assert normalize_chunks({0: 3}, (5, 5)) == ((3, 2), (5,))\n assert normalize_chunks([[2, 2], [3, 3]]) == ((2, 2), (3, 3))\n assert normalize_chunks(10, (30, 5)) == ((10, 10, 10), (5,))\n assert normalize_chunks((), (0, 0)) == ((0,), (0,))\n assert normalize_chunks(-1, (0, 3)) == ((0,), (3,))\n assert normalize_chunks(\"auto\", shape=(20,), limit=5, dtype=\"uint8\") == (\n (5, 5, 5, 5),\n )\n assert normalize_chunks((\"auto\", None), (5, 5), dtype=int) == ((5,), (5,))\n\n with pytest.raises(ValueError):\n normalize_chunks(((10,),), (11,))\n with pytest.raises(ValueError):\n normalize_chunks(((5,), (5,)), (5,))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_align_chunks_to_previous_chunks_test_align_chunks_to_previous_chunks.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_align_chunks_to_previous_chunks_test_align_chunks_to_previous_chunks.None_5", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2687, "end_line": 2712, "span_ids": ["test_align_chunks_to_previous_chunks"], "tokens": 276}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_align_chunks_to_previous_chunks():\n chunks = normalize_chunks(\n \"auto\", shape=(2000,), previous_chunks=(512,), limit=\"600 B\", dtype=np.uint8\n )\n assert chunks == ((512, 512, 512, 2000 - 512 * 3),)\n\n chunks = normalize_chunks(\n \"auto\", shape=(2000,), previous_chunks=(128,), limit=\"600 B\", dtype=np.uint8\n )\n assert chunks == ((512, 512, 512, 2000 - 512 * 3),)\n\n chunks = normalize_chunks(\n \"auto\", shape=(2000,), previous_chunks=(512,), limit=\"1200 B\", dtype=np.uint8\n )\n assert chunks == ((1024, 2000 - 1024),)\n\n chunks = normalize_chunks(\n \"auto\",\n shape=(3, 10211, 10376),\n previous_chunks=(1, 512, 512),\n limit=\"1MiB\",\n dtype=np.float32,\n )\n assert chunks[0] == (1, 1, 1)\n assert all(c % 512 == 0 for c in chunks[1][:-1])\n assert all(c % 512 == 0 for c in chunks[2][:-1])", "start_char_idx": null, 
"end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_raise_on_no_chunks_test_long_slice.assert_eq_d_8000_8200_x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_raise_on_no_chunks_test_long_slice.assert_eq_d_8000_8200_x", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2715, "end_line": 2747, "span_ids": ["test_raise_on_bad_kwargs", "test_raise_on_no_chunks", "test_long_slice", "test_chunks_is_immutable"], "tokens": 221}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_raise_on_no_chunks():\n x = da.ones(6, chunks=3)\n try:\n Array(x.dask, x.name, chunks=None, dtype=x.dtype, shape=None)\n assert False\n except ValueError as e:\n assert \"dask\" in str(e)\n assert \".org\" in str(e)\n\n\ndef test_chunks_is_immutable():\n x = da.ones(6, chunks=3)\n try:\n x.chunks = 2\n assert False\n except TypeError as e:\n assert \"rechunk(2)\" in str(e)\n\n\ndef test_raise_on_bad_kwargs():\n x = da.ones(5, chunks=3)\n try:\n da.minimum(x, foo=None)\n except TypeError as e:\n assert \"minimum\" in str(e)\n assert \"foo\" in str(e)\n\n\ndef test_long_slice():\n x = np.arange(10000)\n d = da.from_array(x, chunks=1)\n\n assert_eq(d[8000:8200], x[8000:8200])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_h5py_newaxis_test_ellipsis_slicing.assert_eq_da_ones_4_chun": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_h5py_newaxis_test_ellipsis_slicing.assert_eq_da_ones_4_chun", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2750, "end_line": 2764, "span_ids": ["test_ellipsis_slicing", "test_h5py_newaxis"], "tokens": 199}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_h5py_newaxis():\n h5py = pytest.importorskip(\"h5py\")\n\n with tmpfile(\"h5\") as fn:\n with h5py.File(fn, mode=\"a\") as f:\n x = f.create_dataset(\"/x\", shape=(10, 10), dtype=\"f8\")\n d = da.from_array(x, chunks=(5, 5))\n assert d[None, :, :].compute(scheduler=\"sync\").shape == (1, 10, 10)\n assert d[:, None, :].compute(scheduler=\"sync\").shape == (10, 1, 10)\n assert d[:, :, 
None].compute(scheduler=\"sync\").shape == (10, 10, 1)\n assert same_keys(d[:, :, None], d[:, :, None])\n\n\ndef test_ellipsis_slicing():\n assert_eq(da.ones(4, chunks=2)[...], np.ones(4))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_point_slicing_test_point_slicing.assert_same_keys_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_point_slicing_test_point_slicing.assert_same_keys_result_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2767, "end_line": 2776, "span_ids": ["test_point_slicing"], "tokens": 189}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_point_slicing():\n x = np.arange(56).reshape((7, 8))\n d = da.from_array(x, chunks=(3, 4))\n\n result = d.vindex[[1, 2, 5, 5], [3, 1, 6, 1]]\n assert_eq(result, x[[1, 2, 5, 5], [3, 1, 6, 1]])\n\n result = d.vindex[[0, 1, 6, 0], [0, 1, 0, 7]]\n assert_eq(result, x[[0, 1, 6, 0], [0, 1, 0, 7]])\n assert same_keys(result, d.vindex[[0, 1, 6, 0], [0, 1, 0, 7]])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_point_slicing_with_full_slice_test_point_slicing_with_full_slice.for_ind_in_inds_.assert_result_shape_0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_point_slicing_with_full_slice_test_point_slicing_with_full_slice.for_ind_in_inds_.assert_result_shape_0_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2779, "end_line": 2810, "span_ids": ["test_point_slicing_with_full_slice"], "tokens": 404}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_point_slicing_with_full_slice():\n from dask.array.core import _vindex_transpose, _get_axis\n\n x = np.arange(4 * 5 * 6 * 7).reshape((4, 5, 6, 7))\n d = da.from_array(x, chunks=(2, 3, 3, 4))\n\n inds = [\n [[1, 2, 3], None, [3, 2, 1], [5, 3, 4]],\n [[1, 2, 3], None, [4, 3, 2], None],\n [[1, 2, 3], [3, 2, 1]],\n [[1, 2, 3], [3, 2, 1], [3, 2, 1], [5, 3, 4]],\n [[], [], [], None],\n [np.array([1, 2, 3]), None, np.array([4, 3, 2]), None],\n [None, None, [1, 2, 3], [4, 3, 2]],\n [None, [0, 2, 3], None, [0, 3, 
2]],\n ]\n\n for ind in inds:\n slc = [\n i if isinstance(i, (np.ndarray, list)) else slice(None, None) for i in ind\n ]\n result = d.vindex[tuple(slc)]\n\n # Rotate the expected result accordingly\n axis = _get_axis(ind)\n expected = _vindex_transpose(x[tuple(slc)], axis)\n\n assert_eq(result, expected)\n\n # Always have the first axis be the length of the points\n k = len(next(i for i in ind if isinstance(i, (np.ndarray, list))))\n assert result.shape[0] == k", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_slice_with_floats_test_slice_with_integer_types.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_slice_with_floats_test_slice_with_integer_types.None_3", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2813, "end_line": 2832, "span_ids": ["test_slice_with_floats", "test_slice_with_integer_types"], "tokens": 196}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_slice_with_floats():\n d = da.ones((5,), chunks=(3,))\n with pytest.raises(IndexError):\n d[1.5]\n with pytest.raises(IndexError):\n d[0:1.5]\n with pytest.raises(IndexError):\n d[[1, 1.5]]\n\n\ndef test_slice_with_integer_types():\n x = np.arange(10)\n dx = da.from_array(x, chunks=5)\n inds = np.array([0, 3, 6], dtype=\"u8\")\n assert_eq(dx[inds], x[inds])\n assert_eq(dx[inds.astype(\"u4\")], x[inds.astype(\"u4\")])\n\n inds = np.array([0, 3, 6], dtype=np.int64)\n assert_eq(dx[inds], x[inds])\n assert_eq(dx[inds.astype(\"u4\")], x[inds.astype(\"u4\")])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_index_with_integer_types_test_vindex_basic.assert_eq_result_x_2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_index_with_integer_types_test_vindex_basic.assert_eq_result_x_2_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2835, "end_line": 2857, "span_ids": ["test_vindex_basic", "test_index_with_integer_types"], "tokens": 187}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_index_with_integer_types():\n x = np.arange(10)\n dx = 
da.from_array(x, chunks=5)\n inds = int(3)\n assert_eq(dx[inds], x[inds])\n\n inds = np.int64(3)\n assert_eq(dx[inds], x[inds])\n\n\ndef test_vindex_basic():\n x = np.arange(56).reshape((7, 8))\n d = da.from_array(x, chunks=(3, 4))\n\n # cases where basic and advanced indexing coincide\n result = d.vindex[0]\n assert_eq(result, x[0])\n\n result = d.vindex[0, 1]\n assert_eq(result, x[0, 1])\n\n result = d.vindex[[0, 1], ::-1] # slices last\n assert_eq(result, x[:2, ::-1])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_vindex_nd_test_vindex_nd.assert_eq_result_x_T_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_vindex_nd_test_vindex_nd.assert_eq_result_x_T_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2860, "end_line": 2871, "span_ids": ["test_vindex_nd"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_vindex_nd():\n x = np.arange(56).reshape((7, 8))\n d = da.from_array(x, chunks=(3, 4))\n\n result = d.vindex[[[0, 1], [6, 0]], [[0, 1], [0, 7]]]\n assert_eq(result, x[[[0, 1], [6, 0]], [[0, 1], [0, 7]]])\n\n result = d.vindex[np.arange(7)[:, None], np.arange(8)[None, :]]\n assert_eq(result, x)\n\n result = d.vindex[np.arange(7)[None, :], np.arange(8)[:, None]]\n assert_eq(result, x.T)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_vindex_negative_test_vindex_errors.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_vindex_negative_test_vindex_errors.None_4", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2874, "end_line": 2888, "span_ids": ["test_vindex_negative", "test_vindex_errors"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_vindex_negative():\n x = np.arange(10)\n d = da.from_array(x, chunks=(5, 5))\n\n result = d.vindex[np.array([0, -1])]\n assert_eq(result, x[np.array([0, -1])])\n\n\ndef test_vindex_errors():\n d = da.ones((5, 5, 5), chunks=(3, 3, 3))\n pytest.raises(IndexError, lambda: d.vindex[np.newaxis])\n pytest.raises(IndexError, lambda: 
d.vindex[[1, 2], [1, 2, 3]])\n pytest.raises(IndexError, lambda: d.vindex[[True] * 5])\n pytest.raises(IndexError, lambda: d.vindex[[0], [5]])\n pytest.raises(IndexError, lambda: d.vindex[[0], [-6]])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_vindex_merge_test_vindex_merge.assert_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_vindex_merge_test_vindex_merge.assert_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2891, "end_line": 2900, "span_ids": ["test_vindex_merge"], "tokens": 114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_vindex_merge():\n from dask.array.core import _vindex_merge\n\n locations = [1], [2, 0]\n values = [np.array([[1, 2, 3]]), np.array([[10, 20, 30], [40, 50, 60]])]\n\n assert (\n _vindex_merge(locations, values)\n == np.array([[40, 50, 60], [1, 2, 3], [10, 20, 30]])\n ).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_vindex_identity_test_vindex_identity.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_vindex_identity_test_vindex_identity.None_5", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2903, "end_line": 2919, "span_ids": ["test_vindex_identity"], "tokens": 201}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_vindex_identity():\n rng = da.random.RandomState(42)\n a, b = 10, 20\n\n x = rng.random(a, chunks=a // 2)\n assert x is x.vindex[:]\n assert x is x.vindex[:a]\n pytest.raises(IndexError, lambda: x.vindex[: a - 1])\n pytest.raises(IndexError, lambda: x.vindex[1:])\n pytest.raises(IndexError, lambda: x.vindex[0:a:2])\n\n x = rng.random((a, b), chunks=(a // 2, b // 2))\n assert x is x.vindex[:, :]\n assert x is x.vindex[:a, :b]\n pytest.raises(IndexError, lambda: x.vindex[:, : b - 1])\n pytest.raises(IndexError, lambda: x.vindex[:, 1:])\n pytest.raises(IndexError, lambda: x.vindex[:, 0:b:2])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": 
"\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_empty_array_test_memmap.with_tmpfile_npy_as_fn.with_tmpfile_npy_as_fn.try_.finally_.target__mmap_close_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_empty_array_test_memmap.with_tmpfile_npy_as_fn.with_tmpfile_npy_as_fn.try_.finally_.target__mmap_close_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2922, "end_line": 2941, "span_ids": ["test_memmap", "test_empty_array"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_empty_array():\n assert_eq(np.arange(0), da.arange(0, chunks=5))\n\n\ndef test_memmap():\n with tmpfile(\"npy\") as fn_1:\n with tmpfile(\"npy\") as fn_2:\n try:\n x = da.arange(100, chunks=15)\n target = np.memmap(fn_1, shape=x.shape, mode=\"w+\", dtype=x.dtype)\n\n x.store(target)\n\n assert_eq(target, x)\n\n np.save(fn_2, target)\n\n assert_eq(np.load(fn_2, mmap_mode=\"r\"), x)\n finally:\n target._mmap.close()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_to_npy_stack_test_to_npy_stack.with_tmpdir_as_dirname_.assert_eq_d_e_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_to_npy_stack_test_to_npy_stack.with_tmpdir_as_dirname_.assert_eq_d_e_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2944, "end_line": 2955, "span_ids": ["test_to_npy_stack"], "tokens": 141}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_npy_stack():\n x = np.arange(5 * 10 * 10).reshape((5, 10, 10))\n d = da.from_array(x, chunks=(2, 4, 4))\n\n with tmpdir() as dirname:\n stackdir = os.path.join(dirname, \"test\")\n da.to_npy_stack(stackdir, d, axis=0)\n assert os.path.exists(os.path.join(stackdir, \"0.npy\"))\n assert (np.load(os.path.join(stackdir, \"1.npy\")) == x[2:4]).all()\n\n e = da.from_npy_stack(stackdir)\n assert_eq(d, e)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_view_test_view.None_1.d_view_i4_order_asdf_": 
{"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_view_test_view.None_1.d_view_i4_order_asdf_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2958, "end_line": 2977, "span_ids": ["test_view"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_view():\n x = np.arange(56).reshape((7, 8))\n d = da.from_array(x, chunks=(2, 3))\n\n assert_eq(x.view(), d.view())\n assert_eq(x.view(\"i4\"), d.view(\"i4\"))\n assert_eq(x.view(\"i2\"), d.view(\"i2\"))\n assert all(isinstance(s, int) for s in d.shape)\n\n x = np.arange(8, dtype=\"i1\")\n d = da.from_array(x, chunks=(4,))\n assert_eq(x.view(\"i4\"), d.view(\"i4\"))\n\n with pytest.raises(ValueError):\n x = np.arange(8, dtype=\"i1\")\n d = da.from_array(x, chunks=(3,))\n d.view(\"i4\")\n\n with pytest.raises(ValueError):\n d.view(\"i4\", order=\"asdf\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_view_fortran_test_h5py_tokenize.with_tmpfile_hdf5_as_f.with_tmpfile_hdf5_as_f.assert_tokenize_x1_to": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_view_fortran_test_h5py_tokenize.with_tmpfile_hdf5_as_f.with_tmpfile_hdf5_as_f.assert_tokenize_x1_to", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2980, "end_line": 3000, "span_ids": ["test_h5py_tokenize", "test_view_fortran"], "tokens": 217}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_view_fortran():\n x = np.asfortranarray(np.arange(64).reshape((8, 8)))\n d = da.from_array(x, chunks=(2, 3))\n assert_eq(x.T.view(\"i4\").T, d.view(\"i4\", order=\"F\"))\n assert_eq(x.T.view(\"i2\").T, d.view(\"i2\", order=\"F\"))\n\n\ndef test_h5py_tokenize():\n h5py = pytest.importorskip(\"h5py\")\n with tmpfile(\"hdf5\") as fn1:\n with tmpfile(\"hdf5\") as fn2:\n f = h5py.File(fn1, mode=\"a\")\n g = h5py.File(fn2, mode=\"a\")\n\n f[\"x\"] = np.arange(10).astype(float)\n g[\"x\"] = np.ones(10).astype(float)\n\n x1 = f[\"x\"]\n x2 = g[\"x\"]\n\n assert tokenize(x1) != tokenize(x2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_with_changed_dimension_test_map_blocks_with_changed_dimension.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_with_changed_dimension_test_map_blocks_with_changed_dimension.None_5", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3003, "end_line": 3064, "span_ids": ["test_map_blocks_with_changed_dimension"], "tokens": 663}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_blocks_with_changed_dimension():\n x = np.arange(56).reshape((7, 8))\n d = da.from_array(x, chunks=(7, 4))\n\n e = d.map_blocks(lambda b: b.sum(axis=0), chunks=(4,), drop_axis=0, dtype=d.dtype)\n assert e.chunks == ((4, 4),)\n assert_eq(e, x.sum(axis=0))\n\n # Provided chunks have wrong shape\n with pytest.raises(ValueError):\n d.map_blocks(lambda b: b.sum(axis=0), chunks=(), drop_axis=0)\n\n with pytest.raises(ValueError):\n d.map_blocks(lambda b: b.sum(axis=0), chunks=((4, 4, 4),), drop_axis=0)\n\n with pytest.raises(ValueError):\n d.map_blocks(lambda b: b.sum(axis=1), chunks=((3, 4),), drop_axis=1)\n\n d = da.from_array(x, chunks=(4, 8))\n e = d.map_blocks(lambda b: b.sum(axis=1), drop_axis=1, dtype=d.dtype)\n assert e.chunks == ((4, 3),)\n assert_eq(e, x.sum(axis=1))\n\n x = np.arange(64).reshape((8, 8))\n d = da.from_array(x, chunks=(4, 4))\n e = d.map_blocks(\n lambda b: b[None, :, :, None],\n chunks=(1, 4, 4, 1),\n new_axis=[0, 3],\n dtype=d.dtype,\n )\n assert e.chunks == ((1,), (4, 4), (4, 4), (1,))\n assert_eq(e, x[None, :, :, None])\n\n e = d.map_blocks(lambda b: b[None, :, :, None], new_axis=[0, 3], dtype=d.dtype)\n assert e.chunks == ((1,), (4, 4), (4, 4), (1,))\n assert_eq(e, x[None, :, :, None])\n\n # Adding axis with a gap\n with pytest.raises(ValueError):\n d.map_blocks(lambda b: b, new_axis=(3, 4))\n\n # Both new_axis and drop_axis\n d = da.from_array(x, chunks=(8, 4))\n e = d.map_blocks(\n lambda b: b.sum(axis=0)[:, None, None],\n drop_axis=0,\n new_axis=(1, 2),\n dtype=d.dtype,\n )\n assert e.chunks == ((4, 4), (1,), (1,))\n assert_eq(e, x.sum(axis=0)[:, None, None])\n\n d = da.from_array(x, chunks=(4, 8))\n e = d.map_blocks(\n lambda b: b.sum(axis=1)[:, None, None],\n drop_axis=1,\n new_axis=(1, 2),\n dtype=d.dtype,\n )\n assert e.chunks == ((4, 4), (1,), (1,))\n assert_eq(e, x.sum(axis=1)[:, None, None])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_with_changed_dimension_and_broadcast_chunks_test_map_blocks_with_changed_dimension_and_broadcast_chunks.assert_eq_result_expecte": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_with_changed_dimension_and_broadcast_chunks_test_map_blocks_with_changed_dimension_and_broadcast_chunks.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3067, "end_line": 3073, "span_ids": ["test_map_blocks_with_changed_dimension_and_broadcast_chunks"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_blocks_with_changed_dimension_and_broadcast_chunks():\n # https://github.com/dask/dask/issues/4299\n a = da.from_array([1, 2, 3], 3)\n b = da.from_array(np.array([0, 1, 2, 0, 1, 2]), chunks=3)\n result = da.map_blocks(operator.add, a, b, chunks=b.chunks)\n expected = da.from_array(np.array([1, 3, 5, 1, 3, 5]), chunks=3)\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_broadcast_chunks_test_broadcast_chunks.None_1.broadcast_chunks_a_b_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_broadcast_chunks_test_broadcast_chunks.None_1.broadcast_chunks_a_b_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3076, "end_line": 3114, "span_ids": ["test_broadcast_chunks"], "tokens": 440}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_broadcast_chunks():\n assert broadcast_chunks() == ()\n\n assert broadcast_chunks(((2, 3),)) == ((2, 3),)\n\n assert broadcast_chunks(((5, 5),), ((5, 5),)) == ((5, 5),)\n\n a = ((10, 10, 10), (5, 5))\n b = ((5, 5),)\n assert broadcast_chunks(a, b) == ((10, 10, 10), (5, 5))\n assert broadcast_chunks(b, a) == ((10, 10, 10), (5, 5))\n\n a = ((10, 10, 10), (5, 5))\n b = ((1,), (5, 5))\n assert broadcast_chunks(a, b) == ((10, 10, 10), (5, 5))\n\n a = ((10, 10, 10), (5, 5))\n b = ((3, 3), (5, 5))\n with pytest.raises(ValueError):\n broadcast_chunks(a, b)\n\n a = ((1,), (5, 5))\n b = ((1,), (5, 5))\n assert broadcast_chunks(a, b) == a\n\n a = ((1,), (np.nan, np.nan, np.nan))\n b = ((3, 3), (1,))\n r = broadcast_chunks(a, b)\n assert r[0] == b[0] and np.allclose(r[1], a[1], equal_nan=True)\n\n a = ((3, 3), (1,))\n b = ((1,), (np.nan, np.nan, np.nan))\n r = broadcast_chunks(a, b)\n assert r[0] == a[0] and np.allclose(r[1], b[1], equal_nan=True)\n\n a = ((3, 3), (5, 5))\n b = ((1,), (np.nan, np.nan, np.nan))\n with pytest.raises(ValueError):\n broadcast_chunks(a, b)", 
"start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_chunks_error_test_dont_fuse_outputs.assert_eq_a_np_array_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_chunks_error_test_dont_fuse_outputs.assert_eq_a_np_array_1_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3117, "end_line": 3132, "span_ids": ["test_chunks_error", "test_dont_fuse_outputs", "test_array_compute_forward_kwargs"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_chunks_error():\n x = np.ones((10, 10))\n with pytest.raises(ValueError):\n da.from_array(x, chunks=(5,))\n\n\ndef test_array_compute_forward_kwargs():\n x = da.arange(10, chunks=2).sum()\n x.compute(bogus_keyword=10)\n\n\ndef test_dont_fuse_outputs():\n dsk = {(\"x\", 0): np.array([1, 2]), (\"x\", 1): (inc, (\"x\", 0))}\n\n a = da.Array(dsk, \"x\", chunks=(2,), shape=(4,), dtype=np.array([1]).dtype)\n assert_eq(a, np.array([1, 2, 2, 3], dtype=a.dtype))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_dont_dealias_outputs_test_dont_dealias_outputs.assert_eq_a_np_ones_4_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_dont_dealias_outputs_test_dont_dealias_outputs.assert_eq_a_np_ones_4_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3135, "end_line": 3144, "span_ids": ["test_dont_dealias_outputs"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dont_dealias_outputs():\n dsk = {\n (\"x\", 0, 0): np.ones((2, 2)),\n (\"x\", 0, 1): np.ones((2, 2)),\n (\"x\", 1, 0): np.ones((2, 2)),\n (\"x\", 1, 1): (\"x\", 0, 0),\n }\n\n a = da.Array(dsk, \"x\", chunks=(2, 2), shape=(4, 4), dtype=np.ones(1).dtype)\n assert_eq(a, np.ones((4, 4)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_timedelta_op_test_to_delayed.assert_a_compute_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_timedelta_op_test_to_delayed.assert_a_compute_s", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3147, "end_line": 3164, "span_ids": ["test_timedelta_op", "test_to_delayed"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_timedelta_op():\n x = np.array([np.timedelta64(10, \"h\")])\n y = np.timedelta64(1, \"h\")\n a = da.from_array(x, chunks=(1,)) / y\n assert a.compute() == x / y\n\n\ndef test_to_delayed():\n x = da.random.random((4, 4), chunks=(2, 2))\n y = x + 10\n\n [[a, b], [c, d]] = y.to_delayed()\n assert_eq(a.compute(), y[:2, :2])\n\n s = 2\n x = da.from_array(np.array(s), chunks=0)\n a = x.to_delayed()[tuple()]\n assert a.compute() == s", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_to_delayed_optimize_graph_test_to_delayed_optimize_graph.assert_d_compute_d2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_to_delayed_optimize_graph_test_to_delayed_optimize_graph.assert_d_compute_d2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3167, "end_line": 3179, "span_ids": ["test_to_delayed_optimize_graph"], "tokens": 149}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_delayed_optimize_graph():\n x = da.ones((4, 4), chunks=(2, 2))\n y = x[1:][1:][1:][:, 1:][:, 1:][:, 1:]\n\n # optimizations\n d = y.to_delayed().flatten().tolist()[0]\n assert len([k for k in d.dask if k[0].startswith(\"getitem\")]) == 1\n\n # no optimizations\n d2 = y.to_delayed(optimize_graph=False).flatten().tolist()[0]\n assert dict(d2.dask) == dict(y.dask)\n\n assert (d.compute() == d2.compute()).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_cumulative_test_cumulative.None_3.x_cumsum_axis_4_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_cumulative_test_cumulative.None_3.x_cumsum_axis_4_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3182, "end_line": 3239, "span_ids": ["test_cumulative"], "tokens": 829}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_cumulative():\n x = da.arange(20, chunks=5)\n assert_eq(x.cumsum(axis=0), np.arange(20).cumsum())\n assert_eq(x.cumprod(axis=0), np.arange(20).cumprod())\n\n assert_eq(da.nancumsum(x, axis=0), nancumsum(np.arange(20)))\n assert_eq(da.nancumprod(x, axis=0), nancumprod(np.arange(20)))\n\n a = np.random.random(20)\n rs = np.random.RandomState(0)\n a[rs.rand(*a.shape) < 0.5] = np.nan\n x = da.from_array(a, chunks=5)\n assert_eq(da.nancumsum(x, axis=0), nancumsum(a))\n assert_eq(da.nancumprod(x, axis=0), nancumprod(a))\n\n a = np.random.random((20, 24))\n x = da.from_array(a, chunks=(6, 5))\n assert_eq(x.cumsum(axis=0), a.cumsum(axis=0))\n assert_eq(x.cumsum(axis=1), a.cumsum(axis=1))\n assert_eq(x.cumprod(axis=0), a.cumprod(axis=0))\n assert_eq(x.cumprod(axis=1), a.cumprod(axis=1))\n\n assert_eq(da.nancumsum(x, axis=0), nancumsum(a, axis=0))\n assert_eq(da.nancumsum(x, axis=1), nancumsum(a, axis=1))\n assert_eq(da.nancumprod(x, axis=0), nancumprod(a, axis=0))\n assert_eq(da.nancumprod(x, axis=1), nancumprod(a, axis=1))\n\n a = np.random.random((20, 24))\n rs = np.random.RandomState(0)\n a[rs.rand(*a.shape) < 0.5] = np.nan\n x = da.from_array(a, chunks=(6, 5))\n assert_eq(da.nancumsum(x, axis=0), nancumsum(a, axis=0))\n assert_eq(da.nancumsum(x, axis=1), nancumsum(a, axis=1))\n assert_eq(da.nancumprod(x, axis=0), nancumprod(a, axis=0))\n assert_eq(da.nancumprod(x, axis=1), nancumprod(a, axis=1))\n\n a = np.random.random((20, 24, 13))\n x = da.from_array(a, chunks=(6, 5, 4))\n for axis in [0, 1, 2, -1, -2, -3]:\n assert_eq(x.cumsum(axis=axis), a.cumsum(axis=axis))\n assert_eq(x.cumprod(axis=axis), a.cumprod(axis=axis))\n\n assert_eq(da.nancumsum(x, axis=axis), nancumsum(a, axis=axis))\n assert_eq(da.nancumprod(x, axis=axis), nancumprod(a, axis=axis))\n\n a = np.random.random((20, 24, 13))\n rs = np.random.RandomState(0)\n a[rs.rand(*a.shape) < 0.5] = np.nan\n x = da.from_array(a, chunks=(6, 5, 4))\n for axis in [0, 1, 2, -1, -2, -3]:\n assert_eq(da.nancumsum(x, axis=axis), nancumsum(a, axis=axis))\n assert_eq(da.nancumprod(x, axis=axis), nancumprod(a, axis=axis))\n\n with pytest.raises(ValueError):\n x.cumsum(axis=3)\n\n with pytest.raises(ValueError):\n x.cumsum(axis=-4)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_from_delayed_test_A_property.assert_x_A_is_x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_from_delayed_test_A_property.assert_x_A_is_x", 
"embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3242, "end_line": 3258, "span_ids": ["test_from_delayed_meta", "test_from_delayed", "test_A_property"], "tokens": 141}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_delayed():\n v = delayed(np.ones)((5, 3))\n x = from_delayed(v, shape=(5, 3), dtype=np.ones(0).dtype)\n assert isinstance(x, Array)\n assert_eq(x, np.ones((5, 3)))\n\n\ndef test_from_delayed_meta():\n v = delayed(np.ones)((5, 3))\n x = from_delayed(v, shape=(5, 3), meta=np.ones(0))\n assert isinstance(x, Array)\n assert isinstance(x._meta, np.ndarray)\n\n\ndef test_A_property():\n x = da.ones(5, chunks=(2,))\n assert x.A is x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_copy_mutate_test_copy_mutate.assert_memo_id_x_is_y2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_copy_mutate_test_copy_mutate.assert_memo_id_x_is_y2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3261, "end_line": 3274, "span_ids": ["test_copy_mutate"], "tokens": 114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_copy_mutate():\n x = da.arange(5, chunks=(2,))\n y = x.copy()\n memo = {}\n y2 = copy.deepcopy(x, memo=memo)\n x[x % 2 == 0] = -1\n\n xx = np.arange(5)\n xx[xx % 2 == 0] = -1\n assert_eq(x, xx)\n\n assert_eq(y, np.arange(5))\n assert_eq(y2, np.arange(5))\n assert memo[id(x)] is y2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_npartitions_test_from_array_raises_on_bad_chunks.None_1.da_from_array_x_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_npartitions_test_from_array_raises_on_bad_chunks.None_1.da_from_array_x_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3277, "end_line": 3330, "span_ids": ["test_from_array_names", "test_elemwise_name", "test_map_blocks_name", "test_from_array_raises_on_bad_chunks", "test_array_picklable", "test_npartitions", 
"test_astype_gh1151"], "tokens": 360}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_npartitions():\n assert da.ones(5, chunks=(2,)).npartitions == 3\n assert da.ones((5, 5), chunks=(2, 3)).npartitions == 6\n\n\ndef test_astype_gh1151():\n a = np.arange(5).astype(np.int32)\n b = da.from_array(a, (1,))\n assert_eq(a.astype(np.int16), b.astype(np.int16))\n\n\ndef test_elemwise_name():\n assert (da.ones(5, chunks=2) + 1).name.startswith(\"add-\")\n\n\ndef test_map_blocks_name():\n assert da.ones(5, chunks=2).map_blocks(inc).name.startswith(\"inc-\")\n\n\ndef test_from_array_names():\n pytest.importorskip(\"distributed\")\n\n x = np.ones(10)\n d = da.from_array(x, chunks=2)\n\n names = countby(key_split, d.dask)\n assert set(names.values()) == set([5])\n\n\n@pytest.mark.parametrize(\n \"array\",\n [\n da.arange(100, chunks=25),\n da.ones((10, 10), chunks=25),\n ],\n)\ndef test_array_picklable(array):\n from pickle import loads, dumps\n\n a2 = loads(dumps(array))\n assert_eq(array, a2)\n\n\ndef test_from_array_raises_on_bad_chunks():\n x = np.ones(10)\n\n with pytest.raises(ValueError):\n da.from_array(x, chunks=(5, 5, 5))\n\n # with pytest.raises(ValueError):\n # da.from_array(x, chunks=100)\n\n with pytest.raises(ValueError):\n da.from_array(x, chunks=((5, 5, 5),))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_concatenate_axes_test_concatenate_axes.None_1._too_many_axes": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_concatenate_axes_test_concatenate_axes.None_1._too_many_axes", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3333, "end_line": 3348, "span_ids": ["test_concatenate_axes"], "tokens": 274}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_concatenate_axes():\n x = np.ones((2, 2, 2))\n\n assert_eq(concatenate_axes([x, x], axes=[0]), np.ones((4, 2, 2)))\n assert_eq(concatenate_axes([x, x, x], axes=[0]), np.ones((6, 2, 2)))\n assert_eq(concatenate_axes([x, x], axes=[1]), np.ones((2, 4, 2)))\n assert_eq(concatenate_axes([[x, x], [x, x]], axes=[0, 1]), np.ones((4, 4, 2)))\n assert_eq(concatenate_axes([[x, x], [x, x]], axes=[0, 2]), np.ones((4, 2, 4)))\n assert_eq(concatenate_axes([[x, x, x], [x, x, x]], axes=[1, 2]), np.ones((2, 4, 6)))\n\n with pytest.raises(ValueError):\n concatenate_axes(\n [[x, x], [x, x]], axes=[0]\n ) # not all nested lists accounted for\n with pytest.raises(ValueError):\n concatenate_axes([x, x], axes=[0, 1, 2, 3]) # too many axes", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_blockwise_concatenate_test_blockwise_concatenate.assert_eq_z_np_ones_10_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_blockwise_concatenate_test_blockwise_concatenate.assert_eq_z_np_ones_10_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3351, "end_line": 3384, "span_ids": ["test_blockwise_concatenate"], "tokens": 326}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_blockwise_concatenate():\n x = da.ones((4, 4, 4), chunks=(2, 2, 2))\n y = da.ones((4, 4), chunks=(2, 2))\n\n def f(a, b):\n assert isinstance(a, np.ndarray)\n assert isinstance(b, np.ndarray)\n\n assert a.shape == (2, 4, 4)\n assert b.shape == (4, 4)\n\n return (a + b).sum(axis=(1, 2))\n\n z = da.blockwise(f, \"i\", x, \"ijk\", y, \"jk\", concatenate=True, dtype=x.dtype)\n assert_eq(z, np.ones(4) * 32)\n\n z = da.blockwise(add, \"ij\", y, \"ij\", y, \"ij\", concatenate=True, dtype=x.dtype)\n assert_eq(z, np.ones((4, 4)) * 2)\n\n def f(a, b, c):\n assert isinstance(a, np.ndarray)\n assert isinstance(b, np.ndarray)\n assert isinstance(c, np.ndarray)\n\n assert a.shape == (4, 2, 4)\n assert b.shape == (4, 4)\n assert c.shape == (4, 2)\n\n return np.ones(5)\n\n z = da.blockwise(\n f, \"j\", x, \"ijk\", y, \"ki\", y, \"ij\", concatenate=True, dtype=x.dtype\n )\n assert_eq(z, np.ones(10), check_shape=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_common_blockdim_test_common_blockdim.None_6": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_common_blockdim_test_common_blockdim.None_6", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3387, "end_line": 3395, "span_ids": ["test_common_blockdim"], "tokens": 214}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_common_blockdim():\n assert common_blockdim([(5,), (5,)]) == (5,)\n assert common_blockdim([(5,), (2, 3)]) == (2, 3)\n assert common_blockdim([(5, 5), (2, 3, 5)]) == (2, 3, 5)\n assert common_blockdim([(5, 5), (2, 3, 5)]) == (2, 3, 5)\n assert common_blockdim([(5, 2, 3), (2, 3, 5)]) == 
(2, 3, 2, 3)\n\n assert common_blockdim([(1, 2), (2, 1)]) == (1, 1, 1)\n assert common_blockdim([(1, 2, 2), (2, 1, 2), (2, 2, 1)]) == (1, 1, 1, 1, 1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_uneven_chunks_that_fit_neatly_test_elemwise_uneven_chunks.assert_z_chunks_2_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_uneven_chunks_that_fit_neatly_test_elemwise_uneven_chunks.assert_z_chunks_2_2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3398, "end_line": 3422, "span_ids": ["test_elemwise_uneven_chunks", "test_uneven_chunks_that_fit_neatly"], "tokens": 289}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_uneven_chunks_that_fit_neatly():\n x = da.arange(10, chunks=((5, 5),))\n y = da.ones(10, chunks=((5, 2, 3),))\n\n assert_eq(x + y, np.arange(10) + np.ones(10))\n\n z = x + y\n assert z.chunks == ((5, 2, 3),)\n\n\ndef test_elemwise_uneven_chunks():\n x = da.arange(10, chunks=((4, 6),))\n y = da.ones(10, chunks=((6, 4),))\n\n assert_eq(x + y, np.arange(10) + np.ones(10))\n\n z = x + y\n assert z.chunks == ((4, 2, 4),)\n\n x = da.random.random((10, 10), chunks=((4, 6), (5, 2, 3)))\n y = da.random.random((4, 10, 10), chunks=((2, 2), (6, 4), (2, 3, 5)))\n\n z = x + y\n assert_eq(x + y, x.compute() + y.compute())\n assert z.chunks == ((2, 2), (4, 2, 4), (2, 3, 2, 3))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_uneven_chunks_blockwise_test_uneven_chunks_blockwise.assert_eq_z_x_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_uneven_chunks_blockwise_test_uneven_chunks_blockwise.assert_eq_z_x_compute_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3425, "end_line": 3431, "span_ids": ["test_uneven_chunks_blockwise"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_uneven_chunks_blockwise():\n x = da.random.random((10, 10), chunks=((2, 3, 2, 3), (5, 5)))\n y = da.random.random((10, 10), chunks=((4, 4, 2), (4, 2, 4)))\n z = da.blockwise(np.dot, \"ik\", x, \"ij\", y, \"jk\", 
dtype=x.dtype, concatenate=True)\n assert z.chunks == (x.chunks[0], y.chunks[1])\n\n assert_eq(z, x.compute().dot(y))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_warn_bad_rechunking_test_map_blocks_delayed.assert_yy_key_in_zz_dask": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_warn_bad_rechunking_test_map_blocks_delayed.assert_yy_key_in_zz_dask", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3434, "end_line": 3463, "span_ids": ["test_warn_bad_rechunking", "test_map_blocks_delayed", "test_concatenate_stack_dont_warn"], "tokens": 227}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_warn_bad_rechunking():\n x = da.ones((20, 20), chunks=(20, 1))\n y = da.ones((20, 20), chunks=(1, 20))\n\n with pytest.warns(da.core.PerformanceWarning, match=\"factor of 20\"):\n x + y\n\n\ndef test_concatenate_stack_dont_warn():\n with warnings.catch_warnings(record=True) as record:\n da.concatenate([da.ones(2, chunks=1)] * 62)\n assert not record\n\n with warnings.catch_warnings(record=True) as record:\n da.stack([da.ones(2, chunks=1)] * 62)\n assert not record\n\n\ndef test_map_blocks_delayed():\n x = da.ones((10, 10), chunks=(5, 5))\n y = np.ones((5, 5))\n\n z = x.map_blocks(add, y, dtype=x.dtype)\n\n yy = delayed(y)\n zz = x.map_blocks(add, yy, dtype=x.dtype)\n\n assert_eq(z, zz)\n\n assert yy.key in zz.dask", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_no_chunks_test_no_chunks.assert_eq_x_x_std_kee": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_no_chunks_test_no_chunks.assert_eq_x_x_std_kee", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3466, "end_line": 3474, "span_ids": ["test_no_chunks"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_no_chunks():\n X = np.arange(11)\n dsk = {(\"x\", 0): np.arange(5), (\"x\", 1): np.arange(5, 11)}\n x = Array(dsk, \"x\", ((np.nan, np.nan),), np.arange(1).dtype)\n assert_eq(x + 1, X + 1)\n assert_eq(x.sum(), X.sum())\n assert_eq((x + 1).std(), (X + 
1).std())\n assert_eq((x + x).std(), (X + X).std())\n assert_eq((x + x).std(keepdims=True), (X + X).std(keepdims=True))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_no_chunks_2d_test_no_chunks_2d.assert_eq_x_dot_x_T_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_no_chunks_2d_test_no_chunks_2d.assert_eq_x_dot_x_T_1_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3477, "end_line": 3487, "span_ids": ["test_no_chunks_2d"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_no_chunks_2d():\n X = np.arange(24).reshape((4, 6))\n x = da.from_array(X, chunks=(2, 2))\n x._chunks = ((np.nan, np.nan), (np.nan, np.nan, np.nan))\n\n with pytest.warns(None): # zero division warning\n assert_eq(da.log(x), np.log(X))\n assert_eq(x.T, X.T)\n assert_eq(x.sum(axis=0, keepdims=True), X.sum(axis=0, keepdims=True))\n assert_eq(x.sum(axis=1, keepdims=True), X.sum(axis=1, keepdims=True))\n assert_eq(x.dot(x.T + 1), X.dot(X.T + 1))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_no_chunks_yes_chunks_test_no_chunks_yes_chunks.assert_x_dot_x_T_chunk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_no_chunks_yes_chunks_test_no_chunks_yes_chunks.assert_x_dot_x_T_chunk", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3490, "end_line": 3497, "span_ids": ["test_no_chunks_yes_chunks"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_no_chunks_yes_chunks():\n X = np.arange(24).reshape((4, 6))\n x = da.from_array(X, chunks=(2, 2))\n x._chunks = ((2, 2), (np.nan, np.nan, np.nan))\n\n assert (x + 1).chunks == ((2, 2), (np.nan, np.nan, np.nan))\n assert (x.T).chunks == ((np.nan, np.nan, np.nan), (2, 2))\n assert (x.dot(x.T)).chunks == ((2, 2), (2, 2))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_raise_informative_errors_no_chunks_test_raise_informative_errors_no_chunks.for_op_in_.if_chunk_not_in_str_e_v.op_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_raise_informative_errors_no_chunks_test_raise_informative_errors_no_chunks.for_op_in_.if_chunk_not_in_str_e_v.op_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3500, "end_line": 3519, "span_ids": ["test_raise_informative_errors_no_chunks"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_raise_informative_errors_no_chunks():\n X = np.arange(10)\n a = da.from_array(X, chunks=(5, 5))\n a._chunks = ((np.nan, np.nan),)\n\n b = da.from_array(X, chunks=(4, 4, 2))\n b._chunks = ((np.nan, np.nan, np.nan),)\n\n for op in [\n lambda: a + b,\n lambda: a[1],\n lambda: a[::2],\n lambda: a[-5],\n lambda: a.rechunk(3),\n lambda: a.reshape(2, 5),\n ]:\n with pytest.raises(ValueError) as e:\n op()\n if \"chunk\" not in str(e.value) or \"unknown\" not in str(e.value):\n op()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_no_chunks_slicing_2d_test_no_chunks_slicing_2d.for_op_in_lambda_x_4.with_pytest_raises_ValueE.op_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_no_chunks_slicing_2d_test_no_chunks_slicing_2d.for_op_in_lambda_x_4.with_pytest_raises_ValueE.op_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3522, "end_line": 3531, "span_ids": ["test_no_chunks_slicing_2d"], "tokens": 118}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_no_chunks_slicing_2d():\n X = np.arange(24).reshape((4, 6))\n x = da.from_array(X, chunks=(2, 2))\n x._chunks = ((2, 2), (np.nan, np.nan, np.nan))\n\n assert_eq(x[0], X[0])\n\n for op in [lambda: x[:, 4], lambda: x[:, ::2], lambda: x[0, 2:4]]:\n with pytest.raises(ValueError, match=\"chunk sizes are unknown\"):\n op()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_index_array_with_array_1d_test_index_array_with_array_1d.with_pytest_raises_ValueE.dx_dy_5_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_index_array_with_array_1d_test_index_array_with_array_1d.with_pytest_raises_ValueE.dx_dy_5_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3534, "end_line": 3545, "span_ids": ["test_index_array_with_array_1d"], "tokens": 108}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_index_array_with_array_1d():\n x = np.arange(10)\n dx = da.from_array(x, chunks=(5,))\n dx._chunks = ((np.nan, np.nan),)\n\n assert_eq(x[x > 6], dx[dx > 6])\n assert_eq(x[x % 2 == 0], dx[dx % 2 == 0])\n\n dy = da.ones(11, chunks=(3,))\n\n with pytest.raises(ValueError):\n dx[dy > 5]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_index_array_with_array_2d_test_index_array_with_array_2d.assert_len_record_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_index_array_with_array_2d_test_index_array_with_array_2d.assert_len_record_2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3548, "end_line": 3564, "span_ids": ["test_index_array_with_array_2d"], "tokens": 188}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_index_array_with_array_2d():\n x = np.arange(24).reshape((4, 6))\n dx = da.from_array(x, chunks=(2, 2))\n\n assert_eq(x[x > 6], dx[dx > 6])\n assert_eq(x[x % 2 == 0], dx[dx % 2 == 0])\n\n # Test with unknown chunks\n dx._chunks = ((2, 2), (np.nan, np.nan, np.nan))\n\n with pytest.warns(UserWarning, match=\"different ordering\") as record:\n assert sorted(x[x % 2 == 0].tolist()) == sorted(\n dx[dx % 2 == 0].compute().tolist()\n )\n assert sorted(x[x > 6].tolist()) == sorted(dx[dx > 6].compute().tolist())\n\n assert len(record) == 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_index_array_with_array_3d_2d_test_index_array_with_array_3d_2d.assert_eq_x_ind_dx_": 
{"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_index_array_with_array_3d_2d_test_index_array_with_array_3d_2d.assert_eq_x_ind_dx_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3567, "end_line": 3577, "span_ids": ["test_index_array_with_array_3d_2d"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(reason=\"Chunking does not align well\")\ndef test_index_array_with_array_3d_2d():\n x = np.arange(4 ** 3).reshape((4, 4, 4))\n dx = da.from_array(x, chunks=(2, 2, 2))\n\n ind = np.random.random((4, 4)) > 0.5\n ind = np.arange(4 ** 2).reshape((4, 4)) % 2 == 0\n dind = da.from_array(ind, (2, 2))\n\n assert_eq(x[ind], dx[dind])\n assert_eq(x[:, ind], dx[:, dind])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_setitem_1d_test_blockwise_zero_shape_new_axes.da_blockwise_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_setitem_1d_test_blockwise_zero_shape_new_axes.da_blockwise_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3580, "end_line": 3651, "span_ids": ["test_blockwise_zero_shape", "test_zero_sized_array_rechunk", "test_setitem_errs", "test_blockwise_zero_shape_new_axes", "test_zero_slice_dtypes", "test_setitem_1d", "test_setitem_2d"], "tokens": 502}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_setitem_1d():\n x = np.arange(10)\n dx = da.from_array(x.copy(), chunks=(5,))\n\n x[x > 6] = -1\n x[x % 2 == 0] = -2\n\n dx[dx > 6] = -1\n dx[dx % 2 == 0] = -2\n\n assert_eq(x, dx)\n\n\ndef test_setitem_2d():\n x = np.arange(24).reshape((4, 6))\n dx = da.from_array(x.copy(), chunks=(2, 2))\n\n x[x > 6] = -1\n x[x % 2 == 0] = -2\n\n dx[dx > 6] = -1\n dx[dx % 2 == 0] = -2\n\n assert_eq(x, dx)\n\n\ndef test_setitem_errs():\n x = da.ones((4, 4), chunks=(2, 2))\n\n with pytest.raises(ValueError):\n x[x > 1] = x\n\n\ndef test_zero_slice_dtypes():\n x = da.arange(5, chunks=1)\n y = x[[]]\n assert y.dtype == x.dtype\n assert y.shape == (0,)\n assert_eq(x[[]], np.arange(5)[[]])\n\n\ndef test_zero_sized_array_rechunk():\n x = da.arange(5, chunks=1)[:0]\n y = da.blockwise(identity, \"i\", x, \"i\", dtype=x.dtype)\n assert_eq(x, y)\n\n\ndef test_blockwise_zero_shape():\n da.blockwise(\n lambda x: x,\n \"i\",\n da.arange(10, chunks=10),\n \"i\",\n da.from_array(np.ones((0, 
2)), ((0,), 2)),\n \"ab\",\n da.from_array(np.ones((0,)), ((0,),)),\n \"a\",\n dtype=\"float64\",\n )\n\n\ndef test_blockwise_zero_shape_new_axes():\n da.blockwise(\n lambda x: np.ones(42),\n \"i\",\n da.from_array(np.ones((0, 2)), ((0,), 2)),\n \"ab\",\n da.from_array(np.ones((0,)), ((0,),)),\n \"a\",\n dtype=\"float64\",\n new_axes={\"i\": 42},\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_broadcast_against_zero_shape_test_broadcast_against_zero_shape.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_broadcast_against_zero_shape_test_broadcast_against_zero_shape.None_5", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3654, "end_line": 3660, "span_ids": ["test_broadcast_against_zero_shape"], "tokens": 242}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_broadcast_against_zero_shape():\n assert_eq(da.arange(1, chunks=1)[:0] + 0, np.arange(1)[:0] + 0)\n assert_eq(da.arange(1, chunks=1)[:0] + 0.1, np.arange(1)[:0] + 0.1)\n assert_eq(da.ones((5, 5), chunks=(2, 3))[:0] + 0, np.ones((5, 5))[:0] + 0)\n assert_eq(da.ones((5, 5), chunks=(2, 3))[:0] + 0.1, np.ones((5, 5))[:0] + 0.1)\n assert_eq(da.ones((5, 5), chunks=(2, 3))[:, :0] + 0, np.ones((5, 5))[:, :0] + 0)\n assert_eq(da.ones((5, 5), chunks=(2, 3))[:, :0] + 0.1, np.ones((5, 5))[:, :0] + 0.1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_from_array_name_test_from_array_name.assert_dx2_name_dx3_na": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_from_array_name_test_from_array_name.assert_dx2_name_dx3_na", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3663, "end_line": 3677, "span_ids": ["test_from_array_name"], "tokens": 168}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_array_name():\n x = np.array([1, 2, 3, 4, 5])\n chunks = x.shape\n # Default is tokenize the array\n dx = da.from_array(x, chunks=chunks)\n hashed_name = dx.name\n assert da.from_array(x, chunks=chunks).name == hashed_name\n # Specify name directly\n assert 
da.from_array(x, chunks=chunks, name=\"x\").name == \"x\"\n # False gives a random name\n dx2 = da.from_array(x, chunks=chunks, name=False)\n dx3 = da.from_array(x, chunks=chunks, name=False)\n assert dx2.name != hashed_name\n assert dx3.name != hashed_name\n assert dx2.name != dx3.name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_concatenate_errs_test_concatenate_errs.None_1.da_concatenate_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_concatenate_errs_test_concatenate_errs.None_1.da_concatenate_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3680, "end_line": 3689, "span_ids": ["test_concatenate_errs"], "tokens": 112}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_concatenate_errs():\n with pytest.raises(ValueError, match=r\"Shapes.*\\(2, 1\\)\"):\n da.concatenate(\n [da.zeros((2, 1), chunks=(2, 1)), da.zeros((2, 3), chunks=(2, 3))]\n )\n\n with pytest.raises(ValueError):\n da.concatenate(\n [da.zeros((1, 2), chunks=(1, 2)), da.zeros((3, 2), chunks=(3, 2))], axis=1\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_stack_errs_test_blockwise_with_numpy_arrays.assert_any_x_is_v_for_v_i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_stack_errs_test_blockwise_with_numpy_arrays.assert_any_x_is_v_for_v_i", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3692, "end_line": 3710, "span_ids": ["test_stack_errs", "test_blockwise_with_numpy_arrays"], "tokens": 159}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_stack_errs():\n with pytest.raises(ValueError) as e:\n da.stack([da.zeros((2,), chunks=2)] * 10 + [da.zeros((3,), chunks=3)] * 10)\n\n assert (\n str(e.value)\n == \"Stacked arrays must have the same shape. 
The first array had shape (2,), while array 11 has shape (3,).\"\n )\n assert len(str(e.value)) < 105\n\n\ndef test_blockwise_with_numpy_arrays():\n x = np.ones(10)\n y = da.ones(10, chunks=(5,))\n\n assert_eq(x + y, x + x)\n\n s = da.sum(x)\n assert any(x is v for v in s.dask.values())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_elemwise_with_lists_test_elemwise_with_lists.assert_eq_x3_d3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_elemwise_with_lists_test_elemwise_with_lists.assert_eq_x3_d3_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3713, "end_line": 3727, "span_ids": ["test_elemwise_with_lists"], "tokens": 176}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"chunks\", (100, 6))\n@pytest.mark.parametrize(\"other\", [[0, 0, 1], [2, 1, 3], (0, 0, 1)])\ndef test_elemwise_with_lists(chunks, other):\n x = np.arange(12).reshape((4, 3))\n d = da.arange(12, chunks=chunks).reshape((4, 3))\n\n x2 = np.vstack([x[:, 0], x[:, 1], x[:, 2]]).T\n d2 = da.vstack([d[:, 0], d[:, 1], d[:, 2]]).T\n\n assert_eq(x2, d2)\n\n x3 = x2 * other\n d3 = d2 * other\n\n assert_eq(x3, d3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_constructor_plugin_test_constructor_plugin.assert_len_L_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_constructor_plugin_test_constructor_plugin.assert_len_L_2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3730, "end_line": 3744, "span_ids": ["test_constructor_plugin"], "tokens": 114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_constructor_plugin():\n L = []\n L2 = []\n with dask.config.set(array_plugins=[L.append, L2.append]):\n x = da.ones(10, chunks=5)\n y = x + 1\n\n assert L == L2 == [x, y]\n\n with dask.config.set(array_plugins=[lambda x: x.compute()]):\n x = da.ones(10, chunks=5)\n y = x + 1\n\n assert isinstance(y, np.ndarray)\n assert len(L) == 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_no_warnings_on_metadata_test_meta.assert_a_nbytes_1000": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_no_warnings_on_metadata_test_meta.assert_a_nbytes_1000", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3747, "end_line": 3776, "span_ids": ["test_no_warnings_on_metadata", "test_delayed_array_key_hygeine", "test_meta", "test_empty_chunks_in_array_len"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_no_warnings_on_metadata():\n x = da.ones(5, chunks=3)\n with warnings.catch_warnings(record=True) as record:\n da.arccos(x)\n\n assert not record\n\n\ndef test_delayed_array_key_hygeine():\n a = da.zeros((1,), chunks=(1,))\n d = delayed(identity)(a)\n b = da.from_delayed(d, shape=a.shape, dtype=a.dtype)\n assert_eq(a, b)\n\n\ndef test_empty_chunks_in_array_len():\n x = da.ones((), chunks=())\n with pytest.raises(TypeError) as exc_info:\n len(x)\n\n err_msg = \"len() of unsized object\"\n assert err_msg in str(exc_info.value)\n\n\n@pytest.mark.parametrize(\"dtype\", [None, [(\"a\", \"f4\"), (\"b\", object)]])\ndef test_meta(dtype):\n a = da.zeros((1,), chunks=(1,))\n assert a._meta.dtype == a.dtype\n assert isinstance(a._meta, np.ndarray)\n assert a.nbytes < 1000", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_normalize_chunks_auto_1d_test_normalize_chunks_auto_1d.assert_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_normalize_chunks_auto_1d_test_normalize_chunks_auto_1d.assert_result_expecte", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3779, "end_line": 3792, "span_ids": ["test_normalize_chunks_auto_1d"], "tokens": 194}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"shape,limit,expected\",\n [\n (100, 10, (10,) * 10),\n (20, 10, (10, 10)),\n (20, 5, (5, 5, 5, 5)),\n (24, 5, (4, 4, 4, 4, 4, 4)), # common factor is close, use it\n (23, 5, (5, 5, 5, 5, 3)), # relatively prime, don't use 1s\n (1000, 167, (125,) * 8), # find close value\n ],\n)\ndef test_normalize_chunks_auto_1d(shape, limit, 
expected):\n result = normalize_chunks(\"auto\", (shape,), limit=limit, dtype=np.uint8)\n assert result == (expected,)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_normalize_chunks_auto_2d_test_normalize_chunks_auto_2d.assert_result_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_normalize_chunks_auto_2d_test_normalize_chunks_auto_2d.assert_result_expected", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3795, "end_line": 3810, "span_ids": ["test_normalize_chunks_auto_2d"], "tokens": 187}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"shape,chunks,limit,expected\",\n [\n ((20, 20), (\"auto\", 2), 20, ((10, 10), (2,) * 10)),\n (\n (20, 20),\n (\"auto\", (2, 2, 2, 2, 2, 5, 5)),\n 20,\n ((4, 4, 4, 4, 4), (2, 2, 2, 2, 2, 5, 5)),\n ),\n ((1, 20), \"auto\", 10, ((1,), (10, 10))),\n ],\n)\ndef test_normalize_chunks_auto_2d(shape, chunks, limit, expected):\n result = normalize_chunks(chunks, shape, limit=limit, dtype=\"uint8\")\n assert result == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_normalize_chunks_auto_3d_test_normalize_chunks_auto_3d.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_normalize_chunks_auto_3d_test_normalize_chunks_auto_3d.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3813, "end_line": 3822, "span_ids": ["test_normalize_chunks_auto_3d"], "tokens": 118}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_normalize_chunks_auto_3d():\n result = normalize_chunks(\n (\"auto\", \"auto\", 2), (20, 20, 20), limit=200, dtype=\"uint8\"\n )\n expected = ((10, 10), (10, 10), (2,) * 10)\n assert result == expected\n\n result = normalize_chunks(\"auto\", (20, 20, 20), limit=8, dtype=\"uint8\")\n expected = ((2,) * 10,) * 3\n assert result == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, 
"__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_constructors_chunks_dict_test_zarr_return_stored.with_tmpdir_as_d_.assert_a2_chunks_a_chu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_constructors_chunks_dict_test_zarr_return_stored.with_tmpdir_as_d_.assert_a2_chunks_a_chu", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3825, "end_line": 3895, "span_ids": ["test_from_array_chunks_dict", "test_from_zarr_name", "test_normalize_chunks_object_dtype", "test_normalize_chunks_nan", "test_zarr_return_stored", "test_zarr_roundtrip", "test_from_zarr_unique_name", "test_constructors_chunks_dict", "test_normalize_chunks_tuples_of_tuples"], "tokens": 716}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_constructors_chunks_dict():\n x = da.ones((20, 20), chunks={0: 10, 1: 5})\n assert x.chunks == ((10, 10), (5, 5, 5, 5))\n\n x = da.ones((20, 20), chunks={0: 10, 1: \"auto\"})\n assert x.chunks == ((10, 10), (20,))\n\n\ndef test_from_array_chunks_dict():\n with dask.config.set({\"array.chunk-size\": \"128kiB\"}):\n x = np.empty((100, 100, 100))\n y = da.from_array(x, chunks={0: 10, 1: -1, 2: \"auto\"})\n z = da.from_array(x, chunks=(10, 100, 10))\n assert y.chunks == z.chunks\n\n\n@pytest.mark.parametrize(\"dtype\", [object, [(\"a\", object), (\"b\", int)]])\ndef test_normalize_chunks_object_dtype(dtype):\n x = np.array([\"a\", \"abc\"], dtype=object)\n with pytest.raises(NotImplementedError):\n da.from_array(x, chunks=\"auto\")\n\n\ndef test_normalize_chunks_tuples_of_tuples():\n result = normalize_chunks(((2, 3, 5), \"auto\"), (10, 10), limit=10, dtype=np.uint8)\n expected = ((2, 3, 5), (2, 2, 2, 2, 2))\n assert result == expected\n\n\ndef test_normalize_chunks_nan():\n with pytest.raises(ValueError) as info:\n normalize_chunks(\"auto\", (np.nan,), limit=10, dtype=np.uint8)\n assert \"auto\" in str(info.value)\n with pytest.raises(ValueError) as info:\n normalize_chunks(((np.nan, np.nan), \"auto\"), (10, 10), limit=10, dtype=np.uint8)\n assert \"auto\" in str(info.value)\n\n\ndef test_from_zarr_unique_name():\n zarr = pytest.importorskip(\"zarr\")\n a = zarr.array([1, 2, 3])\n b = zarr.array([4, 5, 6])\n\n assert da.from_zarr(a).name != da.from_zarr(b).name\n\n\ndef test_from_zarr_name():\n zarr = pytest.importorskip(\"zarr\")\n a = zarr.array([1, 2, 3])\n assert da.from_zarr(a, name=\"foo\").name == \"foo\"\n\n\ndef test_zarr_roundtrip():\n pytest.importorskip(\"zarr\")\n with tmpdir() as d:\n a = da.zeros((3, 3), chunks=(1, 1))\n a.to_zarr(d)\n a2 = da.from_zarr(d)\n assert_eq(a, a2)\n assert a2.chunks == a.chunks\n\n\n@pytest.mark.parametrize(\"compute\", [False, True])\ndef test_zarr_return_stored(compute):\n pytest.importorskip(\"zarr\")\n with tmpdir() as d:\n a = da.zeros((3, 3), chunks=(1, 1))\n a2 = a.to_zarr(d, compute=compute, return_stored=True)\n assert isinstance(a2, Array)\n assert_eq(a, a2, check_graph=False)\n assert a2.chunks == a.chunks", "start_char_idx": null, 
"end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_to_zarr_delayed_creates_no_metadata_test_zarr_pass_mapper.with_tmpdir_as_d_.assert_a2_chunks_a_chu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_to_zarr_delayed_creates_no_metadata_test_zarr_pass_mapper.with_tmpdir_as_d_.assert_a2_chunks_a_chu", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3898, "end_line": 3948, "span_ids": ["test_zarr_existing_array", "test_read_zarr_chunks", "test_to_zarr_delayed_creates_no_metadata", "test_to_zarr_unknown_chunks_raises", "test_zarr_pass_mapper"], "tokens": 411}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_zarr_delayed_creates_no_metadata():\n pytest.importorskip(\"zarr\")\n with tmpdir() as d:\n a = da.from_array([42])\n result = a.to_zarr(d, compute=False)\n assert not os.listdir(d) # No .zarray file\n # Verify array still created upon compute.\n result.compute()\n a2 = da.from_zarr(d)\n assert_eq(a, a2)\n\n\ndef test_zarr_existing_array():\n zarr = pytest.importorskip(\"zarr\")\n c = (1, 1)\n a = da.ones((3, 3), chunks=c)\n z = zarr.zeros_like(a, chunks=c)\n a.to_zarr(z)\n a2 = da.from_zarr(z)\n assert_eq(a, a2)\n assert a2.chunks == a.chunks\n\n\ndef test_to_zarr_unknown_chunks_raises():\n pytest.importorskip(\"zarr\")\n a = da.random.random((10,), chunks=(3,))\n a = a[a > 0.5]\n with pytest.raises(ValueError, match=\"unknown chunk sizes\"):\n a.to_zarr({})\n\n\ndef test_read_zarr_chunks():\n pytest.importorskip(\"zarr\")\n a = da.zeros((9,), chunks=(3,))\n with tmpdir() as d:\n a.to_zarr(d)\n arr = da.from_zarr(d, chunks=(5,))\n assert arr.chunks == ((5, 4),)\n\n\ndef test_zarr_pass_mapper():\n pytest.importorskip(\"zarr\")\n import zarr.storage\n\n with tmpdir() as d:\n mapper = zarr.storage.DirectoryStore(d)\n a = da.zeros((3, 3), chunks=(1, 1))\n a.to_zarr(mapper)\n a2 = da.from_zarr(mapper)\n assert_eq(a, a2)\n assert a2.chunks == a.chunks", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_zarr_group_test_zarr_group.with_tmpdir_as_d_.assert_a2_chunks_a_chu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_zarr_group_test_zarr_group.with_tmpdir_as_d_.assert_a2_chunks_a_chu", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3951, "end_line": 3969, "span_ids": ["test_zarr_group"], "tokens": 204}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_zarr_group():\n zarr = pytest.importorskip(\"zarr\")\n with tmpdir() as d:\n a = da.zeros((3, 3), chunks=(1, 1))\n a.to_zarr(d, component=\"test\")\n with pytest.raises((OSError, ValueError)):\n a.to_zarr(d, component=\"test\", overwrite=False)\n a.to_zarr(d, component=\"test\", overwrite=True)\n\n # second time is fine, group exists\n a.to_zarr(d, component=\"test2\", overwrite=False)\n a.to_zarr(d, component=\"nested/test\", overwrite=False)\n group = zarr.open_group(d, mode=\"r\")\n assert list(group) == [\"nested\", \"test\", \"test2\"]\n assert \"test\" in group[\"nested\"]\n\n a2 = da.from_zarr(d, component=\"test\")\n assert_eq(a, a2)\n assert a2.chunks == a.chunks", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_regular_chunks_test_zarr_nocompute.with_tmpdir_as_d_.assert_a2_chunks_a_chu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_regular_chunks_test_zarr_nocompute.with_tmpdir_as_d_.assert_a2_chunks_a_chu", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3972, "end_line": 3999, "span_ids": ["test_regular_chunks", "test_zarr_nocompute"], "tokens": 240}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"data\",\n [\n [(), True],\n [((1,),), True],\n [((1, 1, 1),), True],\n [((1,), (1,)), True],\n [((2, 2, 1),), True],\n [((2, 2, 3),), False],\n [((1, 1, 1), (2, 2, 3)), False],\n [((1, 2, 1),), False],\n ],\n)\ndef test_regular_chunks(data):\n chunkset, expected = data\n assert da.core._check_regular_chunks(chunkset) == expected\n\n\ndef test_zarr_nocompute():\n pytest.importorskip(\"zarr\")\n with tmpdir() as d:\n a = da.zeros((3, 3), chunks=(1, 1))\n out = a.to_zarr(d, compute=False)\n assert isinstance(out, Delayed)\n dask.compute(out)\n a2 = da.from_zarr(d)\n assert_eq(a, a2)\n assert a2.chunks == a.chunks", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_tiledb_roundtrip_test_tiledb_roundtrip.None_2.assert_a_chunks_tdb_ch": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_tiledb_roundtrip_test_tiledb_roundtrip.None_2.assert_a_chunks_tdb_ch", "embedding": null, 
"metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 4002, "end_line": 4032, "span_ids": ["test_tiledb_roundtrip"], "tokens": 261}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tiledb_roundtrip():\n tiledb = pytest.importorskip(\"tiledb\")\n # 1) load with default chunking\n # 2) load from existing tiledb.DenseArray\n # 3) write to existing tiledb.DenseArray\n a = da.random.random((3, 3))\n with tmpdir() as uri:\n da.to_tiledb(a, uri)\n tdb = da.from_tiledb(uri)\n\n assert_eq(a, tdb)\n assert a.chunks == tdb.chunks\n\n # from tiledb.array\n with tiledb.open(uri) as t:\n tdb2 = da.from_tiledb(t)\n assert_eq(a, tdb2)\n\n with tmpdir() as uri2:\n with tiledb.empty_like(uri2, a) as t:\n a.to_tiledb(t)\n assert_eq(da.from_tiledb(uri2), a)\n\n # specific chunking\n with tmpdir() as uri:\n a = da.random.random((3, 3), chunks=(1, 1))\n a.to_tiledb(uri)\n tdb = da.from_tiledb(uri)\n\n assert_eq(a, tdb)\n assert a.chunks == tdb.chunks", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_tiledb_multiattr_test_tiledb_multiattr.with_tmpdir_as_uri_.assert_eq_np_mean_ar2_d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_tiledb_multiattr_test_tiledb_multiattr.with_tmpdir_as_uri_.assert_eq_np_mean_ar2_d", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 4035, "end_line": 4060, "span_ids": ["test_tiledb_multiattr"], "tokens": 263}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tiledb_multiattr():\n tiledb = pytest.importorskip(\"tiledb\")\n dom = tiledb.Domain(\n tiledb.Dim(\"x\", (0, 1000), tile=100), tiledb.Dim(\"y\", (0, 1000), tile=100)\n )\n schema = tiledb.ArraySchema(\n attrs=(tiledb.Attr(\"attr1\"), tiledb.Attr(\"attr2\")), domain=dom\n )\n\n with tmpdir() as uri:\n tiledb.DenseArray.create(uri, schema)\n tdb = tiledb.DenseArray(uri, \"w\")\n\n ar1 = np.random.randn(*tdb.schema.shape)\n ar2 = np.random.randn(*tdb.schema.shape)\n\n tdb[:] = {\"attr1\": ar1, \"attr2\": ar2}\n tdb = tiledb.DenseArray(uri, \"r\")\n\n # basic round-trip from dask.array\n d = da.from_tiledb(uri, attribute=\"attr2\")\n assert_eq(d, ar2)\n\n # smoke-test computation directly on the TileDB view\n d = da.from_tiledb(uri, attribute=\"attr2\")\n assert_eq(np.mean(ar2), d.mean().compute(scheduler=\"threads\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", 
"metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_blocks_indexer_test_blocks_indexer.with_pytest_raises_IndexE.x_blocks_100_100_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_blocks_indexer_test_blocks_indexer.with_pytest_raises_IndexE.x_blocks_100_100_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 4063, "end_line": 4094, "span_ids": ["test_blocks_indexer"], "tokens": 381}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_blocks_indexer():\n x = da.arange(10, chunks=2)\n\n assert isinstance(x.blocks[0], da.Array)\n\n assert_eq(x.blocks[0], x[:2])\n assert_eq(x.blocks[-1], x[-2:])\n assert_eq(x.blocks[:3], x[:6])\n assert_eq(x.blocks[[0, 1, 2]], x[:6])\n assert_eq(x.blocks[[3, 0, 2]], np.array([6, 7, 0, 1, 4, 5]))\n\n x = da.random.random((20, 20), chunks=(4, 5))\n assert_eq(x.blocks[0], x[:4])\n assert_eq(x.blocks[0, :3], x[:4, :15])\n assert_eq(x.blocks[:, :3], x[:, :15])\n\n x = da.ones((40, 40, 40), chunks=(10, 10, 10))\n assert_eq(x.blocks[0, :, 0], np.ones((10, 40, 10)))\n\n x = da.ones((2, 2), chunks=1)\n with pytest.raises(ValueError):\n x.blocks[[0, 1], [0, 1]]\n with pytest.raises(ValueError):\n x.blocks[np.array([0, 1]), [0, 1]]\n with pytest.raises(ValueError) as info:\n x.blocks[np.array([0, 1]), np.array([0, 1])]\n assert \"list\" in str(info.value)\n with pytest.raises(ValueError) as info:\n x.blocks[None, :, :]\n assert \"newaxis\" in str(info.value) and \"not supported\" in str(info.value)\n with pytest.raises(IndexError) as info:\n x.blocks[100, 100]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_partitions_indexer_test_partitions_indexer.with_pytest_raises_IndexE.x_partitions_100_100_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_partitions_indexer_test_partitions_indexer.with_pytest_raises_IndexE.x_partitions_100_100_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 4097, "end_line": 4129, "span_ids": ["test_partitions_indexer"], "tokens": 412}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_partitions_indexer():\n # .partitions is an alias of .blocks for dask arrays\n x = da.arange(10, chunks=2)\n\n assert 
isinstance(x.partitions[0], da.Array)\n\n assert_eq(x.partitions[0], x[:2])\n assert_eq(x.partitions[-1], x[-2:])\n assert_eq(x.partitions[:3], x[:6])\n assert_eq(x.partitions[[0, 1, 2]], x[:6])\n assert_eq(x.partitions[[3, 0, 2]], np.array([6, 7, 0, 1, 4, 5]))\n\n x = da.random.random((20, 20), chunks=(4, 5))\n assert_eq(x.partitions[0], x[:4])\n assert_eq(x.partitions[0, :3], x[:4, :15])\n assert_eq(x.partitions[:, :3], x[:, :15])\n\n x = da.ones((40, 40, 40), chunks=(10, 10, 10))\n assert_eq(x.partitions[0, :, 0], np.ones((10, 40, 10)))\n\n x = da.ones((2, 2), chunks=1)\n with pytest.raises(ValueError):\n x.partitions[[0, 1], [0, 1]]\n with pytest.raises(ValueError):\n x.partitions[np.array([0, 1]), [0, 1]]\n with pytest.raises(ValueError) as info:\n x.partitions[np.array([0, 1]), np.array([0, 1])]\n assert \"list\" in str(info.value)\n with pytest.raises(ValueError) as info:\n x.partitions[None, :, :]\n assert \"newaxis\" in str(info.value) and \"not supported\" in str(info.value)\n with pytest.raises(IndexError) as info:\n x.partitions[100, 100]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_dask_array_holds_scipy_sparse_containers_test_dask_array_holds_scipy_sparse_containers.assert_zz_xx_T_all_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_dask_array_holds_scipy_sparse_containers_test_dask_array_holds_scipy_sparse_containers.assert_zz_xx_T_all_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 4132, "end_line": 4153, "span_ids": ["test_dask_array_holds_scipy_sparse_containers"], "tokens": 221}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.filterwarnings(\"ignore:the matrix subclass:PendingDeprecationWarning\")\ndef test_dask_array_holds_scipy_sparse_containers():\n pytest.importorskip(\"scipy.sparse\")\n import scipy.sparse\n\n x = da.random.random((1000, 10), chunks=(100, 10))\n x[x < 0.9] = 0\n xx = x.compute()\n y = x.map_blocks(scipy.sparse.csr_matrix)\n\n vs = y.to_delayed().flatten().tolist()\n values = dask.compute(*vs, scheduler=\"single-threaded\")\n assert all(isinstance(v, scipy.sparse.csr_matrix) for v in values)\n\n yy = y.compute(scheduler=\"single-threaded\")\n assert isinstance(yy, scipy.sparse.spmatrix)\n assert (yy == xx).all()\n\n z = x.T.map_blocks(scipy.sparse.csr_matrix)\n zz = z.compute(scheduler=\"single-threaded\")\n assert isinstance(zz, scipy.sparse.spmatrix)\n assert (zz == xx.T).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_scipy_sparse_concatenate_test_scipy_sparse_concatenate.assert_z_z_expected_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_scipy_sparse_concatenate_test_scipy_sparse_concatenate.assert_z_z_expected_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 4156, "end_line": 4180, "span_ids": ["test_scipy_sparse_concatenate"], "tokens": 201}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"axis\", [0, 1])\ndef test_scipy_sparse_concatenate(axis):\n pytest.importorskip(\"scipy.sparse\")\n import scipy.sparse\n\n rs = da.random.RandomState(RandomState=np.random.RandomState)\n\n xs = []\n ys = []\n for i in range(2):\n x = rs.random((1000, 10), chunks=(100, 10))\n x[x < 0.9] = 0\n xs.append(x)\n ys.append(x.map_blocks(scipy.sparse.csr_matrix))\n\n z = da.concatenate(ys, axis=axis)\n z = z.compute()\n\n if axis == 0:\n sp_concatenate = scipy.sparse.vstack\n elif axis == 1:\n sp_concatenate = scipy.sparse.hstack\n z_expected = sp_concatenate([scipy.sparse.csr_matrix(e.compute()) for e in xs])\n\n assert (z != z_expected).nnz == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_3851_test_map_blocks_large_inputs_delayed.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_3851_test_map_blocks_large_inputs_delayed.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 4183, "end_line": 4207, "span_ids": ["test_map_blocks_large_inputs_delayed", "test_3925", "test_3851"], "tokens": 238}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_3851():\n with warnings.catch_warnings() as record:\n Y = da.random.random((10, 10), chunks=\"auto\")\n da.argmax(Y, axis=0).compute()\n\n assert not record\n\n\ndef test_3925():\n x = da.from_array(np.array([\"a\", \"b\", \"c\"], dtype=object), chunks=-1)\n assert (x[0] == x[0]).compute(scheduler=\"sync\")\n\n\ndef test_map_blocks_large_inputs_delayed():\n a = da.ones(10, chunks=(5,))\n b = np.ones(1000000)\n\n c = a.map_blocks(add, b)\n assert any(b is v for v in c.dask.values())\n assert repr(dict(c.dask)).count(repr(b)[:10]) == 1 # only one occurrence\n\n d = a.map_blocks(lambda x, y: x + y.sum(), y=b)\n assert_eq(d, d)\n 
assert any(b is v for v in d.dask.values())\n    assert repr(dict(d.dask)).count(repr(b)[:10]) == 1  # only one occurrence", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_blockwise_large_inputs_delayed_test_blockwise_large_inputs_delayed.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_blockwise_large_inputs_delayed_test_blockwise_large_inputs_delayed.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 4210, "end_line": 4220, "span_ids": ["test_blockwise_large_inputs_delayed"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_blockwise_large_inputs_delayed():\n    a = da.ones(10, chunks=(5,))\n    b = np.ones(1000000)\n\n    c = da.blockwise(add, \"i\", a, \"i\", b, None, dtype=a.dtype)\n    assert any(b is v for v in c.dask.values())\n    assert repr(dict(c.dask)).count(repr(b)[:10]) == 1  # only one occurrence\n\n    d = da.blockwise(lambda x, y: x + y, \"i\", a, \"i\", y=b, dtype=a.dtype)\n    assert any(b is v for v in d.dask.values())\n    assert repr(dict(d.dask)).count(repr(b)[:10]) == 1  # only one occurrence", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_slice_reversed_test_map_blocks_chunks.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_slice_reversed_test_map_blocks_chunks.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 4223, "end_line": 4240, "span_ids": ["test_slice_reversed", "test_map_blocks_chunks"], "tokens": 138}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_slice_reversed():\n    x = da.ones(10, chunks=-1)\n    y = x[6:3]\n\n    assert_eq(y, np.ones(0))\n\n\ndef test_map_blocks_chunks():\n    x = da.arange(400, chunks=(100,))\n    y = da.arange(40, chunks=(10,))\n\n    def func(a, b):\n        return np.array([a.max(), b.max()])\n\n    assert_eq(\n        da.map_blocks(func, x, y, chunks=(2,), dtype=x.dtype),\n        np.array([99, 9, 199, 19, 299, 29, 399, 39]),\n    )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}",
"metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_nbytes_auto_test_nbytes_auto.None_3.normalize_chunks_10B_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_nbytes_auto_test_nbytes_auto.None_3.normalize_chunks_10B_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 4243, "end_line": 4262, "span_ids": ["test_nbytes_auto"], "tokens": 355}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_nbytes_auto():\n chunks = normalize_chunks(\"800B\", shape=(500,), dtype=\"float64\")\n assert chunks == ((100, 100, 100, 100, 100),)\n chunks = normalize_chunks(\"200B\", shape=(10, 10), dtype=\"float64\")\n assert chunks == ((5, 5), (5, 5))\n chunks = normalize_chunks((5, \"200B\"), shape=(10, 10), dtype=\"float64\")\n assert chunks == ((5, 5), (5, 5))\n chunks = normalize_chunks(\"33B\", shape=(10, 10), dtype=\"float64\")\n assert chunks == ((2, 2, 2, 2, 2), (2, 2, 2, 2, 2))\n chunks = normalize_chunks(\"1800B\", shape=(10, 20, 30), dtype=\"float64\")\n assert chunks == ((5, 5), (5, 5, 5, 5), (6, 6, 6, 6, 6))\n\n with pytest.raises(ValueError):\n normalize_chunks(\"10B\", shape=(10,), limit=20, dtype=\"float64\")\n with pytest.raises(ValueError):\n normalize_chunks(\"100B\", shape=(10, 10), limit=20, dtype=\"float64\")\n with pytest.raises(ValueError):\n normalize_chunks((\"100B\", \"10B\"), shape=(10, 10), dtype=\"float64\")\n with pytest.raises(ValueError):\n normalize_chunks((\"10B\", \"10B\"), shape=(10, 10), limit=20, dtype=\"float64\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_auto_chunks_h5py_test_auto_chunks_h5py.with_tmpfile_hdf5_as_.None_1.with_dask_config_set_ar.assert_x_chunks_256_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_auto_chunks_h5py_test_auto_chunks_h5py.with_tmpfile_hdf5_as_.None_1.with_dask_config_set_ar.assert_x_chunks_256_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 4265, "end_line": 4280, "span_ids": ["test_auto_chunks_h5py"], "tokens": 171}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_auto_chunks_h5py():\n h5py = pytest.importorskip(\"h5py\")\n\n with tmpfile(\".hdf5\") as fn:\n with h5py.File(fn, mode=\"a\") as 
f:\n d = f.create_dataset(\n \"/x\", shape=(1000, 1000), chunks=(32, 64), dtype=\"float64\"\n )\n d[:] = 1\n\n with h5py.File(fn, mode=\"a\") as f:\n d = f[\"x\"]\n with dask.config.set({\"array.chunk-size\": \"1 MiB\"}):\n x = da.from_array(d)\n assert isinstance(x._meta, np.ndarray)\n assert x.chunks == ((256, 256, 256, 232), (512, 488))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_no_warnings_from_blockwise_test_no_warnings_from_blockwise.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_no_warnings_from_blockwise_test_no_warnings_from_blockwise.None_2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 4283, "end_line": 4297, "span_ids": ["test_no_warnings_from_blockwise"], "tokens": 159}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_no_warnings_from_blockwise():\n with pytest.warns(None) as record:\n x = da.ones((3, 10, 10), chunks=(3, 2, 2))\n da.map_blocks(lambda y: np.mean(y, axis=0), x, dtype=x.dtype, drop_axis=0)\n assert not record\n\n with pytest.warns(None) as record:\n x = da.ones((15, 15), chunks=(5, 5))\n (x.dot(x.T + 1) - x.mean(axis=0)).std()\n assert not record\n\n with pytest.warns(None) as record:\n x = da.ones((1,), chunks=(1,))\n 1 / x[0]\n assert not record", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_from_array_meta_test_compute_chunk_sizes.assert_isinstance_z_chunk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_from_array_meta_test_compute_chunk_sizes.assert_isinstance_z_chunk", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 4300, "end_line": 4321, "span_ids": ["test_from_array_meta", "test_compute_chunk_sizes"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(_numpy_120, reason=\"https://github.com/pydata/sparse/issues/383\")\ndef test_from_array_meta():\n sparse = pytest.importorskip(\"sparse\")\n x = np.ones(10)\n meta = sparse.COO.from_numpy(x)\n y = da.from_array(x, meta=meta)\n assert isinstance(y._meta, sparse.COO)\n\n\ndef 
test_compute_chunk_sizes():\n x = da.from_array(np.linspace(-1, 1, num=50), chunks=10)\n y = x[x < 0]\n assert np.isnan(y.shape[0])\n assert y.chunks == ((np.nan,) * 5,)\n\n z = y.compute_chunk_sizes()\n assert y is z\n assert z.chunks == ((10, 10, 5, 0, 0),)\n assert len(z) == 25\n\n # check that dtype of chunk dimensions is `int`\n assert isinstance(z.chunks[0][0], int)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_compute_chunk_sizes_2d_array_test_compute_chunk_sizes_2d_array.assert_Z_shape_4_4_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_compute_chunk_sizes_2d_array_test_compute_chunk_sizes_2d_array.assert_Z_shape_4_4_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 4324, "end_line": 4337, "span_ids": ["test_compute_chunk_sizes_2d_array"], "tokens": 164}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_compute_chunk_sizes_2d_array():\n X = np.linspace(-1, 1, num=9 * 4).reshape(9, 4)\n X = da.from_array(X, chunks=(3, 4))\n idx = X.sum(axis=1) > 0\n Y = X[idx]\n\n # This is very similar to the DataFrame->Array conversion\n assert np.isnan(Y.shape[0]) and Y.shape[1] == 4\n assert Y.chunks == ((np.nan, np.nan, np.nan), (4,))\n\n Z = Y.compute_chunk_sizes()\n assert Y is Z\n assert Z.chunks == ((0, 1, 3), (4,))\n assert Z.shape == (4, 4)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_compute_chunk_sizes_3d_array_test_compute_chunk_sizes_3d_array.assert_Z_chunks_4_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_compute_chunk_sizes_3d_array_test_compute_chunk_sizes_3d_array.assert_Z_chunks_4_4", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 4340, "end_line": 4359, "span_ids": ["test_compute_chunk_sizes_3d_array"], "tokens": 266}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_compute_chunk_sizes_3d_array(N=8):\n X = np.linspace(-1, 2, num=8 * 8 * 8).reshape(8, 8, 8)\n X = da.from_array(X, chunks=(4, 4, 4))\n idx = X.sum(axis=0).sum(axis=0) > 0\n Y = X[idx]\n idx = X.sum(axis=1).sum(axis=1) < 0\n 
Y = Y[:, idx]\n idx = X.sum(axis=2).sum(axis=1) > 0.1\n Y = Y[:, :, idx]\n\n # Checking to make sure shapes are different on outputs\n assert Y.compute().shape == (8, 3, 5)\n assert X.compute().shape == (8, 8, 8)\n\n assert Y.chunks == ((np.nan, np.nan),) * 3\n assert all(np.isnan(s) for s in Y.shape)\n Z = Y.compute_chunk_sizes()\n assert Z is Y\n assert Z.shape == (8, 3, 5)\n assert Z.chunks == ((4, 4), (3, 0), (1, 4))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py__known_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py__known_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 4360, "end_line": 4469, "span_ids": ["test_compute_chunk_sizes_warning_fixes_to_zarr", "_known", "test_map_blocks_dataframe", "test_rechunk_auto", "test_compute_chunk_sizes_warning_fixes_concatenate", "unknown", "test_compute_chunk_sizes_warning_fixes_reduction", "test_compute_chunk_sizes_warning_fixes_rechunk", "test_compute_chunk_sizes_warning_fixes_slicing", "test_map_blocks_series", "test_compute_chunk_sizes_warning_fixes_reshape", "test_compute_chunk_sizes_warning_fixes_to_svg"], "tokens": 771}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _known(num=50):\n return da.from_array(np.linspace(-1, 1, num=num), chunks=10)\n\n\n@pytest.fixture()\ndef unknown():\n x = _known()\n y = x[x < 0]\n assert y.chunks == ((np.nan,) * 5,)\n return y\n\n\ndef test_compute_chunk_sizes_warning_fixes_rechunk(unknown):\n y = unknown\n with pytest.raises(ValueError, match=\"compute_chunk_sizes\"):\n y.rechunk(\"auto\")\n y.compute_chunk_sizes()\n y.rechunk(\"auto\")\n\n\ndef test_compute_chunk_sizes_warning_fixes_to_zarr(unknown):\n pytest.importorskip(\"zarr\")\n y = unknown\n with pytest.raises(ValueError, match=\"compute_chunk_sizes\"):\n with StringIO() as f:\n y.to_zarr(f)\n y.compute_chunk_sizes()\n\n with pytest.raises(ValueError, match=\"irregular chunking\"):\n with StringIO() as f:\n y.to_zarr(f)\n\n\ndef test_compute_chunk_sizes_warning_fixes_to_svg(unknown):\n y = unknown\n with pytest.raises(NotImplementedError, match=\"compute_chunk_sizes\"):\n y.to_svg()\n y.compute_chunk_sizes()\n y.to_svg()\n\n\ndef test_compute_chunk_sizes_warning_fixes_concatenate():\n x = _known(num=100).reshape(10, 10)\n idx = x.sum(axis=0) > 0\n y1 = x[idx]\n y2 = x[idx]\n with pytest.raises(ValueError, match=\"compute_chunk_sizes\"):\n da.concatenate((y1, y2), axis=1)\n y1.compute_chunk_sizes()\n y2.compute_chunk_sizes()\n da.concatenate((y1, y2), axis=1)\n\n\ndef test_compute_chunk_sizes_warning_fixes_reduction(unknown):\n y = unknown\n with pytest.raises(ValueError, match=\"compute_chunk_sizes\"):\n da.argmin(y)\n y.compute_chunk_sizes()\n da.argmin(y)\n\n\ndef test_compute_chunk_sizes_warning_fixes_reshape(unknown):\n y = unknown\n with 
pytest.raises(ValueError, match=\"compute_chunk_sizes\"):\n da.reshape(y, (5, 5))\n y.compute_chunk_sizes()\n da.reshape(y, (5, 5))\n\n\ndef test_compute_chunk_sizes_warning_fixes_slicing():\n x = _known(num=100).reshape(10, 10)\n y = x[x.sum(axis=0) < 0]\n with pytest.raises(ValueError, match=\"compute_chunk_sizes\"):\n y[:3, :]\n y.compute_chunk_sizes()\n y[:3, :]\n\n\ndef test_rechunk_auto():\n x = da.ones(10, chunks=(1,))\n y = x.rechunk()\n\n assert y.npartitions == 1\n\n\ndef test_map_blocks_series():\n pd = pytest.importorskip(\"pandas\")\n import dask.dataframe as dd\n from dask.dataframe.utils import assert_eq as dd_assert_eq\n\n x = da.ones(10, chunks=(5,))\n s = x.map_blocks(pd.Series)\n assert isinstance(s, dd.Series)\n assert s.npartitions == x.npartitions\n\n dd_assert_eq(s, s)\n\n\n@pytest.mark.xfail(reason=\"need to remove singleton index dimension\")\ndef test_map_blocks_dataframe():\n pd = pytest.importorskip(\"pandas\")\n import dask.dataframe as dd\n from dask.dataframe.utils import assert_eq as dd_assert_eq\n\n x = da.ones((10, 2), chunks=(5, 2))\n s = x.map_blocks(pd.DataFrame)\n assert isinstance(s, dd.DataFrame)\n assert s.npartitions == x.npartitions\n dd_assert_eq(s, s)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_pytest_test_array_function_dask.assert_eq_res_y_res_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_pytest_test_array_function_dask.assert_eq_res_y_res_x_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_function.py", "file_name": "test_array_function.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 45, "span_ids": ["test_array_function_dask", "imports"], "tokens": 344}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\nimport numpy as np\n\nimport dask.array as da\nfrom dask.array.utils import assert_eq, IS_NEP18_ACTIVE\nfrom dask.array.numpy_compat import _numpy_120\n\nfrom .test_dispatch import EncapsulateNDArray, WrappedArray\n\n\nmissing_arrfunc_cond = not IS_NEP18_ACTIVE\nmissing_arrfunc_reason = \"NEP-18 support is not available in NumPy\"\n\n\n@pytest.mark.skipif(missing_arrfunc_cond, reason=missing_arrfunc_reason)\n@pytest.mark.parametrize(\n \"func\",\n [\n lambda x: np.concatenate([x, x, x]),\n lambda x: np.cov(x, x),\n lambda x: np.dot(x, x),\n lambda x: np.dstack(x),\n lambda x: np.flip(x, axis=0),\n lambda x: np.hstack(x),\n lambda x: np.matmul(x, x),\n lambda x: np.mean(x),\n lambda x: np.stack([x, x]),\n lambda x: np.block([x, x]),\n lambda x: np.sum(x),\n lambda x: np.var(x),\n lambda x: np.vstack(x),\n lambda x: np.linalg.norm(x),\n lambda x: np.min(x),\n lambda x: np.amin(x),\n lambda x: np.round(x),\n ],\n)\ndef test_array_function_dask(func):\n x = np.random.random((100, 100))\n y = da.from_array(x, chunks=(50, 50))\n res_x = func(x)\n res_y = func(y)\n\n assert isinstance(res_y, da.Array)\n 
assert_eq(res_y, res_x)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_array_function_fft_test_array_function_fft.assert_eq_res_y_res_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_array_function_fft_test_array_function_fft.assert_eq_res_y_res_x_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_function.py", "file_name": "test_array_function.py", "file_type": "text/x-python", "category": "test", "start_line": 48, "end_line": 58, "span_ids": ["test_array_function_fft"], "tokens": 114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(missing_arrfunc_cond, reason=missing_arrfunc_reason)\n@pytest.mark.parametrize(\"func\", [np.fft.fft, np.fft.fft2])\ndef test_array_function_fft(func):\n x = np.random.random((100, 100))\n y = da.from_array(x, chunks=(100, 100))\n res_x = func(x)\n res_y = func(y)\n\n if func.__module__ != \"mkl_fft._numpy_fft\":\n assert isinstance(res_y, da.Array)\n assert_eq(res_y, res_x)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_array_notimpl_function_dask_test_array_notimpl_function_dask.with_pytest_warns_.func_y_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_array_notimpl_function_dask_test_array_notimpl_function_dask.with_pytest_warns_.func_y_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_function.py", "file_name": "test_array_function.py", "file_type": "text/x-python", "category": "test", "start_line": 61, "end_line": 77, "span_ids": ["test_array_notimpl_function_dask"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(missing_arrfunc_cond, reason=missing_arrfunc_reason)\n@pytest.mark.parametrize(\n \"func\",\n [\n lambda x: np.min_scalar_type(x),\n lambda x: np.linalg.det(x),\n lambda x: np.linalg.eigvals(x),\n ],\n)\ndef test_array_notimpl_function_dask(func):\n x = np.random.random((100, 100))\n y = da.from_array(x, chunks=(50, 50))\n\n with pytest.warns(\n FutureWarning, match=\"The `.*` function is not implemented by Dask\"\n ):\n func(y)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, 
"__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_array_function_sparse_test_array_function_sparse.assert_eq_func_x_func_y": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_array_function_sparse_test_array_function_sparse.assert_eq_func_x_func_y", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_function.py", "file_name": "test_array_function.py", "file_type": "text/x-python", "category": "test", "start_line": 80, "end_line": 91, "span_ids": ["test_array_function_sparse"], "tokens": 117}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(missing_arrfunc_cond, reason=missing_arrfunc_reason)\n@pytest.mark.parametrize(\n \"func\", [lambda x: np.real(x), lambda x: np.imag(x), lambda x: np.transpose(x)]\n)\ndef test_array_function_sparse(func):\n sparse = pytest.importorskip(\"sparse\")\n x = da.random.random((500, 500), chunks=(100, 100))\n x[x < 0.9] = 0\n\n y = x.map_blocks(sparse.COO)\n\n assert_eq(func(x), func(y))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_array_function_sparse_tensordot_test_array_function_sparse_tensordot.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_array_function_sparse_tensordot_test_array_function_sparse_tensordot.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_function.py", "file_name": "test_array_function.py", "file_type": "text/x-python", "category": "test", "start_line": 94, "end_line": 108, "span_ids": ["test_array_function_sparse_tensordot"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(missing_arrfunc_cond, reason=missing_arrfunc_reason)\n@pytest.mark.xfail(_numpy_120, reason=\"sparse-383\")\ndef test_array_function_sparse_tensordot():\n sparse = pytest.importorskip(\"sparse\")\n x = np.random.random((2, 3, 4))\n x[x < 0.9] = 0\n y = np.random.random((4, 3, 2))\n y[y < 0.9] = 0\n\n xx = sparse.COO(x)\n yy = sparse.COO(y)\n\n assert_eq(\n np.tensordot(x, y, axes=(2, 0)), np.tensordot(xx, yy, axes=(2, 0)).todense()\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_array_function_cupy_svd_test_array_function_cupy_svd.assert_eq_v_v_base_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_array_function_cupy_svd_test_array_function_cupy_svd.assert_eq_v_v_base_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_function.py", "file_name": "test_array_function.py", "file_type": "text/x-python", "category": "test", "start_line": 111, "end_line": 124, "span_ids": ["test_array_function_cupy_svd"], "tokens": 140}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(missing_arrfunc_cond, reason=missing_arrfunc_reason)\n@pytest.mark.parametrize(\"chunks\", [(100, 100), (500, 100)])\ndef test_array_function_cupy_svd(chunks):\n cupy = pytest.importorskip(\"cupy\")\n x = cupy.random.random((500, 100))\n\n y = da.from_array(x, chunks=chunks, asarray=False)\n\n u_base, s_base, v_base = da.linalg.svd(y)\n u, s, v = np.linalg.svd(y)\n\n assert_eq(u, u_base)\n assert_eq(s, s_base)\n assert_eq(v, v_base)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_unregistered_func_test_unregistered_func.assert_eq_xx_yy_check_m": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_unregistered_func_test_unregistered_func.assert_eq_xx_yy_check_m", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_function.py", "file_name": "test_array_function.py", "file_type": "text/x-python", "category": "test", "start_line": 126, "end_line": 164, "span_ids": ["test_unregistered_func"], "tokens": 306}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(missing_arrfunc_cond, reason=missing_arrfunc_reason)\n@pytest.mark.parametrize(\n \"func\",\n [\n lambda x: np.concatenate([x, x, x]),\n lambda x: np.cov(x, x),\n lambda x: np.dot(x, x),\n lambda x: np.dstack(x),\n lambda x: np.flip(x, axis=0),\n lambda x: np.hstack(x),\n lambda x: np.matmul(x, x),\n lambda x: np.mean(x),\n lambda x: np.stack([x, x]),\n lambda x: np.sum(x),\n lambda x: np.var(x),\n lambda x: np.vstack(x),\n lambda x: np.linalg.norm(x),\n ],\n)\ndef test_unregistered_func(func):\n # Wrap a procol-based encapsulated ndarray\n x = EncapsulateNDArray(np.random.random((100, 100)))\n\n # See if Dask holds the array fine\n y = da.from_array(x, chunks=(50, 50))\n\n # Check if it's an equivalent array\n assert_eq(x, y, check_meta=False)\n\n # Perform two NumPy functions, one on the\n # Encapsulated array\n xx = 
func(x)\n\n # And one on the Dask array holding these\n # encapsulated arrays\n yy = func(y)\n\n # Check that they are equivalent arrays.\n assert_eq(xx, yy, check_meta=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_non_existent_func_test_non_existent_func.if_IS_NEP18_ACTIVE_.else_.assert_list_np_sort_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_non_existent_func_test_non_existent_func.if_IS_NEP18_ACTIVE_.else_.assert_list_np_sort_x_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_function.py", "file_name": "test_array_function.py", "file_type": "text/x-python", "category": "test", "start_line": 167, "end_line": 177, "span_ids": ["test_non_existent_func"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_non_existent_func():\n # Regression test for __array_function__ becoming default in numpy 1.17\n # dask has no sort function, so ensure that this still calls np.sort\n x = da.from_array(np.array([1, 2, 4, 3]), chunks=(2,))\n if IS_NEP18_ACTIVE:\n with pytest.warns(\n FutureWarning, match=\"The `numpy.sort` function is not implemented by Dask\"\n ):\n assert list(np.sort(x)) == [1, 2, 3, 4]\n else:\n assert list(np.sort(x)) == [1, 2, 3, 4]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_binary_function_type_precedence_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_binary_function_type_precedence_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_function.py", "file_name": "test_array_function.py", "file_type": "text/x-python", "category": "test", "start_line": 180, "end_line": 214, "span_ids": ["test_binary_function_type_precedence"], "tokens": 238}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(missing_arrfunc_cond, reason=missing_arrfunc_reason)\n@pytest.mark.parametrize(\n \"func\",\n [\n np.equal,\n np.matmul,\n np.dot,\n lambda x, y: np.stack([x, y]),\n ],\n)\n@pytest.mark.parametrize(\n \"arr_upcast, arr_downcast\",\n [\n (\n WrappedArray(np.random.random((10, 10))),\n da.random.random((10, 10), chunks=(5, 5)),\n ),\n (\n da.random.random((10, 10), chunks=(5, 5)),\n EncapsulateNDArray(np.random.random((10, 10))),\n ),\n (\n 
WrappedArray(np.random.random((10, 10))),\n EncapsulateNDArray(np.random.random((10, 10))),\n ),\n ],\n)\ndef test_binary_function_type_precedence(func, arr_upcast, arr_downcast):\n \"\"\" Test proper dispatch on binary NumPy functions\"\"\"\n assert (\n type(func(arr_upcast, arr_downcast))\n == type(func(arr_downcast, arr_upcast))\n == type(arr_upcast)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_utils.py_np_test_meta_from_array.assert_meta_from_array_np": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_utils.py_np_test_meta_from_array.assert_meta_from_array_np", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_utils.py", "file_name": "test_array_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 49, "span_ids": ["imports", "test_meta_from_array"], "tokens": 359}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\nimport pytest\n\nimport dask.array as da\nfrom dask.array.numpy_compat import _numpy_120\nfrom dask.array.utils import meta_from_array, assert_eq\n\nasarrays = [np.asarray]\n\ntry:\n import sparse\n\n asarrays.append(sparse.COO.from_numpy)\nexcept ImportError:\n pass\n\ntry:\n import cupy\n\n asarrays.append(cupy.asarray)\nexcept ImportError:\n pass\n\n\n@pytest.mark.parametrize(\"asarray\", asarrays)\ndef test_meta_from_array(asarray):\n if \"COO.from_numpy\" in str(asarray) and _numpy_120:\n raise pytest.xfail(reason=\"sparse-383\")\n\n x = np.array(1)\n assert meta_from_array(x, ndim=1).shape == (0,)\n\n x = np.ones((1, 2, 3), dtype=\"float32\")\n x = asarray(x)\n\n assert meta_from_array(x).shape == (0, 0, 0)\n assert meta_from_array(x).dtype == \"float32\"\n assert type(meta_from_array(x)) is type(x)\n\n assert meta_from_array(x, ndim=2).shape == (0, 0)\n assert meta_from_array(x, ndim=4).shape == (0, 0, 0, 0)\n assert meta_from_array(x, dtype=\"float64\").dtype == \"float64\"\n\n x = da.ones((1,))\n assert isinstance(meta_from_array(x), np.ndarray)\n\n assert meta_from_array(123) == 123\n assert meta_from_array(\"foo\") == \"foo\"\n assert meta_from_array(np.dtype(\"float32\")) == np.dtype(\"float32\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_collections_test_optimize_blockwise.assert_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_collections_test_optimize_blockwise.assert_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_atop.py", "file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 202, "span_ids": ["imports", 
"test_optimize_blockwise", "test_index_subs"], "tokens": 286}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import collections\nimport warnings\nfrom operator import add\n\nimport pytest\nimport numpy as np\n\nimport dask\nimport dask.array as da\nfrom dask.highlevelgraph import HighLevelGraph\nfrom dask.blockwise import Blockwise, rewrite_blockwise, optimize_blockwise, index_subs\nfrom dask.array.utils import assert_eq\nfrom dask.array.numpy_compat import _numpy_116\nfrom dask.utils_test import inc, dec\n\na, b, c, d, e, f, g = \"abcdefg\"\n_0, _1, _2, _3, _4, _5, _6, _7, _8, _9 = [\"_%d\" % i for i in range(10)]\ni, j, k = \"ijk\"\n\n\ndef test_index_subs():\n assert index_subs(tuple(\"ij\"), {\"i\": \"j\", \"j\": \"i\"}) == tuple(\"ji\")\n\n\ndef test_optimize_blockwise():\n x = da.ones(10, chunks=(5,))\n y = (((x + 1) + 2) + 3) + 4\n\n dsk = da.optimization.optimize_blockwise(y.dask)\n\n assert isinstance(dsk, HighLevelGraph)\n\n assert (\n len([layer for layer in dsk.dicts.values() if isinstance(layer, Blockwise)])\n == 1\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_diamond_fusion_test_blockwise_diamond_fusion.assert_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_diamond_fusion_test_blockwise_diamond_fusion.assert_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_atop.py", "file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 205, "end_line": 219, "span_ids": ["test_blockwise_diamond_fusion"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_blockwise_diamond_fusion():\n x = da.ones(10, chunks=(5,))\n y = ((x + 1) + 2) + 3\n a = y * 2\n b = y * 3\n c = a + b\n d = ((c + 1) + 2) + 3\n\n dsk = da.optimization.optimize_blockwise(d.dask)\n assert isinstance(dsk, HighLevelGraph)\n\n assert (\n len([layer for layer in dsk.dicts.values() if isinstance(layer, Blockwise)])\n == 1\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_non_blockwise_output_test_blockwise_non_blockwise_output.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_non_blockwise_output_test_blockwise_non_blockwise_output.None_5", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_atop.py", 
"file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 222, "end_line": 248, "span_ids": ["test_blockwise_non_blockwise_output"], "tokens": 269}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_blockwise_non_blockwise_output():\n x = da.ones(10, chunks=(5,))\n y = ((x + 1) + 2) + 3\n w = y.sum()\n z = ((y * 2) * 3) * 4\n\n z_top_before = tuple(z.dask.dicts[z.name].indices)\n (zz,) = dask.optimize(z)\n z_top_after = tuple(z.dask.dicts[z.name].indices)\n assert z_top_before == z_top_after, \"z_top mutated\"\n\n dsk = optimize_blockwise(z.dask, keys=list(dask.core.flatten(z.__dask_keys__())))\n assert isinstance(dsk, HighLevelGraph)\n assert (\n len([layer for layer in dsk.dicts.values() if isinstance(layer, Blockwise)])\n == 1\n )\n\n dsk = optimize_blockwise(\n HighLevelGraph.merge(w.dask, z.dask),\n keys=list(dask.core.flatten([w.__dask_keys__(), z.__dask_keys__()])),\n )\n assert isinstance(dsk, HighLevelGraph)\n assert (\n len([layer for layer in z.dask.dicts.values() if isinstance(layer, Blockwise)])\n >= 1\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_top_len_test_blockwise_names.assert_y_name_startswith_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_top_len_test_blockwise_names.assert_y_name_startswith_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_atop.py", "file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 251, "end_line": 294, "span_ids": ["test_common_token_names_args", "test_inner_compute", "test_top_len", "test_common_token_names_kwargs", "test_blockwise_names"], "tokens": 371}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_top_len():\n x = da.ones(10, chunks=(5,))\n y = x[:, None] * x[None, :]\n\n d = y.dask.dicts[y.name]\n assert len(d) == 4\n\n\ndef test_inner_compute():\n x = da.ones(10, chunks=(5,)) + 1 + 2 + 3\n a = x.sum()\n y = x * 2 * 3 * 4\n b = y.sum()\n z = x * 2 * 3\n\n dask.compute(x, a, y, b, z)\n\n\n@pytest.mark.parametrize(\"name\", [\"_\", \"_0\", \"_1\", \".\", \".0\"])\ndef test_common_token_names_args(name):\n x = np.array([\"a\", \"bb\", \"ccc\"], dtype=object)\n d = da.from_array(x, chunks=2)\n\n result = da.blockwise(add, \"i\", d, \"i\", name, None, dtype=object)\n expected = x + name\n\n assert_eq(result, expected)\n\n\n@pytest.mark.parametrize(\"name\", [\"_0\", \"_1\", \".\", \".0\", \"_\"])\ndef test_common_token_names_kwargs(name):\n x = np.array([\"a\", \"bb\", \"ccc\"], dtype=object)\n d = da.from_array(x, chunks=2)\n\n result = da.blockwise(lambda x, y: x + y, \"i\", d, \"i\", y=name, dtype=object)\n 
expected = x + name\n\n assert_eq(result, expected)\n\n\ndef test_blockwise_names():\n x = da.ones(5, chunks=(2,))\n y = da.blockwise(add, \"i\", x, \"i\", dtype=x.dtype)\n assert y.name.startswith(\"add\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_new_axes_test_blockwise_new_axes.assert_eq_y_np_ones_4_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_new_axes_test_blockwise_new_axes.assert_eq_y_np_ones_4_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_atop.py", "file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 297, "end_line": 327, "span_ids": ["test_blockwise_new_axes"], "tokens": 318}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_blockwise_new_axes():\n def f(x):\n return x[:, None] * np.ones((1, 7))\n\n x = da.ones(5, chunks=2)\n y = da.blockwise(\n f, \"aq\", x, \"a\", new_axes={\"q\": 7}, concatenate=True, dtype=x.dtype\n )\n assert y.chunks == ((2, 2, 1), (7,))\n assert_eq(y, np.ones((5, 7)))\n\n def f(x):\n return x[None, :] * np.ones((7, 1))\n\n x = da.ones(5, chunks=2)\n y = da.blockwise(\n f, \"qa\", x, \"a\", new_axes={\"q\": 7}, concatenate=True, dtype=x.dtype\n )\n assert y.chunks == ((7,), (2, 2, 1))\n assert_eq(y, np.ones((7, 5)))\n\n def f(x):\n y = x.sum(axis=1)\n return y[:, None] * np.ones((1, 5))\n\n x = da.ones((4, 6), chunks=(2, 2))\n y = da.blockwise(\n f, \"aq\", x, \"ab\", new_axes={\"q\": 5}, concatenate=True, dtype=x.dtype\n )\n assert y.chunks == ((2, 2), (5,))\n assert_eq(y, np.ones((4, 5)) * 6)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_new_axes_2_test_blockwise_stacked_new_axes.assert_eq_z_np_ones_5_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_new_axes_2_test_blockwise_stacked_new_axes.assert_eq_z_np_ones_5_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_atop.py", "file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 330, "end_line": 362, "span_ids": ["test_blockwise_stacked_new_axes", "test_blockwise_new_axes_2"], "tokens": 270}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_blockwise_new_axes_2():\n x = da.ones((2, 2), chunks=(1, 1))\n\n def func(x):\n return np.stack([x, -x], 
axis=-1)\n\n y = da.blockwise(\n func,\n (\"x\", \"y\", \"sign\"),\n x,\n (\"x\", \"y\"),\n dtype=x.dtype,\n concatenate=True,\n new_axes={\"sign\": 2},\n )\n\n assert_eq(y, y)\n\n\n@pytest.mark.parametrize(\"concatenate\", [True, False])\ndef test_blockwise_stacked_new_axes(concatenate):\n def f(x):\n return x[..., None] * np.ones((1, 7))\n\n x = da.ones(5, chunks=2)\n y = da.blockwise(\n f, \"aq\", x, \"a\", new_axes={\"q\": 7}, concatenate=concatenate, dtype=x.dtype\n )\n z = da.blockwise(\n f, \"abq\", y, \"ab\", new_axes={\"q\": 7}, concatenate=concatenate, dtype=x.dtype\n )\n assert z.chunks == ((2, 2, 1), (7,), (7,))\n assert_eq(z, np.ones((5, 7, 7)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_stacked_new_axes_front_test_blockwise_stacked_new_axes_front.assert_eq_w_np_ones_7_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_stacked_new_axes_front_test_blockwise_stacked_new_axes_front.assert_eq_w_np_ones_7_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_atop.py", "file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 365, "end_line": 386, "span_ids": ["test_blockwise_stacked_new_axes_front"], "tokens": 251}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"concatenate\", [True, False])\ndef test_blockwise_stacked_new_axes_front(concatenate):\n def f(x):\n if isinstance(x, list):\n x = np.concatenate(x)\n return x[None, ...] 
* np.ones(7)[(slice(None),) + (None,) * x.ndim]\n\n x = da.ones(5, chunks=2)\n y = da.blockwise(\n f, \"qa\", x, \"a\", new_axes={\"q\": 7}, concatenate=concatenate, dtype=x.dtype\n )\n z = da.blockwise(\n f, \"qab\", y, \"ab\", new_axes={\"q\": 7}, concatenate=concatenate, dtype=x.dtype\n )\n assert z.chunks == ((7,), (7,), (2, 2, 1))\n assert_eq(z, np.ones((7, 7, 5)))\n\n w = da.blockwise(\n lambda x: x[:, 0, 0], \"a\", z, \"abc\", dtype=x.dtype, concatenate=True\n )\n assert w.chunks == ((7,),)\n assert_eq(w, np.ones((7,)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_stacked_new_axes_same_dim_test_blockwise_stacked_new_axes_same_dim.assert_eq_c_np_ones_5_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_stacked_new_axes_same_dim_test_blockwise_stacked_new_axes_same_dim.assert_eq_c_np_ones_5_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_atop.py", "file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 389, "end_line": 404, "span_ids": ["test_blockwise_stacked_new_axes_same_dim"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"concatenate\", [True, False])\ndef test_blockwise_stacked_new_axes_same_dim(concatenate):\n def f(x):\n return x[..., None] * np.ones((1, 7))\n\n x = da.ones(5, chunks=2)\n y = da.zeros(5, chunks=2)\n a = da.blockwise(\n f, \"aq\", x, \"a\", new_axes={\"q\": 7}, concatenate=concatenate, dtype=x.dtype\n )\n b = da.blockwise(\n f, \"aq\", y, \"a\", new_axes={\"q\": 7}, concatenate=concatenate, dtype=x.dtype\n )\n c = a + b\n assert c.chunks == ((2, 2, 1), (7,))\n assert_eq(c, np.ones((5, 7)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_new_axes_chunked_test_blockwise_new_axes_chunked.assert_eq_y_np_array_0": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_new_axes_chunked_test_blockwise_new_axes_chunked.assert_eq_y_np_array_0", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_atop.py", "file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 407, "end_line": 414, "span_ids": ["test_blockwise_new_axes_chunked"], "tokens": 147}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def 
test_blockwise_new_axes_chunked():\n def f(x):\n return x[None, :] * 2\n\n x = da.arange(0, 6, 1, chunks=2, dtype=np.int32)\n y = da.blockwise(f, \"qa\", x, \"a\", new_axes={\"q\": (1, 1)}, dtype=x.dtype)\n assert y.chunks == ((1, 1), (2, 2, 2))\n assert_eq(y, np.array([[0, 2, 4, 6, 8, 10], [0, 2, 4, 6, 8, 10]], np.int32))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_no_args_test_blockwise_kwargs.assert_eq_y_np_ones_5_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_no_args_test_blockwise_kwargs.assert_eq_y_np_ones_5_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_atop.py", "file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 417, "end_line": 443, "span_ids": ["test_blockwise_no_args", "test_blockwise_no_array_args", "test_blockwise_kwargs"], "tokens": 261}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_blockwise_no_args():\n def f():\n return np.ones((2, 3), np.float32)\n\n x = da.blockwise(f, \"ab\", new_axes={\"a\": 2, \"b\": (3, 3)}, dtype=np.float32)\n assert x.chunks == ((2,), (3, 3))\n assert_eq(x, np.ones((2, 6), np.float32))\n\n\ndef test_blockwise_no_array_args():\n def f(dtype):\n return np.ones((2, 3), dtype)\n\n x = da.blockwise(\n f, \"ab\", np.float32, None, new_axes={\"a\": 2, \"b\": (3, 3)}, dtype=np.float32\n )\n assert x.chunks == ((2,), (3, 3))\n assert_eq(x, np.ones((2, 6), np.float32))\n\n\ndef test_blockwise_kwargs():\n def f(a, b=0):\n return a + b\n\n x = da.ones(5, chunks=(2,))\n y = da.blockwise(f, \"i\", x, \"i\", b=10, dtype=x.dtype)\n assert_eq(y, np.ones(5) + 10)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_chunks_test_blockwise_chunks.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_chunks_test_blockwise_chunks.None_3", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_atop.py", "file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 446, "end_line": 487, "span_ids": ["test_blockwise_chunks"], "tokens": 372}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_blockwise_chunks():\n x = da.ones((5, 5), chunks=((2, 1, 2), (3, 2)))\n\n def double(a, axis=0):\n return np.concatenate([a, a], 
axis=axis)\n\n y = da.blockwise(\n double,\n \"ij\",\n x,\n \"ij\",\n adjust_chunks={\"i\": lambda n: 2 * n},\n axis=0,\n dtype=x.dtype,\n )\n assert y.chunks == ((4, 2, 4), (3, 2))\n assert_eq(y, np.ones((10, 5)))\n\n y = da.blockwise(\n double,\n \"ij\",\n x,\n \"ij\",\n adjust_chunks={\"j\": lambda n: 2 * n},\n axis=1,\n dtype=x.dtype,\n )\n assert y.chunks == ((2, 1, 2), (6, 4))\n assert_eq(y, np.ones((5, 10)))\n\n x = da.ones((10, 10), chunks=(5, 5))\n y = da.blockwise(\n double, \"ij\", x, \"ij\", axis=0, adjust_chunks={\"i\": 10}, dtype=x.dtype\n )\n assert y.chunks == ((10, 10), (5, 5))\n assert_eq(y, np.ones((20, 10)))\n\n y = da.blockwise(\n double, \"ij\", x, \"ij\", axis=0, adjust_chunks={\"i\": (10, 10)}, dtype=x.dtype\n )\n assert y.chunks == ((10, 10), (5, 5))\n assert_eq(y, np.ones((20, 10)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_numpy_arg_test_blockwise_numpy_arg.with_warnings_catch_warni.assert_eq_x_np_arange_10": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_numpy_arg_test_blockwise_numpy_arg.with_warnings_catch_warni.assert_eq_x_np_arange_10", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_atop.py", "file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 490, "end_line": 507, "span_ids": ["test_blockwise_numpy_arg"], "tokens": 221}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_blockwise_numpy_arg():\n with warnings.catch_warnings():\n if not _numpy_116:\n # Not sure why, but this DeprecationWarning is no longer\n # showing up for NumPy >=1.16. 
So we only filter here\n # for 1.15 and earlier\n warnings.simplefilter(\"ignore\", DeprecationWarning)\n\n x = da.arange(10, chunks=(5,))\n y = np.arange(1000)\n\n x = x.map_blocks(lambda x, y: x, 1.0)\n x = x.map_blocks(lambda x, y: x, \"abc\")\n x = x.map_blocks(lambda x, y: x, y)\n x = x.map_blocks(lambda x, y: x, \"abc\")\n x = x.map_blocks(lambda x, y: x, 1.0)\n x = x.map_blocks(lambda x, y, z: x, \"abc\", np.array([\"a\", \"b\"], dtype=object))\n assert_eq(x, np.arange(10))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_bag_array_conversion_test_svd.assert_eq_z_z_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_bag_array_conversion_test_svd.assert_eq_z_z_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_atop.py", "file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 510, "end_line": 525, "span_ids": ["test_bag_array_conversion", "test_svd"], "tokens": 149}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_bag_array_conversion():\n import dask.bag as db\n\n b = db.range(10, npartitions=1)\n (x,) = b.map_partitions(np.asarray).to_delayed()\n (x,) = [da.from_delayed(a, shape=(10,), dtype=int) for a in [x]]\n z = da.concatenate([x])\n assert_eq(z, np.arange(10), check_graph=False)\n\n\ndef test_svd():\n x = da.ones((1, 1), chunks=(1, 1))\n y = x * 2\n u, s, v = da.linalg.svd(y)\n z = y + u\n assert_eq(z, z)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_args_delayed_test_args_delayed.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_args_delayed_test_args_delayed.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_atop.py", "file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 528, "end_line": 536, "span_ids": ["test_args_delayed"], "tokens": 113}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_args_delayed():\n x = da.arange(10, chunks=(5,))\n y = dask.delayed(lambda: 100)()\n\n z = da.blockwise(add, \"i\", x, \"i\", y, None, dtype=x.dtype)\n assert_eq(z, np.arange(10) + 100)\n\n z = da.blockwise(lambda x, y: x + y, \"i\", x, \"i\", y=y, dtype=x.dtype)\n assert_eq(z, np.arange(10) + 100)", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_namedtuple_test_namedtuple.assert_eq_A_B_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_namedtuple_test_namedtuple.assert_eq_A_B_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_atop.py", "file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 539, "end_line": 550, "span_ids": ["test_namedtuple"], "tokens": 112}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"tup\", [(1, 2), collections.namedtuple(\"foo\", [\"a\", \"b\"])(1, 2)]\n)\ndef test_namedtuple(tup):\n A = da.random.random((20, 20), chunks=(10, 10))\n\n def f(data, x):\n return data\n\n B = da.blockwise(f, (\"d1\", \"d2\"), A, (\"d1\", \"d2\"), x=tup, dtype=A.dtype)\n\n assert_eq(A, B)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_validate_top_inputs_test_validate_top_inputs.assert_i_in_str_info_va": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_validate_top_inputs_test_validate_top_inputs.assert_i_in_str_info_va", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_atop.py", "file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 553, "end_line": 567, "span_ids": ["test_validate_top_inputs"], "tokens": 138}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_validate_top_inputs():\n A = da.random.random((20, 20), chunks=(10, 10))\n\n with pytest.raises(ValueError) as info:\n da.blockwise(inc, \"jk\", A, \"ij\", dtype=A.dtype)\n\n assert \"unknown dimension\" in str(info.value).lower()\n assert \"k\" in str(info.value)\n assert \"j\" not in str(info.value)\n\n with pytest.raises(ValueError) as info:\n da.blockwise(inc, \"ii\", A, \"ij\", dtype=A.dtype)\n\n assert \"repeated\" in str(info.value).lower()\n assert \"i\" in str(info.value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_dont_merge_before_reductions_test_dont_merge_before_reductions.z_compute_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_dont_merge_before_reductions_test_dont_merge_before_reductions.z_compute_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_atop.py", "file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 570, "end_line": 580, "span_ids": ["test_dont_merge_before_reductions"], "tokens": 117}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dont_merge_before_reductions():\n x = da.ones(10, chunks=(5,))\n y = da.blockwise(inc, \"i\", x, \"i\", dtype=x.dtype)\n z = da.blockwise(sum, \"\", y, \"i\", dtype=y.dtype)\n w = da.blockwise(sum, \"\", z, \"\", dtype=y.dtype)\n\n dsk = optimize_blockwise(w.dask)\n\n assert len([d for d in dsk.dicts.values() if isinstance(d, Blockwise)]) == 2\n\n z.compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_atop_legacy_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_atop_legacy_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_atop.py", "file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 583, "end_line": 599, "span_ids": ["test_non_hlg", "test_atop_legacy"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_atop_legacy():\n x = da.ones(10, chunks=(5,))\n with pytest.warns(None):\n y = da.atop(inc, \"i\", x, \"i\", dtype=x.dtype)\n z = da.blockwise(inc, \"i\", x, \"i\", dtype=x.dtype)\n assert_eq(y, z)\n assert y.name == z.name\n\n\ndef test_non_hlg():\n # Regression test for https://github.com/dask/dask/issues/5850\n a = da.from_array(np.ones(1, np.float64), chunks=(1,))\n a.dask = dict(a.dask) # Convert from HighLevelGraph to plain dict\n b = da.from_array(np.zeros(1, np.float64), chunks=(1,))\n x = a + b\n assert_eq(x, a)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_chunk.py_pytest_test_keepdims_wrapper_no_axis.assert_rwf_276": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_chunk.py_pytest_test_keepdims_wrapper_no_axis.assert_rwf_276", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_chunk.py", "file_name": "test_chunk.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 35, "span_ids": ["imports", 
"test_keepdims_wrapper_no_axis"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\npytest.importorskip(\"numpy\")\n\nimport numpy as np\n\nfrom dask.array.chunk import coarsen, keepdims_wrapper\nimport dask.array as da\n\n\ndef test_keepdims_wrapper_no_axis():\n def summer(a, axis=None):\n return a.sum(axis=axis)\n\n summer_wrapped = keepdims_wrapper(summer)\n\n assert summer_wrapped != summer\n\n a = np.arange(24).reshape(1, 2, 3, 4)\n\n r = summer(a)\n rw = summer_wrapped(a, keepdims=True)\n rwf = summer_wrapped(a, keepdims=False)\n\n assert r.ndim == 0\n assert r.shape == tuple()\n assert r == 276\n\n assert rw.ndim == 4\n assert rw.shape == (1, 1, 1, 1)\n assert (rw == 276).all()\n\n assert rwf.ndim == 0\n assert rwf.shape == tuple()\n assert rwf == 276", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_chunk.py_test_keepdims_wrapper_one_axis_test_keepdims_wrapper_one_axis.assert_rwf_np_array_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_chunk.py_test_keepdims_wrapper_one_axis_test_keepdims_wrapper_one_axis.assert_rwf_np_array_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_chunk.py", "file_name": "test_chunk.py", "file_type": "text/x-python", "category": "test", "start_line": 38, "end_line": 62, "span_ids": ["test_keepdims_wrapper_one_axis"], "tokens": 286}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_keepdims_wrapper_one_axis():\n def summer(a, axis=None):\n return a.sum(axis=axis)\n\n summer_wrapped = keepdims_wrapper(summer)\n\n assert summer_wrapped != summer\n\n a = np.arange(24).reshape(1, 2, 3, 4)\n\n r = summer(a, axis=2)\n rw = summer_wrapped(a, axis=2, keepdims=True)\n rwf = summer_wrapped(a, axis=2, keepdims=False)\n\n assert r.ndim == 3\n assert r.shape == (1, 2, 4)\n assert (r == np.array([[[12, 15, 18, 21], [48, 51, 54, 57]]])).all()\n\n assert rw.ndim == 4\n assert rw.shape == (1, 2, 1, 4)\n assert (rw == np.array([[[[12, 15, 18, 21]], [[48, 51, 54, 57]]]])).all()\n\n assert rwf.ndim == 3\n assert rwf.shape == (1, 2, 4)\n assert (rwf == np.array([[[12, 15, 18, 21], [48, 51, 54, 57]]])).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_chunk.py_test_keepdims_wrapper_two_axes_test_keepdims_wrapper_two_axes.assert_rwf_np_array_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_chunk.py_test_keepdims_wrapper_two_axes_test_keepdims_wrapper_two_axes.assert_rwf_np_array_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_chunk.py", "file_name": "test_chunk.py", "file_type": "text/x-python", "category": "test", "start_line": 65, "end_line": 89, "span_ids": ["test_keepdims_wrapper_two_axes"], "tokens": 242}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_keepdims_wrapper_two_axes():\n def summer(a, axis=None):\n return a.sum(axis=axis)\n\n summer_wrapped = keepdims_wrapper(summer)\n\n assert summer_wrapped != summer\n\n a = np.arange(24).reshape(1, 2, 3, 4)\n\n r = summer(a, axis=(1, 3))\n rw = summer_wrapped(a, axis=(1, 3), keepdims=True)\n rwf = summer_wrapped(a, axis=(1, 3), keepdims=False)\n\n assert r.ndim == 2\n assert r.shape == (1, 3)\n assert (r == np.array([[60, 92, 124]])).all()\n\n assert rw.ndim == 4\n assert rw.shape == (1, 1, 3, 1)\n assert (rw == np.array([[[[60], [92], [124]]]])).all()\n\n assert rwf.ndim == 2\n assert rwf.shape == (1, 3)\n assert (rwf == np.array([[60, 92, 124]])).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_chunk.py_test_coarsen_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_chunk.py_test_coarsen_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_chunk.py", "file_name": "test_chunk.py", "file_type": "text/x-python", "category": "test", "start_line": 92, "end_line": 111, "span_ids": ["test_integer_input", "test_coarsen"], "tokens": 209}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_coarsen():\n x = np.random.randint(10, size=(24, 24))\n y = coarsen(np.sum, x, {0: 2, 1: 4})\n assert y.shape == (12, 6)\n assert y[0, 0] == np.sum(x[:2, :4])\n\n\n\"\"\"\ndef test_coarsen_on_uneven_shape():\n x = np.random.randint(10, size=(23, 24))\n y = coarsen(np.sum, x, {0: 2, 1: 4})\n assert y.shape == (12, 6)\n assert y[0, 0] == np.sum(x[:2, :4])\n assert eq(y[11, :], x[23, :])\n\"\"\"\n\n\ndef test_integer_input():\n assert da.zeros((4, 6), chunks=2).rechunk(3).chunks == ((3, 1), (3, 3))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_pytest_test_arr_like.if_order_F_.else_.assert_not_np_isfortran_d": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_pytest_test_arr_like.if_order_F_.else_.assert_not_np_isfortran_d", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 73, "span_ids": ["imports", "test_arr_like"], "tokens": 559}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\npytest.importorskip(\"numpy\")\n\nimport numpy as np\nimport pytest\nfrom tlz import concat\n\nimport dask\nimport dask.array as da\nfrom dask.array.core import normalize_chunks\nfrom dask.array.utils import assert_eq, same_keys, AxisError\nfrom dask.array.numpy_compat import _numpy_117, _numpy_118\n\n\n@pytest.mark.parametrize(\n \"funcname\",\n [\n \"empty_like\",\n \"empty\",\n \"ones_like\",\n \"ones\",\n \"zeros_like\",\n \"zeros\",\n \"full_like\",\n \"full\",\n ],\n)\n@pytest.mark.parametrize(\"cast_shape\", [tuple, list, np.asarray])\n@pytest.mark.parametrize(\"cast_chunks\", [tuple, list, np.asarray])\n@pytest.mark.parametrize(\"shape, chunks\", [((10, 10), (4, 4))])\n@pytest.mark.parametrize(\"name\", [None, \"my-name\"])\n@pytest.mark.parametrize(\"order\", [\"C\", \"F\"])\n@pytest.mark.parametrize(\"dtype\", [\"i4\"])\ndef test_arr_like(funcname, shape, cast_shape, dtype, cast_chunks, chunks, name, order):\n np_func = getattr(np, funcname)\n da_func = getattr(da, funcname)\n shape = cast_shape(shape)\n chunks = cast_chunks(chunks)\n\n if \"full\" in funcname:\n old_np_func = np_func\n old_da_func = da_func\n\n np_func = lambda *a, **k: old_np_func(*a, fill_value=5, **k)\n da_func = lambda *a, **k: old_da_func(*a, fill_value=5, **k)\n\n dtype = np.dtype(dtype)\n\n if \"like\" in funcname:\n a = np.random.randint(0, 10, shape).astype(dtype)\n\n np_r = np_func(a, order=order)\n da_r = da_func(a, order=order, chunks=chunks, name=name)\n else:\n np_r = np_func(shape, order=order, dtype=dtype)\n da_r = da_func(shape, order=order, dtype=dtype, chunks=chunks, name=name)\n\n assert np_r.shape == da_r.shape\n assert np_r.dtype == da_r.dtype\n\n if \"empty\" not in funcname:\n assert (np_r == np.asarray(da_r)).all()\n\n if name is None:\n assert funcname.split(\"_\")[0] in da_r.name\n else:\n assert da_r.name == name\n\n if \"order\" == \"F\":\n assert np.isfortran(da_r.compute())\n else:\n assert not np.isfortran(da_r.compute())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_arr_like_shape_test_arr_like_shape.if_empty_not_in_funcnam.assert_eq_np_r_da_r_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_arr_like_shape_test_arr_like_shape.if_empty_not_in_funcnam.assert_eq_np_r_da_r_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", 
"start_line": 76, "end_line": 113, "span_ids": ["test_arr_like_shape"], "tokens": 423}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n not _numpy_117, reason=\"requires NumPy>=1.17 for shape argument support\"\n)\n@pytest.mark.parametrize(\n \"funcname, kwargs\",\n [\n (\"empty_like\", {}),\n (\"ones_like\", {}),\n (\"zeros_like\", {}),\n (\"full_like\", {\"fill_value\": 5}),\n ],\n)\n@pytest.mark.parametrize(\n \"shape, chunks, out_shape\",\n [\n ((10, 10), (4, 4), None),\n ((10, 10), (4, 4), (20, 3)),\n ((10, 10), (4), (20)),\n ((10, 10, 10), (4, 2), (5, 5)),\n ((2, 3, 5, 7), None, (3, 5, 7)),\n ((2, 3, 5, 7), (2, 5, 3), (3, 5, 7)),\n ((2, 3, 5, 7), (2, 5, 3, \"auto\", 3), (11,) + (2, 3, 5, 7)),\n ((2, 3, 5, 7), \"auto\", (3, 5, 7)),\n ],\n)\n@pytest.mark.parametrize(\"dtype\", [\"i4\"])\ndef test_arr_like_shape(funcname, kwargs, shape, dtype, chunks, out_shape):\n np_func = getattr(np, funcname)\n da_func = getattr(da, funcname)\n a = np.random.randint(0, 10, shape).astype(dtype)\n np_r = np_func(a, shape=out_shape, **kwargs)\n da_r = da_func(a, chunks=chunks, shape=out_shape, **kwargs)\n\n assert np_r.shape == da_r.shape\n assert np_r.dtype == da_r.dtype\n\n if \"empty\" not in funcname:\n assert_eq(np_r, da_r)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_linspace_test_linspace.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_linspace_test_linspace.None_4", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 116, "end_line": 143, "span_ids": ["test_linspace"], "tokens": 427}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"endpoint\", [True, False])\ndef test_linspace(endpoint):\n darr = da.linspace(6, 49, endpoint=endpoint, chunks=5)\n nparr = np.linspace(6, 49, endpoint=endpoint)\n assert_eq(darr, nparr)\n\n darr = da.linspace(1.4, 4.9, endpoint=endpoint, chunks=5, num=13)\n nparr = np.linspace(1.4, 4.9, endpoint=endpoint, num=13)\n assert_eq(darr, nparr)\n\n darr = da.linspace(6, 49, endpoint=endpoint, chunks=5, dtype=float)\n nparr = np.linspace(6, 49, endpoint=endpoint, dtype=float)\n assert_eq(darr, nparr)\n\n darr, dstep = da.linspace(6, 49, endpoint=endpoint, chunks=5, retstep=True)\n nparr, npstep = np.linspace(6, 49, endpoint=endpoint, retstep=True)\n assert np.allclose(dstep, npstep)\n assert_eq(darr, nparr)\n\n darr = da.linspace(1.4, 4.9, endpoint=endpoint, chunks=5, num=13, dtype=int)\n nparr = np.linspace(1.4, 4.9, num=13, endpoint=endpoint, dtype=int)\n 
assert_eq(darr, nparr)\n assert sorted(\n da.linspace(1.4, 4.9, endpoint=endpoint, chunks=5, num=13).dask\n ) == sorted(da.linspace(1.4, 4.9, endpoint=endpoint, chunks=5, num=13).dask)\n assert sorted(\n da.linspace(6, 49, endpoint=endpoint, chunks=5, dtype=float).dask\n ) == sorted(da.linspace(6, 49, endpoint=endpoint, chunks=5, dtype=float).dask)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_arange_test_arange.assert_da_arange_10_chun": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_arange_test_arange.assert_da_arange_10_chun", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 146, "end_line": 191, "span_ids": ["test_arange"], "tokens": 471}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_arange():\n darr = da.arange(77, chunks=13)\n nparr = np.arange(77)\n assert_eq(darr, nparr)\n\n darr = da.arange(2, 13, chunks=5)\n nparr = np.arange(2, 13)\n assert_eq(darr, nparr)\n\n darr = da.arange(4, 21, 9, chunks=13)\n nparr = np.arange(4, 21, 9)\n assert_eq(darr, nparr)\n\n # negative steps\n darr = da.arange(53, 5, -3, chunks=5)\n nparr = np.arange(53, 5, -3)\n assert_eq(darr, nparr)\n\n darr = da.arange(77, chunks=13, dtype=float)\n nparr = np.arange(77, dtype=float)\n assert_eq(darr, nparr)\n\n darr = da.arange(2, 13, chunks=5, dtype=int)\n nparr = np.arange(2, 13, dtype=int)\n assert_eq(darr, nparr)\n assert sorted(da.arange(2, 13, chunks=5).dask) == sorted(\n da.arange(2, 13, chunks=5).dask\n )\n assert sorted(da.arange(77, chunks=13, dtype=float).dask) == sorted(\n da.arange(77, chunks=13, dtype=float).dask\n )\n\n # 0 size output\n darr = da.arange(0, 1, -0.5, chunks=20)\n nparr = np.arange(0, 1, -0.5)\n assert_eq(darr, nparr)\n\n darr = da.arange(0, -1, 0.5, chunks=20)\n nparr = np.arange(0, -1, 0.5)\n assert_eq(darr, nparr)\n\n # Unexpected or missing kwargs\n with pytest.raises(TypeError, match=\"whatsthis\"):\n da.arange(10, chunks=-1, whatsthis=1)\n\n assert da.arange(10).chunks == ((10,),)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_arange_dtypes_test_arange_dtypes.assert_eq_a_np_a_da_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_arange_dtypes_test_arange_dtypes.assert_eq_a_np_a_da_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 194, "end_line": 218, "span_ids": ["test_arange_dtypes"], 
"tokens": 403}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"start,stop,step,dtype\",\n [\n (0, 1, 1, None), # int64\n (1.5, 2, 1, None), # float64\n (1, 2.5, 1, None), # float64\n (1, 2, 0.5, None), # float64\n (np.float32(1), np.float32(2), np.float32(1), None), # promoted to float64\n (np.int32(1), np.int32(2), np.int32(1), None), # promoted to int64\n (np.uint32(1), np.uint32(2), np.uint32(1), None), # promoted to int64\n (np.uint64(1), np.uint64(2), np.uint64(1), None), # promoted to float64\n (np.uint32(1), np.uint32(2), np.uint32(1), np.uint32),\n (np.uint64(1), np.uint64(2), np.uint64(1), np.uint64),\n # numpy.arange gives unexpected results\n # https://github.com/numpy/numpy/issues/11505\n # (1j, 2, 1, None),\n # (1, 2j, 1, None),\n # (1, 2, 1j, None),\n # (1+2j, 2+3j, 1+.1j, None),\n ],\n)\ndef test_arange_dtypes(start, stop, step, dtype):\n a_np = np.arange(start, stop, step, dtype=dtype)\n a_da = da.arange(start, stop, step, dtype=dtype, chunks=-1)\n assert_eq(a_np, a_da)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_arange_cast_float_int_step_test_arange_cast_float_int_step.assert_eq_darr_nparr_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_arange_cast_float_int_step_test_arange_cast_float_int_step.assert_eq_darr_nparr_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 221, "end_line": 228, "span_ids": ["test_arange_cast_float_int_step"], "tokens": 106}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(\n reason=\"Casting floats to ints is not supported since edge\"\n \"behavior is not specified or guaranteed by NumPy.\"\n)\ndef test_arange_cast_float_int_step():\n darr = da.arange(3.3, -9.1, -0.25, chunks=3, dtype=\"i8\")\n nparr = np.arange(3.3, -9.1, -0.25, dtype=\"i8\")\n assert_eq(darr, nparr)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_arange_float_step_test_arange_float_step.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_arange_float_step_test_arange_float_step.None_3", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", 
"file_type": "text/x-python", "category": "test", "start_line": 231, "end_line": 246, "span_ids": ["test_arange_float_step"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_arange_float_step():\n darr = da.arange(2.0, 13.0, 0.3, chunks=4)\n nparr = np.arange(2.0, 13.0, 0.3)\n assert_eq(darr, nparr)\n\n darr = da.arange(7.7, 1.5, -0.8, chunks=3)\n nparr = np.arange(7.7, 1.5, -0.8)\n assert_eq(darr, nparr)\n\n darr = da.arange(0, 1, 0.01, chunks=20)\n nparr = np.arange(0, 1, 0.01)\n assert_eq(darr, nparr)\n\n darr = da.arange(0, 1, 0.03, chunks=20)\n nparr = np.arange(0, 1, 0.03)\n assert_eq(darr, nparr)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_indices_wrong_chunks_test_indices_dimensions_chunks.with_dask_config_set_ar.assert_expected_actual": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_indices_wrong_chunks_test_indices_dimensions_chunks.with_dask_config_set_ar.assert_expected_actual", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 249, "end_line": 265, "span_ids": ["test_indices_wrong_chunks", "test_indices_dimensions_chunks"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_indices_wrong_chunks():\n with pytest.raises(ValueError):\n da.indices((1,), chunks=tuple())\n\n\ndef test_indices_dimensions_chunks():\n chunks = ((1, 4, 2, 3), (5, 5))\n darr = da.indices((10, 10), chunks=chunks)\n assert darr.chunks == ((1, 1),) + chunks\n\n with dask.config.set({\"array.chunk-size\": \"50 MiB\"}):\n shape = (10000, 10000)\n expected = normalize_chunks(\"auto\", shape=shape, dtype=int)\n result = da.indices(shape, chunks=\"auto\")\n # indices prepends a dimension\n actual = result.chunks[1:]\n assert expected == actual", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_empty_indicies_test_empty_indicies.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_empty_indicies_test_empty_indicies.None_3", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 268, "end_line": 291, "span_ids": 
["test_empty_indicies"], "tokens": 236}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_empty_indicies():\n darr = da.indices(tuple(), chunks=tuple())\n nparr = np.indices(tuple())\n assert darr.shape == nparr.shape\n assert darr.dtype == nparr.dtype\n assert_eq(darr, nparr)\n\n darr = da.indices(tuple(), float, chunks=tuple())\n nparr = np.indices(tuple(), float)\n assert darr.shape == nparr.shape\n assert darr.dtype == nparr.dtype\n assert_eq(darr, nparr)\n\n darr = da.indices((0,), float, chunks=(1,))\n nparr = np.indices((0,), float)\n assert darr.shape == nparr.shape\n assert darr.dtype == nparr.dtype\n assert_eq(darr, nparr)\n\n darr = da.indices((0, 1, 2), float, chunks=(1, 1, 2))\n nparr = np.indices((0, 1, 2), float)\n assert darr.shape == nparr.shape\n assert darr.dtype == nparr.dtype\n assert_eq(darr, nparr)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_indicies_test_indicies.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_indicies_test_indicies.None_3", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 294, "end_line": 309, "span_ids": ["test_indicies"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_indicies():\n darr = da.indices((1,), chunks=(1,))\n nparr = np.indices((1,))\n assert_eq(darr, nparr)\n\n darr = da.indices((1,), float, chunks=(1,))\n nparr = np.indices((1,), float)\n assert_eq(darr, nparr)\n\n darr = da.indices((2, 1), chunks=(2, 1))\n nparr = np.indices((2, 1))\n assert_eq(darr, nparr)\n\n darr = da.indices((2, 3), chunks=(1, 2))\n nparr = np.indices((2, 3))\n assert_eq(darr, nparr)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_meshgrid_test_meshgrid.for_e_r_a_e_r_d_i_in_zi.if_sparse_.else_.assert_e_r_d_chunks_xi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_meshgrid_test_meshgrid.for_e_r_a_e_r_d_i_in_zi.if_sparse_.else_.assert_e_r_d_chunks_xi", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 312, "end_line": 352, "span_ids": ["test_meshgrid"], "tokens": 441}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"shapes, chunks\",\n [\n ([()], [()]),\n ([(0,)], [(0,)]),\n ([(2,), (3,)], [(1,), (2,)]),\n ([(2,), (3,), (4,)], [(1,), (2,), (3,)]),\n ([(2,), (3,), (4,), (5,)], [(1,), (2,), (3,), (4,)]),\n ([(2, 3), (4,)], [(1, 2), (3,)]),\n ],\n)\n@pytest.mark.parametrize(\"indexing\", [\"ij\", \"xy\"])\n@pytest.mark.parametrize(\"sparse\", [False, True])\ndef test_meshgrid(shapes, chunks, indexing, sparse):\n xi_a = []\n xi_d = []\n xi_dc = []\n for each_shape, each_chunk in zip(shapes, chunks):\n xi_a.append(np.random.random(each_shape))\n xi_d_e = da.from_array(xi_a[-1], chunks=each_chunk)\n xi_d.append(xi_d_e)\n xi_d_ef = xi_d_e.flatten()\n xi_dc.append(xi_d_ef.chunks[0])\n do = list(range(len(xi_dc)))\n if indexing == \"xy\" and len(xi_dc) > 1:\n do[0], do[1] = do[1], do[0]\n xi_dc[0], xi_dc[1] = xi_dc[1], xi_dc[0]\n xi_dc = tuple(xi_dc)\n\n r_a = np.meshgrid(*xi_a, indexing=indexing, sparse=sparse)\n r_d = da.meshgrid(*xi_d, indexing=indexing, sparse=sparse)\n\n assert isinstance(r_d, list)\n assert len(r_a) == len(r_d)\n\n for e_r_a, e_r_d, i in zip(r_a, r_d, do):\n assert_eq(e_r_a, e_r_d)\n if sparse:\n assert e_r_d.chunks[i] == xi_dc[i]\n else:\n assert e_r_d.chunks == xi_dc", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_meshgrid_inputcoercion_test_meshgrid_inputcoercion.assert_eq_z_z_d_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_meshgrid_inputcoercion_test_meshgrid_inputcoercion.assert_eq_z_z_d_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 355, "end_line": 365, "span_ids": ["test_meshgrid_inputcoercion"], "tokens": 110}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_meshgrid_inputcoercion():\n a = [1, 2, 3]\n b = np.array([4, 5, 6, 7])\n x, y = np.meshgrid(a, b, indexing=\"ij\")\n z = x * y\n\n x_d, y_d = da.meshgrid(a, b, indexing=\"ij\")\n z_d = x_d * y_d\n\n assert z_d.shape == (len(a), len(b))\n assert_eq(z, z_d)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tril_triu_test_tril_triu.for_chk_in_5_4_.for_k_in_.None_1": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tril_triu_test_tril_triu.for_chk_in_5_4_.for_k_in_.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 368, "end_line": 401, "span_ids": ["test_tril_triu"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tril_triu():\n A = np.random.randn(20, 20)\n for chk in [5, 4]:\n dA = da.from_array(A, (chk, chk))\n\n assert np.allclose(da.triu(dA).compute(), np.triu(A))\n assert np.allclose(da.tril(dA).compute(), np.tril(A))\n\n for k in [\n -25,\n -20,\n -19,\n -15,\n -14,\n -9,\n -8,\n -6,\n -5,\n -1,\n 1,\n 4,\n 5,\n 6,\n 8,\n 10,\n 11,\n 15,\n 16,\n 19,\n 20,\n 21,\n ]:\n assert np.allclose(da.triu(dA, k).compute(), np.triu(A, k))\n assert np.allclose(da.tril(dA, k).compute(), np.tril(A, k))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tril_triu_errors_test_tril_triu_non_square_arrays.assert_eq_da_tril_dA_np": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tril_triu_errors_test_tril_triu_non_square_arrays.assert_eq_da_tril_dA_np", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 404, "end_line": 414, "span_ids": ["test_tril_triu_non_square_arrays", "test_tril_triu_errors"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tril_triu_errors():\n A = np.random.randint(0, 11, (10, 10, 10))\n dA = da.from_array(A, chunks=(5, 5, 5))\n pytest.raises(ValueError, lambda: da.triu(dA))\n\n\ndef test_tril_triu_non_square_arrays():\n A = np.random.randint(0, 11, (30, 35))\n dA = da.from_array(A, chunks=(5, 5))\n assert_eq(da.triu(dA), np.triu(A))\n assert_eq(da.tril(dA), np.tril(A))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_eye_test_eye.with_dask_config_set_ar.assert_4_x_npartitions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_eye_test_eye.with_dask_config_set_ar.assert_4_x_npartitions_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", 
"file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 417, "end_line": 437, "span_ids": ["test_eye"], "tokens": 420}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_eye():\n assert_eq(da.eye(9, chunks=3), np.eye(9))\n assert_eq(da.eye(9), np.eye(9))\n assert_eq(da.eye(10, chunks=3), np.eye(10))\n assert_eq(da.eye(9, chunks=3, M=11), np.eye(9, M=11))\n assert_eq(da.eye(11, chunks=3, M=9), np.eye(11, M=9))\n assert_eq(da.eye(7, chunks=3, M=11), np.eye(7, M=11))\n assert_eq(da.eye(11, chunks=3, M=7), np.eye(11, M=7))\n assert_eq(da.eye(9, chunks=3, k=2), np.eye(9, k=2))\n assert_eq(da.eye(9, chunks=3, k=-2), np.eye(9, k=-2))\n assert_eq(da.eye(7, chunks=3, M=11, k=5), np.eye(7, M=11, k=5))\n assert_eq(da.eye(11, chunks=3, M=7, k=-6), np.eye(11, M=7, k=-6))\n assert_eq(da.eye(6, chunks=3, M=9, k=7), np.eye(6, M=9, k=7))\n assert_eq(da.eye(12, chunks=3, M=6, k=-3), np.eye(12, M=6, k=-3))\n\n assert_eq(da.eye(9, chunks=3, dtype=int), np.eye(9, dtype=int))\n assert_eq(da.eye(10, chunks=3, dtype=int), np.eye(10, dtype=int))\n\n with dask.config.set({\"array.chunk-size\": \"50 MiB\"}):\n x = da.eye(10000, \"auto\")\n assert 4 < x.npartitions < 32", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_diag_test_diag.assert_eq_da_diag_d_np_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_diag_test_diag.assert_eq_da_diag_d_np_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 440, "end_line": 465, "span_ids": ["test_diag"], "tokens": 222}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_diag():\n v = np.arange(11)\n assert_eq(da.diag(v), np.diag(v))\n\n v = da.arange(11, chunks=3)\n darr = da.diag(v)\n nparr = np.diag(v)\n assert_eq(darr, nparr)\n assert sorted(da.diag(v).dask) == sorted(da.diag(v).dask)\n\n v = v + v + 3\n darr = da.diag(v)\n nparr = np.diag(v)\n assert_eq(darr, nparr)\n\n v = da.arange(11, chunks=11)\n darr = da.diag(v)\n nparr = np.diag(v)\n assert_eq(darr, nparr)\n assert sorted(da.diag(v).dask) == sorted(da.diag(v).dask)\n\n x = np.arange(64).reshape((8, 8))\n assert_eq(da.diag(x), np.diag(x))\n\n d = da.from_array(x, chunks=(4, 4))\n assert_eq(da.diag(d), np.diag(x))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_diagonal_test_diagonal.None_14": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_diagonal_test_diagonal.None_14", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 468, "end_line": 532, "span_ids": ["test_diagonal"], "tokens": 749}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_diagonal():\n v = np.arange(11)\n with pytest.raises(ValueError):\n da.diagonal(v)\n\n v = np.arange(4).reshape((2, 2))\n with pytest.raises(ValueError):\n da.diagonal(v, axis1=0, axis2=0)\n\n with pytest.raises(AxisError):\n da.diagonal(v, axis1=-4)\n\n with pytest.raises(AxisError):\n da.diagonal(v, axis2=-4)\n\n v = np.arange(4 * 5 * 6).reshape((4, 5, 6))\n v = da.from_array(v, chunks=2)\n assert_eq(da.diagonal(v), np.diagonal(v))\n # Empty diagonal.\n assert_eq(da.diagonal(v, offset=10), np.diagonal(v, offset=10))\n assert_eq(da.diagonal(v, offset=-10), np.diagonal(v, offset=-10))\n\n with pytest.raises(ValueError):\n da.diagonal(v, axis1=-2)\n\n # Negative axis.\n assert_eq(da.diagonal(v, axis1=-1), np.diagonal(v, axis1=-1))\n assert_eq(da.diagonal(v, offset=1, axis1=-1), np.diagonal(v, offset=1, axis1=-1))\n\n # Heterogeneous chunks.\n v = np.arange(2 * 3 * 4 * 5 * 6).reshape((2, 3, 4, 5, 6))\n v = da.from_array(v, chunks=(1, (1, 2), (1, 2, 1), (2, 1, 2), (5, 1)))\n\n assert_eq(da.diagonal(v), np.diagonal(v))\n assert_eq(\n da.diagonal(v, offset=2, axis1=3, axis2=1),\n np.diagonal(v, offset=2, axis1=3, axis2=1),\n )\n\n assert_eq(\n da.diagonal(v, offset=-2, axis1=3, axis2=1),\n np.diagonal(v, offset=-2, axis1=3, axis2=1),\n )\n\n assert_eq(\n da.diagonal(v, offset=-2, axis1=3, axis2=4),\n np.diagonal(v, offset=-2, axis1=3, axis2=4),\n )\n\n assert_eq(da.diagonal(v, 1), np.diagonal(v, 1))\n assert_eq(da.diagonal(v, -1), np.diagonal(v, -1))\n # Positional arguments\n assert_eq(da.diagonal(v, 1, 2, 1), np.diagonal(v, 1, 2, 1))\n\n v = np.arange(2 * 3 * 4 * 5 * 6).reshape((2, 3, 4, 5, 6))\n assert_eq(da.diagonal(v, axis1=1, axis2=3), np.diagonal(v, axis1=1, axis2=3))\n assert_eq(\n da.diagonal(v, offset=1, axis1=1, axis2=3),\n np.diagonal(v, offset=1, axis1=1, axis2=3),\n )\n\n assert_eq(\n da.diagonal(v, offset=1, axis1=3, axis2=1),\n np.diagonal(v, offset=1, axis1=3, axis2=1),\n )\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_diagonal.None_15_test_diagonal.None_22": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_diagonal.None_15_test_diagonal.None_22", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 534, "end_line": 568, "span_ids": ["test_diagonal"], "tokens": 397}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_diagonal():\n # ... other code\n\n assert_eq(\n da.diagonal(v, offset=-5, axis1=3, axis2=1),\n np.diagonal(v, offset=-5, axis1=3, axis2=1),\n )\n\n assert_eq(\n da.diagonal(v, offset=-6, axis1=3, axis2=1),\n np.diagonal(v, offset=-6, axis1=3, axis2=1),\n )\n\n assert_eq(\n da.diagonal(v, offset=-6, axis1=-3, axis2=1),\n np.diagonal(v, offset=-6, axis1=-3, axis2=1),\n )\n\n assert_eq(\n da.diagonal(v, offset=-6, axis1=-3, axis2=1),\n np.diagonal(v, offset=-6, axis1=-3, axis2=1),\n )\n\n v = da.from_array(v, chunks=2)\n assert_eq(\n da.diagonal(v, offset=1, axis1=3, axis2=1),\n np.diagonal(v, offset=1, axis1=3, axis2=1),\n )\n assert_eq(\n da.diagonal(v, offset=-1, axis1=3, axis2=1),\n np.diagonal(v, offset=-1, axis1=3, axis2=1),\n )\n\n v = np.arange(384).reshape((8, 8, 6))\n assert_eq(da.diagonal(v, offset=-1, axis1=2), np.diagonal(v, offset=-1, axis1=2))\n\n v = da.from_array(v, chunks=(4, 4, 2))\n assert_eq(da.diagonal(v, offset=-1, axis1=2), np.diagonal(v, offset=-1, axis1=2))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_fromfunction_test_fromfunction.assert_same_keys_d_d2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_fromfunction_test_fromfunction.assert_same_keys_d_d2_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 571, "end_line": 588, "span_ids": ["test_fromfunction"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"dtype\", [None, \"f8\", \"i8\"])\n@pytest.mark.parametrize(\n \"func, kwargs\",\n [\n (lambda x, y: x + y, {}),\n (lambda x, y, c=1: x + c * y, {}),\n (lambda x, y, c=1: x + c * y, {\"c\": 3}),\n ],\n)\ndef 
test_fromfunction(func, dtype, kwargs):\n a = np.fromfunction(func, shape=(5, 5), dtype=dtype, **kwargs)\n d = da.fromfunction(func, shape=(5, 5), chunks=(2, 2), dtype=dtype, **kwargs)\n\n assert_eq(d, a)\n\n d2 = da.fromfunction(func, shape=(5, 5), chunks=(2, 2), dtype=dtype, **kwargs)\n\n assert same_keys(d, d2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_repeat_test_repeat.for_r_in_1_2_3_4_.assert_all_concat_d_repea": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_repeat_test_repeat.for_r_in_1_2_3_4_.assert_all_concat_d_repea", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 591, "end_line": 623, "span_ids": ["test_repeat"], "tokens": 268}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_repeat():\n x = np.random.random((10, 11, 13))\n d = da.from_array(x, chunks=(4, 5, 3))\n\n repeats = [0, 1, 2, 5]\n axes = [-3, -2, -1, 0, 1, 2]\n\n for r in repeats:\n for a in axes:\n assert_eq(x.repeat(r, axis=a), d.repeat(r, axis=a))\n\n assert_eq(d.repeat(2, 0), da.repeat(d, 2, 0))\n\n with pytest.raises(NotImplementedError):\n da.repeat(d, np.arange(10))\n\n with pytest.raises(NotImplementedError):\n da.repeat(d, 2, None)\n\n with pytest.raises(NotImplementedError):\n da.repeat(d, 2)\n\n for invalid_axis in [3, -4]:\n with pytest.raises(ValueError):\n da.repeat(d, 2, axis=invalid_axis)\n\n x = np.arange(5)\n d = da.arange(5, chunks=(2,))\n\n assert_eq(x.repeat(3), d.repeat(3))\n\n for r in [1, 2, 3, 4]:\n assert all(concat(d.repeat(r).chunks))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tile_basic_test_tile_basic.assert_eq_np_tile_b_reps": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tile_basic_test_tile_basic.assert_eq_np_tile_b_reps", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 626, "end_line": 632, "span_ids": ["test_tile_basic"], "tokens": 107}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"reps\", [2, (2, 2), (1, 2), (2, 1), (2, 3, 4, 0)])\ndef test_tile_basic(reps):\n a = da.asarray([0, 1, 
2])\n b = [[1, 2], [3, 4]]\n\n assert_eq(np.tile(a.compute(), reps), da.tile(a, reps))\n assert_eq(np.tile(b, reps), da.tile(b, reps))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tile_chunks_test_tile_neg_reps.with_pytest_raises_ValueE.da_tile_d_reps_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tile_chunks_test_tile_neg_reps.with_pytest_raises_ValueE.da_tile_d_reps_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 635, "end_line": 651, "span_ids": ["test_tile_neg_reps", "test_tile_chunks"], "tokens": 204}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"shape, chunks\", [((10,), (1,)), ((10, 11, 13), (4, 5, 3))])\n@pytest.mark.parametrize(\"reps\", [0, 1, 2, 3, 5, (1,), (1, 2)])\ndef test_tile_chunks(shape, chunks, reps):\n x = np.random.random(shape)\n d = da.from_array(x, chunks=chunks)\n\n assert_eq(np.tile(x, reps), da.tile(d, reps))\n\n\n@pytest.mark.parametrize(\"shape, chunks\", [((10,), (1,)), ((10, 11, 13), (4, 5, 3))])\n@pytest.mark.parametrize(\"reps\", [-1, -5])\ndef test_tile_neg_reps(shape, chunks, reps):\n x = np.random.random(shape)\n d = da.from_array(x, chunks=chunks)\n\n with pytest.raises(ValueError):\n da.tile(d, reps)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tile_zero_reps_test_tile_zero_reps.assert_eq_np_tile_x_reps": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tile_zero_reps_test_tile_zero_reps.assert_eq_np_tile_x_reps", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 654, "end_line": 660, "span_ids": ["test_tile_zero_reps"], "tokens": 113}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"shape, chunks\", [((10,), (1,)), ((10, 11, 13), (4, 5, 3))])\n@pytest.mark.parametrize(\"reps\", [0, (0,), (2, 0), (0, 3, 0, 4)])\ndef test_tile_zero_reps(shape, chunks, reps):\n x = np.random.random(shape)\n d = da.from_array(x, chunks=chunks)\n\n assert_eq(np.tile(x, reps), da.tile(d, reps))", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tile_empty_array_test_tile_empty_array.assert_eq_np_tile_x_reps": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tile_empty_array_test_tile_empty_array.assert_eq_np_tile_x_reps", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 663, "end_line": 669, "span_ids": ["test_tile_empty_array"], "tokens": 104}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"shape, chunks\", [((1, 1, 0), (1, 1, 0)), ((2, 0), (1, 0))])\n@pytest.mark.parametrize(\"reps\", [2, (3, 2, 5)])\ndef test_tile_empty_array(shape, chunks, reps):\n x = np.empty(shape)\n d = da.from_array(x, chunks=chunks)\n\n assert_eq(np.tile(x, reps), da.tile(d, reps))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tile_np_kroncompare_examples_skip_stat_length.pytest_mark_xfail__numpy_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tile_np_kroncompare_examples_skip_stat_length.pytest_mark_xfail__numpy_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 672, "end_line": 683, "span_ids": ["impl:2", "test_tile_np_kroncompare_examples"], "tokens": 159}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"shape\", [(3,), (2, 3), (3, 4, 3), (3, 2, 3), (4, 3, 2, 4), (2, 2)]\n)\n@pytest.mark.parametrize(\"reps\", [(2,), (1, 2), (2, 1), (2, 2), (2, 3, 2), (3, 2)])\ndef test_tile_np_kroncompare_examples(shape, reps):\n x = np.random.random(shape)\n d = da.asarray(x)\n\n assert_eq(np.tile(x, reps), da.tile(d, reps))\n\n\nskip_stat_length = pytest.mark.xfail(_numpy_117, reason=\"numpy-14061\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_pad_0_width_test_pad_0_width.assert_eq_np_r_da_r_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_pad_0_width_test_pad_0_width.assert_eq_np_r_da_r_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 686, "end_line": 716, "span_ids": ["test_pad_0_width"], "tokens": 309}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"shape, chunks, pad_width, mode, kwargs\",\n [\n ((10, 11), (4, 5), 0, \"constant\", {\"constant_values\": 2}),\n ((10, 11), (4, 5), 0, \"edge\", {}),\n ((10, 11), (4, 5), 0, \"linear_ramp\", {\"end_values\": 2}),\n ((10, 11), (4, 5), 0, \"reflect\", {}),\n ((10, 11), (4, 5), 0, \"symmetric\", {}),\n ((10, 11), (4, 5), 0, \"wrap\", {}),\n pytest.param(\n (10, 11),\n (4, 5),\n 0,\n \"empty\",\n {},\n marks=pytest.mark.skipif(\n not _numpy_117, reason=\"requires NumPy>=1.17 for empty mode support\"\n ),\n ),\n ],\n)\ndef test_pad_0_width(shape, chunks, pad_width, mode, kwargs):\n np_a = np.random.random(shape)\n da_a = da.from_array(np_a, chunks=chunks)\n\n np_r = np.pad(np_a, pad_width, mode, **kwargs)\n da_r = da.pad(da_a, pad_width, mode, **kwargs)\n\n assert da_r is da_a\n\n assert_eq(np_r, da_r)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_pad_test_pad.if_mode_empty_.else_.assert_eq_np_r_da_r_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_pad_test_pad.if_mode_empty_.else_.assert_eq_np_r_da_r_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 719, "end_line": 771, "span_ids": ["test_pad"], "tokens": 611}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"shape, chunks, pad_width, mode, kwargs\",\n [\n ((10,), (3,), 1, \"constant\", {}),\n ((10,), (3,), 2, \"constant\", {\"constant_values\": -1}),\n ((10,), (3,), ((2, 3)), \"constant\", {\"constant_values\": (-1, -2)}),\n (\n (10, 11),\n (4, 5),\n ((1, 4), (2, 3)),\n \"constant\",\n {\"constant_values\": ((-1, -2), (2, 1))},\n ),\n ((10,), (3,), 3, \"edge\", {}),\n ((10,), (3,), 3, \"linear_ramp\", {}),\n ((10,), (3,), 3, \"linear_ramp\", {\"end_values\": 0}),\n (\n (10, 11),\n (4, 5),\n ((1, 4), (2, 3)),\n \"linear_ramp\",\n {\"end_values\": ((-1, -2), (4, 3))},\n ),\n ((10, 11), (4, 5), ((1, 4), (2, 3)), \"reflect\", {}),\n ((10, 11), (4, 5), ((1, 4), (2, 3)), \"symmetric\", {}),\n ((10, 11), (4, 5), ((1, 4), (2, 3)), \"wrap\", {}),\n ((10,), (3,), ((2, 3)), 
\"maximum\", {\"stat_length\": (1, 2)}),\n ((10, 11), (4, 5), ((1, 4), (2, 3)), \"mean\", {\"stat_length\": ((3, 4), (2, 1))}),\n ((10,), (3,), ((2, 3)), \"minimum\", {\"stat_length\": (2, 3)}),\n pytest.param(\n (10,),\n (3,),\n 1,\n \"empty\",\n {},\n marks=pytest.mark.skipif(\n not _numpy_117, reason=\"requires NumPy>=1.17 for empty mode support\"\n ),\n ),\n ],\n)\ndef test_pad(shape, chunks, pad_width, mode, kwargs):\n np_a = np.random.random(shape)\n da_a = da.from_array(np_a, chunks=chunks)\n\n np_r = np.pad(np_a, pad_width, mode, **kwargs)\n da_r = da.pad(da_a, pad_width, mode, **kwargs)\n\n if mode == \"empty\":\n # empty pads lead to undefined values which may be different\n assert_eq(np_r[pad_width:-pad_width], da_r[pad_width:-pad_width])\n else:\n assert_eq(np_r, da_r)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_pad_3d_data_test_pad_3d_data.assert_eq_np_r_da_r_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_pad_3d_data_test_pad_3d_data.assert_eq_np_r_da_r_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 774, "end_line": 829, "span_ids": ["test_pad_3d_data"], "tokens": 425}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"dtype\", [np.uint8, np.int16, np.float32, bool])\n@pytest.mark.parametrize(\n \"pad_widths\", [2, (2,), (2, 3), ((2, 3),), ((3, 1), (0, 0), (2, 0))]\n)\n@pytest.mark.parametrize(\n \"mode\",\n [\n \"constant\",\n \"edge\",\n pytest.param(\n \"linear_ramp\",\n marks=pytest.mark.skipif(\n not _numpy_118, reason=\"numpy changed pad behaviour\"\n ),\n ),\n \"maximum\",\n \"mean\",\n \"minimum\",\n pytest.param(\n \"reflect\",\n marks=pytest.mark.skip(\n reason=\"Bug when pad_width is larger than dimension: https://github.com/dask/dask/issues/5303\"\n ),\n ),\n pytest.param(\n \"symmetric\",\n marks=pytest.mark.skip(\n reason=\"Bug when pad_width is larger than dimension: https://github.com/dask/dask/issues/5303\"\n ),\n ),\n pytest.param(\n \"wrap\",\n marks=pytest.mark.skip(\n reason=\"Bug when pad_width is larger than dimension: https://github.com/dask/dask/issues/5303\"\n ),\n ),\n pytest.param(\n \"median\",\n marks=pytest.mark.skip(reason=\"Not implemented\"),\n ),\n pytest.param(\n \"empty\",\n marks=pytest.mark.skip(\n reason=\"Empty leads to undefined values, which may be different\"\n ),\n ),\n ],\n)\ndef test_pad_3d_data(dtype, pad_widths, mode):\n np_a = np.arange(2 * 3 * 4).reshape(2, 3, 4).astype(dtype)\n da_a = da.from_array(np_a, chunks=\"auto\")\n\n np_r = np.pad(np_a, pad_widths, mode=mode)\n da_r = da.pad(da_a, pad_widths, mode=mode)\n\n assert_eq(np_r, da_r)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": 
"TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_pad_udf_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_pad_udf_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 832, "end_line": 858, "span_ids": ["test_pad_udf", "test_auto_chunks"], "tokens": 249}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"kwargs\", [{}, {\"scaler\": 2}])\ndef test_pad_udf(kwargs):\n def udf_pad(vector, pad_width, iaxis, inner_kwargs):\n assert kwargs == inner_kwargs\n scaler = inner_kwargs.get(\"scaler\", 1)\n vector[: pad_width[0]] = -scaler * pad_width[0]\n vector[-pad_width[1] :] = scaler * pad_width[1]\n return vector\n\n shape = (10, 11)\n chunks = (4, 5)\n pad_width = ((1, 2), (2, 3))\n\n np_a = np.random.random(shape)\n da_a = da.from_array(np_a, chunks=chunks)\n\n np_r = np.pad(np_a, pad_width, udf_pad, **kwargs)\n da_r = da.pad(da_a, pad_width, udf_pad, **kwargs)\n\n assert_eq(np_r, da_r)\n\n\ndef test_auto_chunks():\n with dask.config.set({\"array.chunk-size\": \"50 MiB\"}):\n x = da.ones((10000, 10000))\n assert 4 < x.npartitions < 32", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_basic_test_sizeof.assert_sizeof_c_c_nby": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_basic_test_sizeof.assert_sizeof_c_c_nby", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy.py", "file_name": "test_cupy.py", "file_type": "text/x-python", "category": "test", "start_line": 197, "end_line": 221, "span_ids": ["test_sizeof", "test_basic"], "tokens": 231}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"func\", functions)\ndef test_basic(func):\n c = cupy.random.random((2, 3, 4))\n n = c.get()\n dc = da.from_array(c, chunks=(1, 2, 2), asarray=False)\n dn = da.from_array(n, chunks=(1, 2, 2))\n\n ddc = func(dc)\n ddn = func(dn)\n\n assert type(ddc._meta) == cupy.core.core.ndarray\n\n if ddc.dask.keys()[0][0].startswith(\"empty\"):\n # We can't verify for data correctness when testing empty_like\n assert type(ddc._meta) == type(ddc.compute())\n else:\n assert_eq(ddc, ddc) # Check that _meta and computed arrays match types\n assert_eq(ddc, ddn)\n\n\n@pytest.mark.parametrize(\"dtype\", [\"f4\", \"f8\"])\ndef test_sizeof(dtype):\n c = cupy.random.random((2, 3, 4), dtype=dtype)\n\n assert sizeof(c) == c.nbytes", 
"start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_diag_test_diag.assert_eq_da_diag_dx_cu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_diag_test_diag.assert_eq_da_diag_dx_cu", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy.py", "file_name": "test_cupy.py", "file_type": "text/x-python", "category": "test", "start_line": 161, "end_line": 183, "span_ids": ["test_diag"], "tokens": 270}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n not IS_NEP18_ACTIVE, reason=\"NEP-18 support is not available in NumPy\"\n)\ndef test_diag():\n v = cupy.arange(11)\n dv = da.from_array(v, chunks=(4,), asarray=False)\n assert type(dv._meta) == cupy.core.core.ndarray\n assert_eq(dv, dv) # Check that _meta and computed arrays match types\n assert_eq(da.diag(dv), cupy.diag(v))\n\n v = v + v + 3\n dv = dv + dv + 3\n darr = da.diag(dv)\n cupyarr = cupy.diag(v)\n assert type(darr._meta) == cupy.core.core.ndarray\n assert_eq(darr, darr) # Check that _meta and computed arrays match types\n assert_eq(darr, cupyarr)\n\n x = cupy.arange(64).reshape((8, 8))\n dx = da.from_array(x, chunks=(4, 4), asarray=False)\n assert type(dx._meta) == cupy.core.core.ndarray\n assert_eq(dx, dx) # Check that _meta and computed arrays match types\n assert_eq(da.diag(dx), cupy.diag(x))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_diagonal_test_diagonal.None_11": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_diagonal_test_diagonal.None_11", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy.py", "file_name": "test_cupy.py", "file_type": "text/x-python", "category": "test", "start_line": 249, "end_line": 307, "span_ids": ["test_diagonal"], "tokens": 643}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n not IS_NEP18_ACTIVE, reason=\"NEP-18 support is not available in NumPy\"\n)\ndef test_diagonal():\n v = cupy.arange(11)\n with pytest.raises(ValueError):\n da.diagonal(v)\n\n v = cupy.arange(4).reshape((2, 2))\n with pytest.raises(ValueError):\n da.diagonal(v, axis1=0, axis2=0)\n\n with pytest.raises(AxisError):\n da.diagonal(v, axis1=-4)\n\n with pytest.raises(AxisError):\n da.diagonal(v, axis2=-4)\n\n v = cupy.arange(4 * 5 * 6).reshape((4, 5, 6))\n v = da.from_array(v, 
chunks=2, asarray=False)\n assert_eq(da.diagonal(v), np.diagonal(v))\n # Empty diagonal.\n assert_eq(da.diagonal(v, offset=10), np.diagonal(v, offset=10))\n assert_eq(da.diagonal(v, offset=-10), np.diagonal(v, offset=-10))\n assert isinstance(da.diagonal(v).compute(), cupy.core.core.ndarray)\n\n with pytest.raises(ValueError):\n da.diagonal(v, axis1=-2)\n\n # Negative axis.\n assert_eq(da.diagonal(v, axis1=-1), np.diagonal(v, axis1=-1))\n assert_eq(da.diagonal(v, offset=1, axis1=-1), np.diagonal(v, offset=1, axis1=-1))\n\n # Heterogeneous chunks.\n v = cupy.arange(2 * 3 * 4 * 5 * 6).reshape((2, 3, 4, 5, 6))\n v = da.from_array(\n v, chunks=(1, (1, 2), (1, 2, 1), (2, 1, 2), (5, 1)), asarray=False\n )\n\n assert_eq(da.diagonal(v), np.diagonal(v))\n assert_eq(\n da.diagonal(v, offset=2, axis1=3, axis2=1),\n np.diagonal(v, offset=2, axis1=3, axis2=1),\n )\n\n assert_eq(\n da.diagonal(v, offset=-2, axis1=3, axis2=1),\n np.diagonal(v, offset=-2, axis1=3, axis2=1),\n )\n\n assert_eq(\n da.diagonal(v, offset=-2, axis1=3, axis2=4),\n np.diagonal(v, offset=-2, axis1=3, axis2=4),\n )\n\n assert_eq(da.diagonal(v, 1), np.diagonal(v, 1))\n assert_eq(da.diagonal(v, -1), np.diagonal(v, -1))\n # Positional arguments\n assert_eq(da.diagonal(v, 1, 2, 1), np.diagonal(v, 1, 2, 1))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_tril_triu_test_tril_triu.for_chk_in_5_4_.for_k_in_25_20_9_.assert_eq_da_tril_dA_k_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_tril_triu_test_tril_triu.for_chk_in_5_4_.for_k_in_25_20_9_.assert_eq_da_tril_dA_k_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy.py", "file_name": "test_cupy.py", "file_type": "text/x-python", "category": "test", "start_line": 310, "end_line": 325, "span_ids": ["test_tril_triu"], "tokens": 210}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n not IS_NEP18_ACTIVE or cupy.__version__ < \"6.4.0\",\n reason=\"NEP-18 support is not available in NumPy or CuPy older than \"\n \"6.4.0 (requires https://github.com/cupy/cupy/pull/2418)\",\n)\ndef test_tril_triu():\n A = cupy.random.randn(20, 20)\n for chk in [5, 4]:\n dA = da.from_array(A, (chk, chk), asarray=False)\n\n assert_eq(da.triu(dA), np.triu(A))\n assert_eq(da.tril(dA), np.tril(A))\n\n for k in [-25, -20, -9, -1, 1, 8, 19, 21]:\n assert_eq(da.triu(dA, k), np.triu(A, k))\n assert_eq(da.tril(dA, k), np.tril(A, k))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_tril_triu_non_square_arrays_test_tril_triu_non_square_arrays.assert_eq_da_tril_dA_np": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_tril_triu_non_square_arrays_test_tril_triu_non_square_arrays.assert_eq_da_tril_dA_np", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy.py", "file_name": "test_cupy.py", "file_type": "text/x-python", "category": "test", "start_line": 328, "end_line": 337, "span_ids": ["test_tril_triu_non_square_arrays"], "tokens": 147}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n not IS_NEP18_ACTIVE or cupy.__version__ < \"6.4.0\",\n reason=\"NEP-18 support is not available in NumPy or CuPy older than \"\n \"6.4.0 (requires https://github.com/cupy/cupy/pull/2418)\",\n)\ndef test_tril_triu_non_square_arrays():\n A = cupy.random.randint(0, 11, (30, 35))\n dA = da.from_array(A, chunks=(5, 5), asarray=False)\n assert_eq(da.triu(dA), np.triu(A))\n assert_eq(da.tril(dA), np.tril(A))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_apply_gufunc_axis_test_apply_gufunc_axis.assert_eq_m_dm_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_apply_gufunc_axis_test_apply_gufunc_axis.assert_eq_m_dm_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy.py", "file_name": "test_cupy.py", "file_type": "text/x-python", "category": "test", "start_line": 281, "end_line": 295, "span_ids": ["test_apply_gufunc_axis"], "tokens": 138}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n not IS_NEP18_ACTIVE, reason=\"NEP-18 support is not available in NumPy\"\n)\ndef test_apply_gufunc_axis():\n def mydiff(x):\n return np.diff(x)\n\n a = cupy.random.randn(3, 6, 4)\n da_ = da.from_array(a, chunks=2, asarray=False)\n\n m = np.diff(a, axis=1)\n dm = apply_gufunc(\n mydiff, \"(i)->(i)\", da_, axis=1, output_sizes={\"i\": 5}, allow_rechunk=True\n )\n assert_eq(m, dm)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_overlap_internal_test_overlap_internal.assert_same_keys_da_overl": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_overlap_internal_test_overlap_internal.assert_same_keys_da_overl", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy.py", "file_name": "test_cupy.py", "file_type": "text/x-python", "category": "test", "start_line": 
298, "end_line": 323, "span_ids": ["test_overlap_internal"], "tokens": 498}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_overlap_internal():\n x = cupy.arange(64).reshape((8, 8))\n d = da.from_array(x, chunks=(4, 4), asarray=False)\n\n g = da.overlap.overlap_internal(d, {0: 2, 1: 1})\n assert g.chunks == ((6, 6), (5, 5))\n\n expected = np.array(\n [\n [0, 1, 2, 3, 4, 3, 4, 5, 6, 7],\n [8, 9, 10, 11, 12, 11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20, 19, 20, 21, 22, 23],\n [24, 25, 26, 27, 28, 27, 28, 29, 30, 31],\n [32, 33, 34, 35, 36, 35, 36, 37, 38, 39],\n [40, 41, 42, 43, 44, 43, 44, 45, 46, 47],\n [16, 17, 18, 19, 20, 19, 20, 21, 22, 23],\n [24, 25, 26, 27, 28, 27, 28, 29, 30, 31],\n [32, 33, 34, 35, 36, 35, 36, 37, 38, 39],\n [40, 41, 42, 43, 44, 43, 44, 45, 46, 47],\n [48, 49, 50, 51, 52, 51, 52, 53, 54, 55],\n [56, 57, 58, 59, 60, 59, 60, 61, 62, 63],\n ]\n )\n\n assert_eq(g, expected)\n assert same_keys(da.overlap.overlap_internal(d, {0: 2, 1: 1}), g)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_trim_internal_test_periodic.assert_eq_e_0_d_2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_trim_internal_test_periodic.assert_eq_e_0_d_2_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy.py", "file_name": "test_cupy.py", "file_type": "text/x-python", "category": "test", "start_line": 326, "end_line": 346, "span_ids": ["test_periodic", "test_trim_internal"], "tokens": 239}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_trim_internal():\n x = cupy.ones((40, 60))\n d = da.from_array(x, chunks=(10, 10), asarray=False)\n e = da.overlap.trim_internal(d, axes={0: 1, 1: 2})\n\n assert e.chunks == ((8, 8, 8, 8), (6, 6, 6, 6, 6, 6))\n\n\n@pytest.mark.skipif(\n not IS_NEP18_ACTIVE, reason=\"NEP-18 support is not available in NumPy\"\n)\ndef test_periodic():\n x = cupy.arange(64).reshape((8, 8))\n d = da.from_array(x, chunks=(4, 4), asarray=False)\n\n e = da.overlap.periodic(d, axis=0, depth=2)\n assert e.shape[0] == d.shape[0] + 4\n assert e.shape[1] == d.shape[1]\n\n assert_eq(e[1, :], d[-1, :])\n assert_eq(e[0, :], d[-2, :])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_reflect_test_reflect.None_1": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_reflect_test_reflect.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy.py", "file_name": "test_cupy.py", "file_type": "text/x-python", "category": "test", "start_line": 349, "end_line": 362, "span_ids": ["test_reflect"], "tokens": 197}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n not IS_NEP18_ACTIVE, reason=\"NEP-18 support is not available in NumPy\"\n)\ndef test_reflect():\n x = cupy.arange(10)\n d = da.from_array(x, chunks=(5, 5), asarray=False)\n\n e = da.overlap.reflect(d, axis=0, depth=2)\n expected = np.array([1, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 8])\n assert_eq(e, expected)\n\n e = da.overlap.reflect(d, axis=0, depth=1)\n expected = np.array([0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9])\n assert_eq(e, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_nearest_test_nearest.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_nearest_test_nearest.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy.py", "file_name": "test_cupy.py", "file_type": "text/x-python", "category": "test", "start_line": 365, "end_line": 378, "span_ids": ["test_nearest"], "tokens": 199}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n not IS_NEP18_ACTIVE, reason=\"NEP-18 support is not available in NumPy\"\n)\ndef test_nearest():\n x = cupy.arange(10)\n d = da.from_array(x, chunks=(5, 5), asarray=False)\n\n e = da.overlap.nearest(d, axis=0, depth=2)\n expected = np.array([0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9])\n assert_eq(e, expected)\n\n e = da.overlap.nearest(d, axis=0, depth=1)\n expected = np.array([0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9])\n assert_eq(e, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_constant_test_constant.assert_eq_e_1_np_on": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_constant_test_constant.assert_eq_e_1_np_on", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy.py", "file_name": "test_cupy.py", "file_type": "text/x-python", "category": "test", "start_line": 440, "end_line": 454, "span_ids": ["test_constant"], "tokens": 202}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n not IS_NEP18_ACTIVE or cupy.__version__ < \"6.4.0\",\n reason=\"NEP-18 support is not available in NumPy or CuPy older than \"\n \"6.4.0 (requires https://github.com/cupy/cupy/pull/2418)\",\n)\ndef test_constant():\n x = cupy.arange(64).reshape((8, 8))\n d = da.from_array(x, chunks=(4, 4), asarray=False)\n\n e = da.overlap.constant(d, axis=0, depth=2, value=10)\n assert e.shape[0] == d.shape[0] + 4\n assert e.shape[1] == d.shape[1]\n\n assert_eq(e[1, :], np.ones(8, dtype=x.dtype) * 10)\n assert_eq(e[-1, :], np.ones(8, dtype=x.dtype) * 10)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_boundaries_test_boundaries.assert_eq_e_expected_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_boundaries_test_boundaries.assert_eq_e_expected_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy.py", "file_name": "test_cupy.py", "file_type": "text/x-python", "category": "test", "start_line": 457, "end_line": 484, "span_ids": ["test_boundaries"], "tokens": 538}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n not IS_NEP18_ACTIVE or cupy.__version__ < \"6.4.0\",\n reason=\"NEP-18 support is not available in NumPy or CuPy older than \"\n \"6.4.0 (requires https://github.com/cupy/cupy/pull/2418)\",\n)\ndef test_boundaries():\n x = cupy.arange(64).reshape((8, 8))\n d = da.from_array(x, chunks=(4, 4), asarray=False)\n\n e = da.overlap.boundaries(d, {0: 2, 1: 1}, {0: 0, 1: \"periodic\"})\n\n expected = np.array(\n [\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [7, 0, 1, 2, 3, 4, 5, 6, 7, 0],\n [15, 8, 9, 10, 11, 12, 13, 14, 15, 8],\n [23, 16, 17, 18, 19, 20, 21, 22, 23, 16],\n [31, 24, 25, 26, 27, 28, 29, 30, 31, 24],\n [39, 32, 33, 34, 35, 36, 37, 38, 39, 32],\n [47, 40, 41, 42, 43, 44, 45, 46, 47, 40],\n [55, 48, 49, 50, 51, 52, 53, 54, 55, 48],\n [63, 56, 57, 58, 59, 60, 61, 62, 63, 56],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n ]\n )\n assert_eq(e, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_random_all_test_random_all.rnd_test_rs_standard_t_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_random_all_test_random_all.rnd_test_rs_standard_t_2", 
"embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy.py", "file_name": "test_cupy.py", "file_type": "text/x-python", "category": "test", "start_line": 432, "end_line": 479, "span_ids": ["test_random_all"], "tokens": 747}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_random_all():\n def rnd_test(func, *args, **kwargs):\n a = func(*args, **kwargs)\n assert type(a._meta) == cupy.core.core.ndarray\n assert_eq(a, a) # Check that _meta and computed arrays match types\n\n rs = da.random.RandomState(RandomState=cupy.random.RandomState)\n\n rnd_test(rs.beta, 1, 2, size=5, chunks=3)\n rnd_test(rs.binomial, 10, 0.5, size=5, chunks=3)\n rnd_test(rs.chisquare, 1, size=5, chunks=3)\n rnd_test(rs.exponential, 1, size=5, chunks=3)\n rnd_test(rs.f, 1, 2, size=5, chunks=3)\n rnd_test(rs.gamma, 5, 1, size=5, chunks=3)\n rnd_test(rs.geometric, 1, size=5, chunks=3)\n rnd_test(rs.gumbel, 1, size=5, chunks=3)\n rnd_test(rs.hypergeometric, 1, 2, 3, size=5, chunks=3)\n rnd_test(rs.laplace, size=5, chunks=3)\n rnd_test(rs.logistic, size=5, chunks=3)\n rnd_test(rs.lognormal, size=5, chunks=3)\n rnd_test(rs.logseries, 0.5, size=5, chunks=3)\n # No RandomState for multinomial in CuPy\n # rnd_test(rs.multinomial, 20, [1 / 6.] * 6, size=5, chunks=3)\n rnd_test(rs.negative_binomial, 5, 0.5, size=5, chunks=3)\n rnd_test(rs.noncentral_chisquare, 2, 2, size=5, chunks=3)\n\n rnd_test(rs.noncentral_f, 2, 2, 3, size=5, chunks=3)\n rnd_test(rs.normal, 2, 2, size=5, chunks=3)\n rnd_test(rs.pareto, 1, size=5, chunks=3)\n rnd_test(rs.poisson, size=5, chunks=3)\n\n rnd_test(rs.power, 1, size=5, chunks=3)\n rnd_test(rs.rayleigh, size=5, chunks=3)\n rnd_test(rs.random_sample, size=5, chunks=3)\n\n rnd_test(rs.triangular, 1, 2, 3, size=5, chunks=3)\n rnd_test(rs.uniform, size=5, chunks=3)\n rnd_test(rs.vonmises, 2, 3, size=5, chunks=3)\n rnd_test(rs.wald, 1, 2, size=5, chunks=3)\n\n rnd_test(rs.weibull, 2, size=5, chunks=3)\n rnd_test(rs.zipf, 2, size=5, chunks=3)\n\n rnd_test(rs.standard_cauchy, size=5, chunks=3)\n rnd_test(rs.standard_exponential, size=5, chunks=3)\n rnd_test(rs.standard_gamma, 2, size=5, chunks=3)\n rnd_test(rs.standard_normal, size=5, chunks=3)\n rnd_test(rs.standard_t, 2, size=5, chunks=3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_random_shapes_test_random_shapes.assert_x_shape_shape": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_random_shapes_test_random_shapes.assert_x_shape_shape", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy.py", "file_name": "test_cupy.py", "file_type": "text/x-python", "category": "test", "start_line": 482, "end_line": 490, "span_ids": ["test_random_shapes"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"shape\", [(2, 3), (2, 3, 4), (2, 3, 4, 2)])\ndef test_random_shapes(shape):\n rs = da.random.RandomState(RandomState=cupy.random.RandomState)\n\n x = rs.poisson(size=shape, chunks=3)\n assert type(x._meta) == cupy.core.core.ndarray\n assert_eq(x, x) # Check that _meta and computed arrays match types\n assert x._meta.shape == (0,) * len(shape)\n assert x.shape == shape", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_tsqr_test_tsqr._full_matrix_returned": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_tsqr_test_tsqr._full_matrix_returned", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy.py", "file_name": "test_cupy.py", "file_type": "text/x-python", "category": "test", "start_line": 548, "end_line": 612, "span_ids": ["test_tsqr"], "tokens": 760}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n not IS_NEP18_ACTIVE or cupy.__version__ < \"6.1.0\",\n reason=\"NEP-18 support is not available in NumPy or CuPy older than \"\n \"6.1.0 (requires https://github.com/cupy/cupy/pull/2209)\",\n)\n@pytest.mark.parametrize(\n \"m,n,chunks,error_type\",\n [\n (20, 10, 10, None), # tall-skinny regular blocks\n (20, 10, (3, 10), None), # tall-skinny regular fat layers\n (20, 10, ((8, 4, 8), 10), None), # tall-skinny irregular fat layers\n (40, 10, ((15, 5, 5, 8, 7), 10), None), # tall-skinny non-uniform chunks (why?)\n (128, 2, (16, 2), None), # tall-skinny regular thin layers; recursion_depth=1\n (\n 129,\n 2,\n (16, 2),\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2 --> 17x2\n (\n 130,\n 2,\n (16, 2),\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2 --> 18x2 next\n (\n 131,\n 2,\n (16, 2),\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2 --> 18x2 next\n (300, 10, (40, 10), None), # tall-skinny regular thin layers; recursion_depth=2\n (300, 10, (30, 10), None), # tall-skinny regular thin layers; recursion_depth=3\n (300, 10, (20, 10), None), # tall-skinny regular thin layers; recursion_depth=4\n (10, 5, 10, None), # single block tall\n (5, 10, 10, None), # single block short\n (10, 10, 10, None), # single block square\n (10, 40, (10, 10), ValueError), # short-fat regular blocks\n (10, 40, (10, 15), ValueError), # short-fat irregular blocks\n (\n 10,\n 40,\n (10, (15, 5, 5, 8, 7)),\n ValueError,\n ), # short-fat non-uniform chunks (why?)\n (20, 20, 10, ValueError), # 2x2 regular blocks\n ],\n)\ndef test_tsqr(m, n, chunks, error_type):\n mat = cupy.random.rand(m, n)\n data = da.from_array(mat, chunks=chunks, name=\"A\", asarray=False)\n\n # qr\n m_q = m\n n_q = min(m, n)\n m_r = n_q\n n_r = n\n\n # svd\n m_u = m\n n_u = min(m, n)\n n_s = n_q\n m_vh = n_q\n n_vh = n\n d_vh = max(m_vh, n_vh) # full matrix returned\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_tsqr.if_error_type_is_None__test_tsqr.if_error_type_is_None_.else_.None_1.u_s_vh_da_linalg_tsqr": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_tsqr.if_error_type_is_None__test_tsqr.if_error_type_is_None_.else_.None_1.u_s_vh_da_linalg_tsqr", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy.py", "file_name": "test_cupy.py", "file_type": "text/x-python", "category": "test", "start_line": 614, "end_line": 637, "span_ids": ["test_tsqr"], "tokens": 997}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n not IS_NEP18_ACTIVE or cupy.__version__ < \"6.1.0\",\n reason=\"NEP-18 support is not available in NumPy or CuPy older than \"\n \"6.1.0 (requires https://github.com/cupy/cupy/pull/2209)\",\n)\n@pytest.mark.parametrize(\n \"m,n,chunks,error_type\",\n [\n (20, 10, 10, None), # tall-skinny regular blocks\n (20, 10, (3, 10), None), # tall-skinny regular fat layers\n (20, 10, ((8, 4, 8), 10), None), # tall-skinny irregular fat layers\n (40, 10, ((15, 5, 5, 8, 7), 10), None), # tall-skinny non-uniform chunks (why?)\n (128, 2, (16, 2), None), # tall-skinny regular thin layers; recursion_depth=1\n (\n 129,\n 2,\n (16, 2),\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2 --> 17x2\n (\n 130,\n 2,\n (16, 2),\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2 --> 18x2 next\n (\n 131,\n 2,\n (16, 2),\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2 --> 18x2 next\n (300, 10, (40, 10), None), # tall-skinny regular thin layers; recursion_depth=2\n (300, 10, (30, 10), None), # tall-skinny regular thin layers; recursion_depth=3\n (300, 10, (20, 10), None), # tall-skinny regular thin layers; recursion_depth=4\n (10, 5, 10, None), # single block tall\n (5, 10, 10, None), # single block short\n (10, 10, 10, None), # single block square\n (10, 40, (10, 10), ValueError), # short-fat regular blocks\n (10, 40, (10, 15), ValueError), # short-fat irregular blocks\n (\n 10,\n 40,\n (10, (15, 5, 5, 8, 7)),\n ValueError,\n ), # short-fat non-uniform chunks (why?)\n (20, 20, 10, ValueError), # 2x2 regular blocks\n ],\n)\ndef test_tsqr(m, n, chunks, error_type):\n # ... 
other code\n\n if error_type is None:\n # test QR\n q, r = da.linalg.tsqr(data)\n assert_eq((m_q, n_q), q.shape) # shape check\n assert_eq((m_r, n_r), r.shape) # shape check\n assert_eq(mat, da.dot(q, r)) # accuracy check\n assert_eq(cupy.eye(n_q, n_q), da.dot(q.T, q)) # q must be orthonormal\n assert_eq(r, np.triu(r.rechunk(r.shape[0]))) # r must be upper triangular\n\n # test SVD\n u, s, vh = da.linalg.tsqr(data, compute_svd=True)\n s_exact = np.linalg.svd(mat)[1]\n assert_eq(s, s_exact) # s must contain the singular values\n assert_eq((m_u, n_u), u.shape) # shape check\n assert_eq((n_s,), s.shape) # shape check\n assert_eq((d_vh, d_vh), vh.shape) # shape check\n assert_eq(np.eye(n_u, n_u), da.dot(u.T, u)) # u must be orthonormal\n assert_eq(np.eye(d_vh, d_vh), da.dot(vh, vh.T)) # vh must be orthonormal\n assert_eq(mat, da.dot(da.dot(u, da.diag(s)), vh[:n_q])) # accuracy check\n else:\n with pytest.raises(error_type):\n q, r = da.linalg.tsqr(data)\n with pytest.raises(error_type):\n u, s, vh = da.linalg.tsqr(data, compute_svd=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_tsqr_uncertain_test_tsqr_uncertain.data.da_from_array_mat_chunks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_tsqr_uncertain_test_tsqr_uncertain.data.da_from_array_mat_chunks", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy.py", "file_name": "test_cupy.py", "file_type": "text/x-python", "category": "test", "start_line": 586, "end_line": 680, "span_ids": ["test_tsqr_uncertain"], "tokens": 749}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n not IS_NEP18_ACTIVE, reason=\"NEP-18 support is not available in NumPy\"\n)\n@pytest.mark.parametrize(\n \"m_min,n_max,chunks,vary_rows,vary_cols,error_type\",\n [\n (10, 5, (10, 5), True, False, None), # single block tall\n (10, 5, (10, 5), False, True, None), # single block tall\n (10, 5, (10, 5), True, True, None), # single block tall\n (40, 5, (10, 5), True, False, None), # multiple blocks tall\n (40, 5, (10, 5), False, True, None), # multiple blocks tall\n (40, 5, (10, 5), True, True, None), # multiple blocks tall\n (\n 300,\n 10,\n (40, 10),\n True,\n False,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n True,\n False,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n True,\n False,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=4\n (\n 300,\n 10,\n (40, 10),\n False,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n False,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n False,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=4\n (\n 300,\n 10,\n (40, 10),\n True,\n True,\n None,\n ), # tall-skinny regular thin layers; 
recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n True,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n True,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=4\n ],\n)\ndef test_tsqr_uncertain(m_min, n_max, chunks, vary_rows, vary_cols, error_type):\n mat = cupy.random.rand(m_min * 2, n_max)\n m, n = m_min * 2, n_max\n mat[0:m_min, 0] += 1\n _c0 = mat[:, 0]\n _r0 = mat[0, :]\n c0 = da.from_array(_c0, chunks=m_min, name=\"c\", asarray=False)\n r0 = da.from_array(_r0, chunks=n_max, name=\"r\", asarray=False)\n data = da.from_array(mat, chunks=chunks, name=\"A\", asarray=False)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_tsqr_uncertain.if_vary_rows__test_tsqr_uncertain._full_matrix_returned": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_tsqr_uncertain.if_vary_rows__test_tsqr_uncertain._full_matrix_returned", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy.py", "file_name": "test_cupy.py", "file_type": "text/x-python", "category": "test", "start_line": 681, "end_line": 702, "span_ids": ["test_tsqr_uncertain"], "tokens": 799}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n not IS_NEP18_ACTIVE, reason=\"NEP-18 support is not available in NumPy\"\n)\n@pytest.mark.parametrize(\n \"m_min,n_max,chunks,vary_rows,vary_cols,error_type\",\n [\n (10, 5, (10, 5), True, False, None), # single block tall\n (10, 5, (10, 5), False, True, None), # single block tall\n (10, 5, (10, 5), True, True, None), # single block tall\n (40, 5, (10, 5), True, False, None), # multiple blocks tall\n (40, 5, (10, 5), False, True, None), # multiple blocks tall\n (40, 5, (10, 5), True, True, None), # multiple blocks tall\n (\n 300,\n 10,\n (40, 10),\n True,\n False,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n True,\n False,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n True,\n False,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=4\n (\n 300,\n 10,\n (40, 10),\n False,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n False,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n False,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=4\n (\n 300,\n 10,\n (40, 10),\n True,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n True,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n True,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=4\n ],\n)\ndef test_tsqr_uncertain(m_min, n_max, chunks, vary_rows, vary_cols, error_type):\n # ... 
other code\n if vary_rows:\n data = data[c0 > 0.5, :]\n mat = mat[_c0 > 0.5, :]\n m = mat.shape[0]\n if vary_cols:\n data = data[:, r0 > 0.5]\n mat = mat[:, _r0 > 0.5]\n n = mat.shape[1]\n\n # qr\n m_q = m\n n_q = min(m, n)\n m_r = n_q\n n_r = n\n\n # svd\n m_u = m\n n_u = min(m, n)\n n_s = n_q\n m_vh = n_q\n n_vh = n\n d_vh = max(m_vh, n_vh) # full matrix returned\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_tsqr_uncertain.if_error_type_is_None__test_tsqr_uncertain.if_error_type_is_None_.else_.None_1.u_s_vh_da_linalg_tsqr": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_tsqr_uncertain.if_error_type_is_None__test_tsqr_uncertain.if_error_type_is_None_.else_.None_1.u_s_vh_da_linalg_tsqr", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy.py", "file_name": "test_cupy.py", "file_type": "text/x-python", "category": "test", "start_line": 704, "end_line": 732, "span_ids": ["test_tsqr_uncertain"], "tokens": 1014}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n not IS_NEP18_ACTIVE, reason=\"NEP-18 support is not available in NumPy\"\n)\n@pytest.mark.parametrize(\n \"m_min,n_max,chunks,vary_rows,vary_cols,error_type\",\n [\n (10, 5, (10, 5), True, False, None), # single block tall\n (10, 5, (10, 5), False, True, None), # single block tall\n (10, 5, (10, 5), True, True, None), # single block tall\n (40, 5, (10, 5), True, False, None), # multiple blocks tall\n (40, 5, (10, 5), False, True, None), # multiple blocks tall\n (40, 5, (10, 5), True, True, None), # multiple blocks tall\n (\n 300,\n 10,\n (40, 10),\n True,\n False,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n True,\n False,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n True,\n False,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=4\n (\n 300,\n 10,\n (40, 10),\n False,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n False,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n False,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=4\n (\n 300,\n 10,\n (40, 10),\n True,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n True,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n True,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=4\n ],\n)\ndef test_tsqr_uncertain(m_min, n_max, chunks, vary_rows, vary_cols, error_type):\n # ... 
other code\n\n if error_type is None:\n # test QR\n q, r = da.linalg.tsqr(data)\n q = q.compute() # because uncertainty\n r = r.compute()\n assert_eq((m_q, n_q), q.shape) # shape check\n assert_eq((m_r, n_r), r.shape) # shape check\n assert_eq(mat, np.dot(q, r)) # accuracy check\n assert_eq(np.eye(n_q, n_q), np.dot(q.T, q)) # q must be orthonormal\n assert_eq(r, np.triu(r)) # r must be upper triangular\n\n # test SVD\n u, s, vh = da.linalg.tsqr(data, compute_svd=True)\n u = u.compute() # because uncertainty\n s = s.compute()\n vh = vh.compute()\n s_exact = np.linalg.svd(mat)[1]\n assert_eq(s, s_exact) # s must contain the singular values\n assert_eq((m_u, n_u), u.shape) # shape check\n assert_eq((n_s,), s.shape) # shape check\n assert_eq((d_vh, d_vh), vh.shape) # shape check\n assert_eq(np.eye(n_u, n_u), np.dot(u.T, u)) # u must be orthonormal\n assert_eq(np.eye(d_vh, d_vh), np.dot(vh, vh.T)) # vh must be orthonormal\n assert_eq(mat, np.dot(np.dot(u, np.diag(s)), vh[:n_q])) # accuracy check\n else:\n with pytest.raises(error_type):\n q, r = da.linalg.tsqr(data)\n with pytest.raises(error_type):\n u, s, vh = da.linalg.tsqr(data, compute_svd=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_sfqr_test_sfqr.if_error_type_is_None_.else_.with_pytest_raises_error_.q_r_da_linalg_sfqr_dat": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_sfqr_test_sfqr.if_error_type_is_None_.else_.with_pytest_raises_error_.q_r_da_linalg_sfqr_dat", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy.py", "file_name": "test_cupy.py", "file_type": "text/x-python", "category": "test", "start_line": 735, "end_line": 816, "span_ids": ["test_sfqr"], "tokens": 797}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"m,n,chunks,error_type\",\n [\n (20, 10, 10, ValueError), # tall-skinny regular blocks\n (20, 10, (3, 10), ValueError), # tall-skinny regular fat layers\n (20, 10, ((8, 4, 8), 10), ValueError), # tall-skinny irregular fat layers\n (\n 40,\n 10,\n ((15, 5, 5, 8, 7), 10),\n ValueError,\n ), # tall-skinny non-uniform chunks (why?)\n (\n 128,\n 2,\n (16, 2),\n ValueError,\n ), # tall-skinny regular thin layers; recursion_depth=1\n (\n 129,\n 2,\n (16, 2),\n ValueError,\n ), # tall-skinny regular thin layers; recursion_depth=2 --> 17x2\n (\n 130,\n 2,\n (16, 2),\n ValueError,\n ), # tall-skinny regular thin layers; recursion_depth=2 --> 18x2 next\n (\n 131,\n 2,\n (16, 2),\n ValueError,\n ), # tall-skinny regular thin layers; recursion_depth=2 --> 18x2 next\n (\n 300,\n 10,\n (40, 10),\n ValueError,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n ValueError,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n ValueError,\n ), # tall-skinny regular thin layers; recursion_depth=4\n (10, 5, 10, None), # single block tall\n (5, 10, 10, None), # single block 
short\n (10, 10, 10, None), # single block square\n (10, 40, (10, 10), None), # short-fat regular blocks\n (10, 40, (10, 15), None), # short-fat irregular blocks\n (10, 40, (10, (15, 5, 5, 8, 7)), None), # short-fat non-uniform chunks (why?)\n (20, 20, 10, ValueError), # 2x2 regular blocks\n ],\n)\ndef test_sfqr(m, n, chunks, error_type):\n mat = np.random.rand(m, n)\n data = da.from_array(mat, chunks=chunks, name=\"A\")\n m_q = m\n n_q = min(m, n)\n m_r = n_q\n n_r = n\n m_qtq = n_q\n\n if error_type is None:\n q, r = da.linalg.sfqr(data)\n assert_eq((m_q, n_q), q.shape) # shape check\n assert_eq((m_r, n_r), r.shape) # shape check\n assert_eq(mat, da.dot(q, r)) # accuracy check\n assert_eq(np.eye(m_qtq, m_qtq), da.dot(q.T, q)) # q must be orthonormal\n assert_eq(r, da.triu(r.rechunk(r.shape[0]))) # r must be upper triangular\n else:\n with pytest.raises(error_type):\n q, r = da.linalg.sfqr(data)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_sparse_hstack_vstack_csr_test_sparse_hstack_vstack_csr.assert_eq_x_y_todense_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_sparse_hstack_vstack_csr_test_sparse_hstack_vstack_csr.assert_eq_x_y_todense_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy.py", "file_name": "test_cupy.py", "file_type": "text/x-python", "category": "test", "start_line": 819, "end_line": 829, "span_ids": ["test_sparse_hstack_vstack_csr"], "tokens": 114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_sparse_hstack_vstack_csr():\n pytest.importorskip(\"cupyx\")\n x = cupy.arange(24, dtype=cupy.float32).reshape(4, 6)\n\n sp = da.from_array(x, chunks=(2, 3), asarray=False, fancy=False)\n sp = sp.map_blocks(cupyx.scipy.sparse.csr_matrix, dtype=cupy.float32)\n\n y = sp.compute()\n\n assert cupyx.scipy.sparse.isspmatrix(y)\n assert_eq(x, y.todense())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_cupy_sparse_concatenate_test_cupy_sparse_concatenate.assert_z_toarray_z_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_cupy_sparse_concatenate_test_cupy_sparse_concatenate.assert_z_toarray_z_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy.py", "file_name": "test_cupy.py", "file_type": "text/x-python", "category": "test", "start_line": 832, "end_line": 858, "span_ids": ["test_cupy_sparse_concatenate"], "tokens": 234}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"axis\", [0, 1])\ndef test_cupy_sparse_concatenate(axis):\n pytest.importorskip(\"cupyx\")\n\n rs = da.random.RandomState(RandomState=cupy.random.RandomState)\n meta = cupyx.scipy.sparse.csr_matrix((0, 0))\n\n xs = []\n ys = []\n for i in range(2):\n x = rs.random((1000, 10), chunks=(100, 10))\n x[x < 0.9] = 0\n xs.append(x)\n ys.append(x.map_blocks(cupyx.scipy.sparse.csr_matrix, meta=meta))\n\n z = da.concatenate(ys, axis=axis)\n z = z.compute()\n\n if axis == 0:\n sp_concatenate = cupyx.scipy.sparse.vstack\n elif axis == 1:\n sp_concatenate = cupyx.scipy.sparse.hstack\n z_expected = sp_concatenate(\n [cupyx.scipy.sparse.csr_matrix(e.compute()) for e in xs]\n )\n\n assert (z.toarray() == z_expected.toarray()).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_bincount_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_test_bincount_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy.py", "file_name": "test_cupy.py", "file_type": "text/x-python", "category": "test", "start_line": 915, "end_line": 929, "span_ids": ["test_bincount"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n not IS_NEP18_ACTIVE or cupy.__version__ < \"6.4.0\",\n reason=\"NEP-18 support is not available in NumPy or CuPy older than \"\n \"6.4.0 (requires https://github.com/cupy/cupy/pull/2418)\",\n)\ndef test_bincount():\n x = cupy.array([2, 1, 5, 2, 1])\n d = da.from_array(x, chunks=2, asarray=False)\n e = da.bincount(d, minlength=6)\n assert_eq(e, np.bincount(x, minlength=6))\n assert same_keys(da.bincount(d, minlength=6), e)\n\n assert da.bincount(d, minlength=6).name != da.bincount(d, minlength=7).name\n assert da.bincount(d, minlength=6).name == da.bincount(d, minlength=6).name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_operator_dispatch_property.return.wrapped": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_operator_dispatch_property.return.wrapped", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_dispatch.py", "file_name": "test_dispatch.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 33, "span_ids": ["imports", "wrap", "dispatch_property"], "tokens": 155}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import operator\n\nimport pytest\nimport numpy as np\n\nimport dask.array as da\nfrom dask.array import Array\nfrom dask.array.chunk_types import is_valid_array_chunk, is_valid_chunk_type\nfrom dask.array.utils import assert_eq\n\n\ndef wrap(func_name):\n \"\"\"\n Wrap a function.\n \"\"\"\n\n def wrapped(self, *a, **kw):\n a = getattr(self.arr, func_name)(*a, **kw)\n return a if not isinstance(a, np.ndarray) else type(self)(a)\n\n return wrapped\n\n\ndef dispatch_property(prop_name):\n \"\"\"\n Wrap a simple property.\n \"\"\"\n\n @property\n def wrapped(self, *a, **kw):\n return getattr(self.arr, prop_name)\n\n return wrapped", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_EncapsulateNDArray_EncapsulateNDArray.__array__.return.np_asarray_self_arr_arg": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_EncapsulateNDArray_EncapsulateNDArray.__array__.return.np_asarray_self_arr_arg", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_dispatch.py", "file_name": "test_dispatch.py", "file_type": "text/x-python", "category": "test", "start_line": 35, "end_line": 51, "span_ids": ["EncapsulateNDArray.__array__", "EncapsulateNDArray.__init__", "EncapsulateNDArray"], "tokens": 155}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class EncapsulateNDArray(np.lib.mixins.NDArrayOperatorsMixin):\n \"\"\"\n A class that \"mocks\" ndarray by encapsulating an ndarray and using\n protocols to \"look like\" an ndarray. Basically tests whether Dask\n works fine with something that is essentially an array but uses\n protocols instead of being an actual array. 
Must be manually\n registered as a valid chunk type to be considered a downcast type\n of Dask array in the type casting hierarchy.\n \"\"\"\n\n __array_priority__ = 20\n\n def __init__(self, arr):\n self.arr = arr\n\n def __array__(self, *args, **kwargs):\n return np.asarray(self.arr, *args, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_EncapsulateNDArray.__array_function___EncapsulateNDArray.__setitem__.wrap___setitem___": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_EncapsulateNDArray.__array_function___EncapsulateNDArray.__setitem__.wrap___setitem___", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_dispatch.py", "file_name": "test_dispatch.py", "file_type": "text/x-python", "category": "test", "start_line": 53, "end_line": 68, "span_ids": ["EncapsulateNDArray.__array_function__", "EncapsulateNDArray:5"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class EncapsulateNDArray(np.lib.mixins.NDArrayOperatorsMixin):\n\n def __array_function__(self, f, t, arrs, kw):\n if not all(\n issubclass(ti, (type(self), np.ndarray) + np.ScalarType) for ti in t\n ):\n return NotImplemented\n arrs = tuple(\n arr if not isinstance(arr, type(self)) else arr.arr for arr in arrs\n )\n t = tuple(ti for ti in t if not issubclass(ti, type(self)))\n print(t)\n a = self.arr.__array_function__(f, t, arrs, kw)\n return a if not isinstance(a, np.ndarray) else type(self)(a)\n\n __getitem__ = wrap(\"__getitem__\")\n\n __setitem__ = wrap(\"__setitem__\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_EncapsulateNDArray.__array_ufunc___da_register_chunk_type_En": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_EncapsulateNDArray.__array_ufunc___da_register_chunk_type_En", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_dispatch.py", "file_name": "test_dispatch.py", "file_type": "text/x-python", "category": "test", "start_line": 70, "end_line": 88, "span_ids": ["EncapsulateNDArray.__array_ufunc__", "EncapsulateNDArray:9", "impl"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class EncapsulateNDArray(np.lib.mixins.NDArrayOperatorsMixin):\n\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n if not all(\n isinstance(i, (type(self), np.ndarray) + 
np.ScalarType) for i in inputs\n ):\n return NotImplemented\n inputs = tuple(i if not isinstance(i, type(self)) else i.arr for i in inputs)\n a = getattr(ufunc, method)(*inputs, **kwargs)\n return a if not isinstance(a, np.ndarray) else type(self)(a)\n\n shape = dispatch_property(\"shape\")\n ndim = dispatch_property(\"ndim\")\n dtype = dispatch_property(\"dtype\")\n\n astype = wrap(\"astype\")\n sum = wrap(\"sum\")\n prod = wrap(\"prod\")\n\n\nda.register_chunk_type(EncapsulateNDArray)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_WrappedArray_WrappedArray.__setitem__.self_arr_key_value": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_WrappedArray_WrappedArray.__setitem__.self_arr_key_value", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_dispatch.py", "file_name": "test_dispatch.py", "file_type": "text/x-python", "category": "test", "start_line": 92, "end_line": 137, "span_ids": ["WrappedArray.__array_function__", "WrappedArray.__array_ufunc__", "WrappedArray", "WrappedArray._downcast_args", "WrappedArray.__array__", "WrappedArray.__dask_graph__", "WrappedArray.__init__", "WrappedArray.__getitem__", "WrappedArray:3", "WrappedArray.__setitem__"], "tokens": 393}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class WrappedArray(np.lib.mixins.NDArrayOperatorsMixin):\n \"\"\"\n Another mock duck array class (like EncapsulateNDArray), but\n designed to be above Dask in the type casting hierarchy (that is,\n WrappedArray wraps Dask Array) and be even more minimal in API.\n Tests that Dask defers properly to upcast types.\n \"\"\"\n\n def __init__(self, arr, **attrs):\n self.arr = arr\n self.attrs = attrs\n\n def __array__(self, *args, **kwargs):\n return np.asarray(self.arr, *args, **kwargs)\n\n def _downcast_args(self, args):\n for arg in args:\n if isinstance(arg, type(self)):\n yield arg.arr\n elif isinstance(arg, (tuple, list)):\n yield type(arg)(self._downcast_args(arg))\n else:\n yield arg\n\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n inputs = tuple(self._downcast_args(inputs))\n return type(self)(getattr(ufunc, method)(*inputs, **kwargs), **self.attrs)\n\n def __array_function__(self, func, types, args, kwargs):\n args = tuple(self._downcast_args(args))\n return type(self)(func(*args, **kwargs), **self.attrs)\n\n def __dask_graph__(self):\n # Note: make sure that dask duck arrays do not interfere with the\n # dispatch mechanism. 
The return value here doesn't matter.\n return ...\n\n shape = dispatch_property(\"shape\")\n ndim = dispatch_property(\"ndim\")\n dtype = dispatch_property(\"dtype\")\n\n def __getitem__(self, key):\n return type(self)(self.arr[key], **self.attrs)\n\n def __setitem__(self, key, value):\n self.arr[key] = value", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_test_binary_operation_type_precedence_test_binary_operation_type_precedence.assert_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_test_binary_operation_type_precedence_test_binary_operation_type_precedence.assert_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_dispatch.py", "file_name": "test_dispatch.py", "file_type": "text/x-python", "category": "test", "start_line": 134, "end_line": 177, "span_ids": ["test_binary_operation_type_precedence"], "tokens": 264}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"op\",\n [\n operator.add,\n operator.eq,\n operator.gt,\n operator.ge,\n operator.lt,\n operator.le,\n operator.mod,\n operator.mul,\n operator.ne,\n operator.pow,\n operator.sub,\n operator.truediv,\n operator.floordiv,\n np.add,\n np.subtract,\n ],\n)\n@pytest.mark.parametrize(\n \"arr_upcast, arr_downcast\",\n [\n (\n WrappedArray(np.random.random((10, 10))),\n da.random.random((10, 10), chunks=(5, 5)),\n ),\n (\n da.random.random((10, 10), chunks=(5, 5)),\n EncapsulateNDArray(np.random.random((10, 10))),\n ),\n (\n WrappedArray(np.random.random((10, 10))),\n EncapsulateNDArray(np.random.random((10, 10))),\n ),\n ],\n)\ndef test_binary_operation_type_precedence(op, arr_upcast, arr_downcast):\n \"\"\" Test proper dispatch on binary operators and NumPy ufuncs\"\"\"\n assert (\n type(op(arr_upcast, arr_downcast))\n == type(op(arr_downcast, arr_upcast))\n == type(arr_upcast)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_test_is_valid_array_chunk_test_is_valid_array_chunk.assert_is_valid_array_chu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_test_is_valid_array_chunk_test_is_valid_array_chunk.assert_is_valid_array_chu", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_dispatch.py", "file_name": "test_dispatch.py", "file_type": "text/x-python", "category": "test", "start_line": 186, "end_line": 205, "span_ids": ["test_is_valid_array_chunk"], "tokens": 175}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"arr, result\",\n [\n (WrappedArray(np.arange(4)), False),\n (da.from_array(np.arange(4)), False),\n (EncapsulateNDArray(np.arange(4)), True),\n (np.ma.masked_array(np.arange(4), [True, False, True, False]), True),\n (np.arange(4), True),\n (None, True),\n # float/int/str scalars are not valid array chunks,\n # but ops on float/int/str etc scalars do get handled\n # by Dask\n (0.0, False),\n (0, False),\n (\"\", False),\n ],\n)\ndef test_is_valid_array_chunk(arr, result):\n \"\"\" Test is_valid_array_chunk for correctness\"\"\"\n assert is_valid_array_chunk(arr) is result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_from_itertools_import_com_test_fft.assert_eq_da_fft_darr_n": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_from_itertools_import_com_test_fft.assert_eq_da_fft_darr_n", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_fft.py", "file_name": "test_fft.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 49, "span_ids": ["test_cant_fft_chunked_axis", "imports", "test_fft"], "tokens": 344}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from itertools import combinations_with_replacement\n\nimport numpy as np\n\nimport pytest\n\nimport dask.array as da\nimport dask.array.fft\nfrom dask.array.fft import fft_wrap\nfrom dask.array.utils import assert_eq, same_keys\n\nfrom dask.array.core import normalize_chunks\n\n\nall_1d_funcnames = [\"fft\", \"ifft\", \"rfft\", \"irfft\", \"hfft\", \"ihfft\"]\n\nall_nd_funcnames = [\n \"fft2\",\n \"ifft2\",\n \"fftn\",\n \"ifftn\",\n \"rfft2\",\n \"irfft2\",\n \"rfftn\",\n \"irfftn\",\n]\n\nnparr = np.arange(100).reshape(10, 10)\ndarr = da.from_array(nparr, chunks=(1, 10))\ndarr2 = da.from_array(nparr, chunks=(10, 1))\ndarr3 = da.from_array(nparr, chunks=(10, 10))\n\n\n@pytest.mark.parametrize(\"funcname\", all_1d_funcnames)\ndef test_cant_fft_chunked_axis(funcname):\n da_fft = getattr(da.fft, funcname)\n\n bad_darr = da.from_array(nparr, chunks=(5, 5))\n for i in range(bad_darr.ndim):\n with pytest.raises(ValueError):\n da_fft(bad_darr, axis=i)\n\n\n@pytest.mark.parametrize(\"funcname\", all_1d_funcnames)\ndef test_fft(funcname):\n da_fft = getattr(da.fft, funcname)\n np_fft = getattr(np.fft, funcname)\n\n assert_eq(da_fft(darr), np_fft(nparr))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_fft2n_shapes_test_fft2n_shapes.assert_eq_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_fft2n_shapes_test_fft2n_shapes.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_fft.py", "file_name": "test_fft.py", "file_type": "text/x-python", "category": "test", "start_line": 52, "end_line": 61, "span_ids": ["test_fft2n_shapes"], "tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"funcname\", all_nd_funcnames)\ndef test_fft2n_shapes(funcname):\n da_fft = getattr(dask.array.fft, funcname)\n np_fft = getattr(np.fft, funcname)\n assert_eq(da_fft(darr3), np_fft(nparr))\n assert_eq(da_fft(darr3, (8, 9)), np_fft(nparr, (8, 9)))\n assert_eq(da_fft(darr3, (8, 9), axes=(1, 0)), np_fft(nparr, (8, 9), axes=(1, 0)))\n assert_eq(\n da_fft(darr3, (12, 11), axes=(1, 0)), np_fft(nparr, (12, 11), axes=(1, 0))\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_fft_n_kwarg_test_fft_n_kwarg.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_fft_n_kwarg_test_fft_n_kwarg.None_5", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_fft.py", "file_name": "test_fft.py", "file_type": "text/x-python", "category": "test", "start_line": 64, "end_line": 74, "span_ids": ["test_fft_n_kwarg"], "tokens": 189}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"funcname\", all_1d_funcnames)\ndef test_fft_n_kwarg(funcname):\n da_fft = getattr(da.fft, funcname)\n np_fft = getattr(np.fft, funcname)\n\n assert_eq(da_fft(darr, 5), np_fft(nparr, 5))\n assert_eq(da_fft(darr, 13), np_fft(nparr, 13))\n assert_eq(da_fft(darr2, axis=0), np_fft(nparr, axis=0))\n assert_eq(da_fft(darr2, 5, axis=0), np_fft(nparr, 5, axis=0))\n assert_eq(da_fft(darr2, 13, axis=0), np_fft(nparr, 13, axis=0))\n assert_eq(da_fft(darr2, 12, axis=0), np_fft(nparr, 12, axis=0))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_fft_consistent_names_test_wrap_bad_kind.with_pytest_raises_ValueE.fft_wrap_np_ones_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_fft_consistent_names_test_wrap_bad_kind.with_pytest_raises_ValueE.fft_wrap_np_ones_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_fft.py", "file_name": "test_fft.py", "file_type": 
"text/x-python", "category": "test", "start_line": 77, "end_line": 88, "span_ids": ["test_fft_consistent_names", "test_wrap_bad_kind"], "tokens": 123}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"funcname\", all_1d_funcnames)\ndef test_fft_consistent_names(funcname):\n da_fft = getattr(da.fft, funcname)\n\n assert same_keys(da_fft(darr, 5), da_fft(darr, 5))\n assert same_keys(da_fft(darr2, 5, axis=0), da_fft(darr2, 5, axis=0))\n assert not same_keys(da_fft(darr, 5), da_fft(darr, 13))\n\n\ndef test_wrap_bad_kind():\n with pytest.raises(ValueError):\n fft_wrap(np.ones)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_nd_ffts_axes_test_nd_ffts_axes.for_num_axes_in_range_1_.for_axes_in_combinations_.if_len_set_axes_len_a.else_.assert_eq_r_er_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_nd_ffts_axes_test_nd_ffts_axes.for_num_axes_in_range_1_.for_axes_in_combinations_.if_len_set_axes_len_a.else_.assert_eq_r_er_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_fft.py", "file_name": "test_fft.py", "file_type": "text/x-python", "category": "test", "start_line": 91, "end_line": 116, "span_ids": ["test_nd_ffts_axes"], "tokens": 247}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"funcname\", all_nd_funcnames)\n@pytest.mark.parametrize(\"dtype\", [\"float32\", \"float64\"])\ndef test_nd_ffts_axes(funcname, dtype):\n np_fft = getattr(np.fft, funcname)\n da_fft = getattr(da.fft, funcname)\n\n shape = (7, 8, 9)\n chunk_size = (3, 3, 3)\n a = np.arange(np.prod(shape), dtype=dtype).reshape(shape)\n d = da.from_array(a, chunks=chunk_size)\n\n for num_axes in range(1, d.ndim):\n for axes in combinations_with_replacement(range(d.ndim), num_axes):\n cs = list(chunk_size)\n for i in axes:\n cs[i] = shape[i]\n d2 = d.rechunk(cs)\n if len(set(axes)) < len(axes):\n with pytest.raises(ValueError):\n da_fft(d2, axes=axes)\n else:\n r = da_fft(d2, axes=axes)\n er = np_fft(a, axes=axes)\n assert r.dtype == er.dtype\n assert r.shape == er.shape\n assert_eq(r, er)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_wrap_ffts_test_wrap_ffts.if_modname_scipy_fftp.else_.None_5": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_wrap_ffts_test_wrap_ffts.if_modname_scipy_fftp.else_.None_5", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_fft.py", "file_name": "test_fft.py", "file_type": "text/x-python", "category": "test", "start_line": 119, "end_line": 151, "span_ids": ["test_wrap_ffts"], "tokens": 412}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"modname\", [\"numpy.fft\", \"scipy.fftpack\"])\n@pytest.mark.parametrize(\"funcname\", all_1d_funcnames)\n@pytest.mark.parametrize(\"dtype\", [\"float32\", \"float64\"])\ndef test_wrap_ffts(modname, funcname, dtype):\n fft_mod = pytest.importorskip(modname)\n try:\n func = getattr(fft_mod, funcname)\n except AttributeError:\n pytest.skip(\"`%s` missing function `%s`.\" % (modname, funcname))\n\n darrc = darr.astype(dtype)\n darr2c = darr2.astype(dtype)\n nparrc = nparr.astype(dtype)\n\n if modname == \"scipy.fftpack\" and \"rfft\" in funcname:\n with pytest.raises(ValueError):\n fft_wrap(func)\n else:\n wfunc = fft_wrap(func)\n assert wfunc(darrc).dtype == func(nparrc).dtype\n assert wfunc(darrc).shape == func(nparrc).shape\n assert_eq(wfunc(darrc), func(nparrc))\n assert_eq(wfunc(darrc, axis=1), func(nparrc, axis=1))\n assert_eq(wfunc(darr2c, axis=0), func(nparrc, axis=0))\n assert_eq(wfunc(darrc, n=len(darrc) - 1), func(nparrc, n=len(darrc) - 1))\n assert_eq(\n wfunc(darrc, axis=1, n=darrc.shape[1] - 1),\n func(nparrc, n=darrc.shape[1] - 1),\n )\n assert_eq(\n wfunc(darr2c, axis=0, n=darr2c.shape[0] - 1),\n func(nparrc, axis=0, n=darr2c.shape[0] - 1),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_wrap_fftns_test_wrap_fftns.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_wrap_fftns_test_wrap_fftns.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_fft.py", "file_name": "test_fft.py", "file_type": "text/x-python", "category": "test", "start_line": 154, "end_line": 177, "span_ids": ["test_wrap_fftns"], "tokens": 346}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"modname\", [\"numpy.fft\", \"scipy.fftpack\"])\n@pytest.mark.parametrize(\"funcname\", all_nd_funcnames)\n@pytest.mark.parametrize(\"dtype\", [\"float32\", \"float64\"])\ndef test_wrap_fftns(modname, funcname, dtype):\n fft_mod = pytest.importorskip(modname)\n try:\n func = getattr(fft_mod, funcname)\n except AttributeError:\n pytest.skip(\"`%s` missing function `%s`.\" % (modname, funcname))\n\n darrc = darr.astype(dtype).rechunk(darr.shape)\n darr2c = 
darr2.astype(dtype).rechunk(darr2.shape)\n nparrc = nparr.astype(dtype)\n\n wfunc = fft_wrap(func)\n assert wfunc(darrc).dtype == func(nparrc).dtype\n assert wfunc(darrc).shape == func(nparrc).shape\n assert_eq(wfunc(darrc), func(nparrc))\n assert_eq(wfunc(darrc, axes=(1, 0)), func(nparrc, axes=(1, 0)))\n assert_eq(wfunc(darr2c, axes=(0, 1)), func(nparrc, axes=(0, 1)))\n assert_eq(\n wfunc(darr2c, (darr2c.shape[0] - 1, darr2c.shape[1] - 1), (0, 1)),\n func(nparrc, (nparrc.shape[0] - 1, nparrc.shape[1] - 1), (0, 1)),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_fftfreq_test_fftfreq.assert_eq_r1_r2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_fftfreq_test_fftfreq.assert_eq_r1_r2_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_fft.py", "file_name": "test_fft.py", "file_type": "text/x-python", "category": "test", "start_line": 180, "end_line": 191, "span_ids": ["test_fftfreq"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"n\", [1, 2, 3, 6, 7])\n@pytest.mark.parametrize(\"d\", [1.0, 0.5, 2 * np.pi])\n@pytest.mark.parametrize(\"c\", [lambda m: m, lambda m: (1, m - 1)])\ndef test_fftfreq(n, d, c):\n c = c(n)\n\n r1 = np.fft.fftfreq(n, d)\n r2 = da.fft.fftfreq(n, d, chunks=c)\n\n assert normalize_chunks(c, r2.shape) == r2.chunks\n\n assert_eq(r1, r2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_rfftfreq_test_rfftfreq.assert_eq_r1_r2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_rfftfreq_test_rfftfreq.assert_eq_r1_r2_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_fft.py", "file_name": "test_fft.py", "file_type": "text/x-python", "category": "test", "start_line": 194, "end_line": 205, "span_ids": ["test_rfftfreq"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"n\", [1, 2, 3, 6, 7])\n@pytest.mark.parametrize(\"d\", [1.0, 0.5, 2 * np.pi])\n@pytest.mark.parametrize(\"c\", [lambda m: (m // 2 + 1,), lambda m: (1, m // 2)])\ndef test_rfftfreq(n, d, c):\n c = [ci for ci in c(n) if ci != 0]\n\n r1 = np.fft.rfftfreq(n, d)\n r2 = da.fft.rfftfreq(n, d, chunks=c)\n\n assert normalize_chunks(c, r2.shape) == r2.chunks\n\n assert_eq(r1, r2)", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_fftshift_test_fftshift.assert_eq_d_r_a_r_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_fftshift_test_fftshift.assert_eq_d_r_a_r_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_fft.py", "file_name": "test_fft.py", "file_type": "text/x-python", "category": "test", "start_line": 208, "end_line": 231, "span_ids": ["test_fftshift"], "tokens": 287}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"funcname\", [\"fftshift\", \"ifftshift\"])\n@pytest.mark.parametrize(\"axes\", [None, 0, 1, 2, (0, 1), (1, 2), (0, 2), (0, 1, 2)])\n@pytest.mark.parametrize(\n \"shape, chunks\",\n [[(5, 6, 7), (2, 3, 4)], [(5, 6, 7), (2, 6, 4)], [(5, 6, 7), (5, 6, 7)]],\n)\ndef test_fftshift(funcname, shape, chunks, axes):\n np_func = getattr(np.fft, funcname)\n da_func = getattr(da.fft, funcname)\n\n a = np.arange(np.prod(shape)).reshape(shape)\n d = da.from_array(a, chunks=chunks)\n\n a_r = np_func(a, axes)\n d_r = da_func(d, axes)\n\n for each_d_chunks, each_d_r_chunks in zip(d.chunks, d_r.chunks):\n if len(each_d_chunks) == 1:\n assert len(each_d_r_chunks) == 1\n assert each_d_r_chunks == each_d_chunks\n else:\n assert len(each_d_r_chunks) != 1\n\n assert_eq(d_r, a_r)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_fftshift_identity_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_fftshift_identity_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_fft.py", "file_name": "test_fft.py", "file_type": "text/x-python", "category": "test", "start_line": 234, "end_line": 259, "span_ids": ["test_fftshift_identity"], "tokens": 309}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"funcname1, funcname2\", [(\"fftshift\", \"ifftshift\"), (\"ifftshift\", \"fftshift\")]\n)\n@pytest.mark.parametrize(\"axes\", [None, 0, 1, 2, (0, 1), (1, 2), (0, 2), (0, 1, 2)])\n@pytest.mark.parametrize(\n \"shape, chunks\",\n [[(5, 6, 7), (2, 3, 4)], [(5, 6, 7), (2, 6, 4)], [(5, 6, 7), (5, 6, 7)]],\n)\ndef test_fftshift_identity(funcname1, funcname2, shape, chunks, axes):\n da_func1 = getattr(da.fft, funcname1)\n da_func2 = getattr(da.fft, funcname2)\n\n a = np.arange(np.prod(shape)).reshape(shape)\n d = da.from_array(a, chunks=chunks)\n\n d_r = da_func1(da_func2(d, axes), 
axes)\n\n for each_d_chunks, each_d_r_chunks in zip(d.chunks, d_r.chunks):\n if len(each_d_chunks) == 1:\n assert len(each_d_r_chunks) == 1\n assert each_d_r_chunks == each_d_chunks\n else:\n assert len(each_d_r_chunks) != 1\n\n assert_eq(d_r, d)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_pytest_test__parse_gufunc_signature.None_3._parse_gufunc_signature_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_pytest_test__parse_gufunc_signature.None_3._parse_gufunc_signature_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 37, "span_ids": ["imports", "test__parse_gufunc_signature"], "tokens": 379}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\nfrom numpy.testing import assert_equal\nimport dask.array as da\nfrom dask.array.utils import assert_eq\nimport numpy as np\n\nfrom dask.array.core import Array\nfrom dask.array.gufunc import (\n _parse_gufunc_signature,\n _validate_normalize_axes,\n apply_gufunc,\n gufunc,\n as_gufunc,\n)\nfrom dask.array.numpy_compat import _numpy_120\nfrom dask.array.utils import IS_NEP18_ACTIVE\n\n\n# Copied from `numpy.lib.test_test_function_base.py`:\ndef test__parse_gufunc_signature():\n assert_equal(_parse_gufunc_signature(\"(x)->()\"), ([(\"x\",)], ()))\n assert_equal(_parse_gufunc_signature(\"(x,y)->()\"), ([(\"x\", \"y\")], ()))\n assert_equal(_parse_gufunc_signature(\"(x),(y)->()\"), ([(\"x\",), (\"y\",)], ()))\n assert_equal(_parse_gufunc_signature(\"(x)->(y)\"), ([(\"x\",)], (\"y\",)))\n assert_equal(_parse_gufunc_signature(\"(x)->(y),()\"), ([(\"x\",)], [(\"y\",), ()]))\n assert_equal(\n _parse_gufunc_signature(\"(),(a,b,c),(d)->(d,e)\"),\n ([(), (\"a\", \"b\", \"c\"), (\"d\",)], (\"d\", \"e\")),\n )\n with pytest.raises(ValueError):\n _parse_gufunc_signature(\"(x)(y)->()\")\n with pytest.raises(ValueError):\n _parse_gufunc_signature(\"(x),(y)->\")\n with pytest.raises(ValueError):\n _parse_gufunc_signature(\"((x))->(x)\")\n with pytest.raises(ValueError):\n _parse_gufunc_signature(\"(x)->(x),\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axes_input_validation_01_test_apply_gufunc_axes_input_validation_01.None_2.apply_gufunc_foo_i_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axes_input_validation_01_test_apply_gufunc_axes_input_validation_01.None_2.apply_gufunc_foo_i_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": 
"test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 40, "end_line": 58, "span_ids": ["test_apply_gufunc_axes_input_validation_01"], "tokens": 196}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_gufunc_axes_input_validation_01():\n def foo(x):\n return np.mean(x, axis=-1)\n\n a = da.random.normal(size=(20, 30), chunks=30)\n\n with pytest.raises(ValueError):\n apply_gufunc(foo, \"(i)->()\", a, axes=0)\n\n apply_gufunc(foo, \"(i)->()\", a, axes=[0])\n apply_gufunc(foo, \"(i)->()\", a, axes=[(0,)])\n apply_gufunc(foo, \"(i)->()\", a, axes=[0, tuple()])\n apply_gufunc(foo, \"(i)->()\", a, axes=[(0,), tuple()])\n\n with pytest.raises(ValueError):\n apply_gufunc(foo, \"(i)->()\", a, axes=[(0, 1)])\n\n with pytest.raises(ValueError):\n apply_gufunc(foo, \"(i)->()\", a, axes=[0, 0])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test__validate_normalize_axes_01_test__validate_normalize_axes_01.assert_o_0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test__validate_normalize_axes_01_test__validate_normalize_axes_01.assert_o_0_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 61, "end_line": 73, "span_ids": ["test__validate_normalize_axes_01"], "tokens": 156}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test__validate_normalize_axes_01():\n with pytest.raises(ValueError):\n _validate_normalize_axes([(1, 0)], None, False, [(\"i\", \"j\")], (\"j\",))\n\n with pytest.raises(ValueError):\n _validate_normalize_axes([0, 0], None, False, [(\"i\", \"j\")], (\"j\",))\n\n with pytest.raises(ValueError):\n _validate_normalize_axes([(0,), 0], None, False, [(\"i\", \"j\")], (\"j\",))\n\n i, o = _validate_normalize_axes([(1, 0), 0], None, False, [(\"i\", \"j\")], (\"j\",))\n assert i == [(1, 0)]\n assert o == [(0,)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test__validate_normalize_axes_02_test__validate_normalize_axes_02.None_2._validate_normalize_axes_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test__validate_normalize_axes_02_test__validate_normalize_axes_02.None_2._validate_normalize_axes_", "embedding": null, "metadata": {"file_path": 
"dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 76, "end_line": 96, "span_ids": ["test__validate_normalize_axes_02"], "tokens": 221}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test__validate_normalize_axes_02():\n i, o = _validate_normalize_axes(None, 0, False, [(\"i\",), (\"i\",)], ())\n assert i == [(0,), (0,)]\n assert o == [()]\n\n i, o = _validate_normalize_axes(None, 0, False, [(\"i\",)], (\"i\",))\n assert i == [(0,)]\n assert o == [(0,)]\n\n i, o = _validate_normalize_axes(None, 0, True, [(\"i\",), (\"i\",)], ())\n assert i == [(0,), (0,)]\n assert o == [(0,)]\n\n with pytest.raises(ValueError):\n _validate_normalize_axes(None, (0,), False, [(\"i\",), (\"i\",)], ())\n\n with pytest.raises(ValueError):\n _validate_normalize_axes(None, 0, False, [(\"i\",), (\"j\",)], ())\n\n with pytest.raises(ValueError):\n _validate_normalize_axes(None, 0, False, [(\"i\",), (\"j\",)], (\"j\",))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test__validate_normalize_axes_03_test__validate_normalize_axes_03.None_2._validate_normalize_axes_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test__validate_normalize_axes_03_test__validate_normalize_axes_03.None_2._validate_normalize_axes_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 99, "end_line": 111, "span_ids": ["test__validate_normalize_axes_03"], "tokens": 143}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test__validate_normalize_axes_03():\n i, o = _validate_normalize_axes(None, 0, True, [(\"i\",)], ())\n assert i == [(0,)]\n assert o == [(0,)]\n\n with pytest.raises(ValueError):\n _validate_normalize_axes(None, 0, True, [(\"i\",)], (\"i\",))\n\n with pytest.raises(ValueError):\n _validate_normalize_axes([(0, 1), (0, 1)], None, True, [(\"i\", \"j\")], (\"i\", \"j\"))\n\n with pytest.raises(ValueError):\n _validate_normalize_axes([(0,), (0,)], None, True, [(\"i\",), (\"j\",)], ())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_01_test_apply_gufunc_01.assert_std_compute_shap": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_01_test_apply_gufunc_01.assert_std_compute_shap", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 114, "end_line": 123, "span_ids": ["test_apply_gufunc_01"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_gufunc_01():\n def stats(x):\n return np.mean(x, axis=-1), np.std(x, axis=-1)\n\n a = da.random.normal(size=(10, 20, 30), chunks=(5, 5, 30))\n result = apply_gufunc(stats, \"(i)->(),()\", a, output_dtypes=2 * (a.dtype,))\n mean, std = result\n assert isinstance(result, tuple)\n assert mean.compute().shape == (10, 20)\n assert std.compute().shape == (10, 20)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_01b_test_apply_gufunc_01b.assert_std_compute_shap": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_01b_test_apply_gufunc_01b.assert_std_compute_shap", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 126, "end_line": 135, "span_ids": ["test_apply_gufunc_01b"], "tokens": 116}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_gufunc_01b():\n def stats(x):\n return np.mean(x, axis=-1), np.std(x, axis=-1)\n\n a = da.random.normal(size=(10, 20, 30), chunks=5)\n mean, std = apply_gufunc(\n stats, \"(i)->(),()\", a, output_dtypes=2 * (a.dtype,), allow_rechunk=True\n )\n assert mean.compute().shape == (10, 20)\n assert std.compute().shape == (10, 20)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_output_dtypes_string_test_apply_gufunc_output_dtypes_string.assert_mean_compute_sha": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_output_dtypes_string_test_apply_gufunc_output_dtypes_string.assert_mean_compute_sha", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 138, "end_line": 145, "span_ids": 
["test_apply_gufunc_output_dtypes_string"], "tokens": 106}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"vectorize\", [False, True])\ndef test_apply_gufunc_output_dtypes_string(vectorize):\n def stats(x):\n return np.mean(x, axis=-1)\n\n a = da.random.normal(size=(10, 20, 30), chunks=(5, 5, 30))\n mean = apply_gufunc(stats, \"(i)->()\", a, output_dtypes=\"f\", vectorize=vectorize)\n assert mean.compute().shape == (10, 20)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_output_dtypes_string_many_outputs_test_apply_gufunc_output_dtypes_string_many_outputs.assert_std_compute_shap": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_output_dtypes_string_many_outputs_test_apply_gufunc_output_dtypes_string_many_outputs.assert_std_compute_shap", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 148, "end_line": 158, "span_ids": ["test_apply_gufunc_output_dtypes_string_many_outputs"], "tokens": 139}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"vectorize\", [False, True])\ndef test_apply_gufunc_output_dtypes_string_many_outputs(vectorize):\n def stats(x):\n return np.mean(x, axis=-1), np.std(x, axis=-1)\n\n a = da.random.normal(size=(10, 20, 30), chunks=(5, 5, 30))\n mean, std = apply_gufunc(\n stats, \"(i)->(),()\", a, output_dtypes=(\"f\", \"f\"), vectorize=vectorize\n )\n assert mean.compute().shape == (10, 20)\n assert std.compute().shape == (10, 20)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_pass_additional_kwargs_test_apply_gufunc_02.assert_c_compute_shape_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_pass_additional_kwargs_test_apply_gufunc_02.assert_c_compute_shape_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 161, "end_line": 178, "span_ids": ["test_apply_gufunc_02", "test_apply_gufunc_pass_additional_kwargs"], "tokens": 203}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_gufunc_pass_additional_kwargs():\n def foo(x, bar):\n assert bar == 2\n return x\n\n ret = apply_gufunc(foo, \"()->()\", 1.0, output_dtypes=\"f\", bar=2)\n assert_eq(ret, np.array(1.0, dtype=\"f\"))\n\n\ndef test_apply_gufunc_02():\n def outer_product(x, y):\n return np.einsum(\"...i,...j->...ij\", x, y)\n\n a = da.random.normal(size=(20, 30), chunks=(5, 30))\n b = da.random.normal(size=(10, 1, 40), chunks=(10, 1, 40))\n c = apply_gufunc(outer_product, \"(i),(j)->(i,j)\", a, b, output_dtypes=a.dtype)\n\n assert c.compute().shape == (10, 20, 30, 40)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_scalar_output_test_apply_gufunc_elemwise_01b.with_pytest_raises_ValueE.apply_gufunc_add_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_scalar_output_test_apply_gufunc_elemwise_01b.with_pytest_raises_ValueE.apply_gufunc_add_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 181, "end_line": 206, "span_ids": ["test_apply_gufunc_elemwise_01", "test_apply_gufunc_scalar_output", "test_apply_gufunc_elemwise_01b"], "tokens": 259}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_gufunc_scalar_output():\n def foo():\n return 1\n\n x = apply_gufunc(foo, \"->()\", output_dtypes=int)\n assert x.compute() == 1\n\n\ndef test_apply_gufunc_elemwise_01():\n def add(x, y):\n return x + y\n\n a = da.from_array(np.array([1, 2, 3]), chunks=2, name=\"a\")\n b = da.from_array(np.array([1, 2, 3]), chunks=2, name=\"b\")\n z = apply_gufunc(add, \"(),()->()\", a, b, output_dtypes=a.dtype)\n assert_eq(z, np.array([2, 4, 6]))\n\n\ndef test_apply_gufunc_elemwise_01b():\n def add(x, y):\n return x + y\n\n a = da.from_array(np.array([1, 2, 3]), chunks=2, name=\"a\")\n b = da.from_array(np.array([1, 2, 3]), chunks=1, name=\"b\")\n with pytest.raises(ValueError):\n apply_gufunc(add, \"(),()->()\", a, b, output_dtypes=a.dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_elemwise_02_test_apply_gufunc_elemwise_02.assert_eq_z2_np_array_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_elemwise_02_test_apply_gufunc_elemwise_02.assert_eq_z2_np_array_1", "embedding": null, "metadata": {"file_path": 
"dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 209, "end_line": 218, "span_ids": ["test_apply_gufunc_elemwise_02"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_gufunc_elemwise_02():\n def addmul(x, y):\n assert x.shape in ((2,), (1,))\n return x + y, x * y\n\n a = da.from_array(np.array([1, 2, 3]), chunks=2, name=\"a\")\n b = da.from_array(np.array([1, 2, 3]), chunks=2, name=\"b\")\n z1, z2 = apply_gufunc(addmul, \"(),()->(),()\", a, b, output_dtypes=2 * (a.dtype,))\n assert_eq(z1, np.array([2, 4, 6]))\n assert_eq(z2, np.array([1, 4, 9]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_gufunc_vector_output_test_apply_gufunc_two_scalar_output.assert_y_compute_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_gufunc_vector_output_test_apply_gufunc_two_scalar_output.assert_y_compute_2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 221, "end_line": 266, "span_ids": ["test_apply_gufunc_elemwise_core", "test_apply_gufunc_two_scalar_output", "test_gufunc_vector_output", "test_apply_gufunc_elemwise_loop"], "tokens": 421}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_gufunc_vector_output():\n def foo():\n return np.array([1, 2, 3], dtype=int)\n\n x = apply_gufunc(foo, \"->(i_0)\", output_dtypes=int, output_sizes={\"i_0\": 3})\n assert x.chunks == ((3,),)\n assert_eq(x, np.array([1, 2, 3]))\n\n\ndef test_apply_gufunc_elemwise_loop():\n def foo(x):\n assert x.shape in ((2,), (1,))\n return 2 * x\n\n a = da.from_array(np.array([1, 2, 3]), chunks=2, name=\"a\")\n z = apply_gufunc(foo, \"()->()\", a, output_dtypes=int)\n assert z.chunks == ((2, 1),)\n assert_eq(z, np.array([2, 4, 6]))\n\n\ndef test_apply_gufunc_elemwise_core():\n def foo(x):\n assert x.shape == (3,)\n return 2 * x\n\n a = da.from_array(np.array([1, 2, 3]), chunks=3, name=\"a\")\n z = apply_gufunc(foo, \"(i)->(i)\", a, output_dtypes=int)\n assert z.chunks == ((3,),)\n assert_eq(z, np.array([2, 4, 6]))\n\n\n# TODO: In case single tuple output will get enabled:\n# def test_apply_gufunc_one_scalar_output():\n# def foo():\n# return 1,\n# x, = apply_gufunc(foo, \"->(),\", output_dtypes=(int,))\n# assert x.compute() == 1\n\n\ndef test_apply_gufunc_two_scalar_output():\n def foo():\n return 1, 2\n\n x, y = apply_gufunc(foo, \"->(),()\", output_dtypes=(int, int))\n assert x.compute() == 1\n assert y.compute() == 2", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_two_mixed_outputs_test_apply_gufunc_output_dtypes.assert_eq_y_dy_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_two_mixed_outputs_test_apply_gufunc_output_dtypes.assert_eq_y_dy_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 269, "end_line": 290, "span_ids": ["test_apply_gufunc_output_dtypes", "test_apply_gufunc_two_mixed_outputs"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_gufunc_two_mixed_outputs():\n def foo():\n return 1, np.ones((2, 3), dtype=float)\n\n x, y = apply_gufunc(\n foo, \"->(),(i,j)\", output_dtypes=(int, float), output_sizes={\"i\": 2, \"j\": 3}\n )\n assert x.compute() == 1\n assert y.chunks == ((2,), (3,))\n assert_eq(y, np.ones((2, 3), dtype=float))\n\n\n@pytest.mark.parametrize(\"output_dtypes\", [int, (int,)])\ndef test_apply_gufunc_output_dtypes(output_dtypes):\n def foo(x):\n return y\n\n x = np.random.randn(10)\n y = x.astype(int)\n dy = apply_gufunc(foo, \"()->()\", x, output_dtypes=output_dtypes)\n # print(x, x.compute())\n assert_eq(y, dy)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_gufunc_two_inputs_test_gufunc_two_inputs.assert_eq_x_3_np_ones_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_gufunc_two_inputs_test_gufunc_two_inputs.assert_eq_x_3_np_ones_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 293, "end_line": 300, "span_ids": ["test_gufunc_two_inputs"], "tokens": 117}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_gufunc_two_inputs():\n def foo(x, y):\n return np.einsum(\"...ij,...jk->ik\", x, y)\n\n a = da.ones((2, 3), chunks=100, dtype=int)\n b = da.ones((3, 4), chunks=100, dtype=int)\n x = apply_gufunc(foo, \"(i,j),(j,k)->(i,k)\", a, b, output_dtypes=int)\n assert_eq(x, 3 * np.ones((2, 4), dtype=int))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_gufunc_mixed_inputs_test_gufunc.assert_valy_shape_10_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_gufunc_mixed_inputs_test_gufunc.assert_valy_shape_10_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 303, "end_line": 331, "span_ids": ["test_gufunc", "test_gufunc_mixed_inputs"], "tokens": 209}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_gufunc_mixed_inputs():\n def foo(x, y):\n return x + y\n\n a = np.ones((2, 1), dtype=int)\n b = da.ones((1, 8), chunks=(2, 3), dtype=int)\n x = apply_gufunc(foo, \"(),()->()\", a, b, output_dtypes=int)\n assert_eq(x, 2 * np.ones((2, 8), dtype=int))\n\n\ndef test_gufunc():\n x = da.random.normal(size=(10, 5), chunks=(2, 5))\n\n def foo(x):\n return np.mean(x, axis=-1)\n\n gufoo = gufunc(\n foo,\n signature=\"(i)->()\",\n axis=-1,\n keepdims=False,\n output_dtypes=float,\n vectorize=True,\n )\n\n y = gufoo(x)\n valy = y.compute()\n assert isinstance(y, Array)\n assert valy.shape == (10,)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_as_gufunc_test_apply_gufunc_broadcasting_loopdims.assert_z_compute_shape_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_as_gufunc_test_apply_gufunc_broadcasting_loopdims.assert_z_compute_shape_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 334, "end_line": 363, "span_ids": ["test_apply_gufunc_broadcasting_loopdims", "test_as_gufunc"], "tokens": 299}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_as_gufunc():\n x = da.random.normal(size=(10, 5), chunks=(2, 5))\n\n @as_gufunc(\"(i)->()\", axis=-1, keepdims=False, output_dtypes=float, vectorize=True)\n def foo(x):\n return np.mean(x, axis=-1)\n\n y = foo(x)\n valy = y.compute()\n assert isinstance(y, Array)\n assert valy.shape == (10,)\n\n\ndef test_apply_gufunc_broadcasting_loopdims():\n def foo(x, y):\n assert len(x.shape) == 2\n assert len(y.shape) == 3\n x, y = np.broadcast_arrays(x, y)\n return x, y, x * y\n\n a = da.random.normal(size=(10, 30), chunks=(8, 30))\n b = da.random.normal(size=(20, 1, 30), chunks=(3, 1, 30))\n\n x, y, z = apply_gufunc(\n foo, \"(i),(i)->(i),(i),(i)\", a, b, output_dtypes=3 * (float,), vectorize=False\n )\n\n assert x.compute().shape == 
(20, 10, 30)\n assert y.compute().shape == (20, 10, 30)\n assert z.compute().shape == (20, 10, 30)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_check_same_dimsizes_test_apply_gufunc_check_coredim_chunksize.assert_consists_of_multi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_check_same_dimsizes_test_apply_gufunc_check_coredim_chunksize.assert_consists_of_multi", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 366, "end_line": 385, "span_ids": ["test_apply_gufunc_check_coredim_chunksize", "test_apply_gufunc_check_same_dimsizes"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_gufunc_check_same_dimsizes():\n def foo(x, y):\n return x + y\n\n a = da.random.normal(size=(3,), chunks=(2,))\n b = da.random.normal(size=(4,), chunks=(2,))\n\n with pytest.raises(ValueError) as excinfo:\n apply_gufunc(foo, \"(),()->()\", a, b, output_dtypes=float, allow_rechunk=True)\n assert \"different lengths in arrays\" in str(excinfo.value)\n\n\ndef test_apply_gufunc_check_coredim_chunksize():\n def foo(x):\n return np.sum(x, axis=-1)\n\n a = da.random.normal(size=(8,), chunks=3)\n with pytest.raises(ValueError) as excinfo:\n da.apply_gufunc(foo, \"(i)->()\", a, output_dtypes=float, allow_rechunk=False)\n assert \"consists of multiple chunks\" in str(excinfo.value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_check_inhomogeneous_chunksize_test_apply_gufunc_check_inhomogeneous_chunksize.assert_with_different_ch": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_check_inhomogeneous_chunksize_test_apply_gufunc_check_inhomogeneous_chunksize.assert_with_different_ch", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 388, "end_line": 399, "span_ids": ["test_apply_gufunc_check_inhomogeneous_chunksize"], "tokens": 126}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_gufunc_check_inhomogeneous_chunksize():\n def foo(x, y):\n return x + y\n\n a = da.random.normal(size=(8,), chunks=((2, 
2, 2, 2),))\n b = da.random.normal(size=(8,), chunks=((2, 3, 3),))\n\n with pytest.raises(ValueError) as excinfo:\n da.apply_gufunc(\n foo, \"(),()->()\", a, b, output_dtypes=float, allow_rechunk=False\n )\n assert \"with different chunksize present\" in str(excinfo.value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_infer_dtype_test_apply_gufunc_infer_dtype.assert_eq_z1_dx_dy_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_infer_dtype_test_apply_gufunc_infer_dtype.assert_eq_z1_dx_dy_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 402, "end_line": 441, "span_ids": ["test_apply_gufunc_infer_dtype"], "tokens": 374}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_gufunc_infer_dtype():\n x = np.arange(50).reshape((5, 10))\n y = np.arange(10)\n dx = da.from_array(x, chunks=5)\n dy = da.from_array(y, chunks=5)\n\n def foo(x, *args, **kwargs):\n cast = kwargs.pop(\"cast\", \"i8\")\n return (x + sum(args)).astype(cast)\n\n dz = apply_gufunc(foo, \"(),(),()->()\", dx, dy, 1)\n z = foo(dx, dy, 1)\n assert_eq(dz, z)\n\n dz = apply_gufunc(foo, \"(),(),()->()\", dx, dy, 1, cast=\"f8\")\n z = foo(dx, dy, 1, cast=\"f8\")\n assert_eq(dz, z)\n\n dz = apply_gufunc(foo, \"(),(),()->()\", dx, dy, 1, cast=\"f8\", output_dtypes=\"f8\")\n z = foo(dx, dy, 1, cast=\"f8\")\n assert_eq(dz, z)\n\n def foo(x):\n raise RuntimeError(\"Woops\")\n\n with pytest.raises(ValueError) as e:\n apply_gufunc(foo, \"()->()\", dx)\n msg = str(e.value)\n assert msg.startswith(\"`dtype` inference failed\")\n assert \"Please specify the dtype explicitly\" in msg\n assert \"RuntimeError\" in msg\n\n # Multiple outputs\n def foo(x, y):\n return x + y, x - y\n\n z0, z1 = apply_gufunc(foo, \"(),()->(),()\", dx, dy)\n\n assert_eq(z0, dx + dy)\n assert_eq(z1, dx - dy)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axis_01_test_apply_gufunc_axis_02.assert_eq_m_dm_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axis_01_test_apply_gufunc_axis_02.assert_eq_m_dm_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 444, "end_line": 468, "span_ids": ["test_apply_gufunc_axis_01", "test_apply_gufunc_axis_02"], "tokens": 223}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"keepdims\", [False, True])\ndef test_apply_gufunc_axis_01(keepdims):\n def mymedian(x):\n return np.median(x, axis=-1)\n\n a = np.random.randn(10, 5)\n da_ = da.from_array(a, chunks=2)\n\n m = np.median(a, axis=0, keepdims=keepdims)\n dm = apply_gufunc(\n mymedian, \"(i)->()\", da_, axis=0, keepdims=keepdims, allow_rechunk=True\n )\n assert_eq(m, dm)\n\n\ndef test_apply_gufunc_axis_02():\n def myfft(x):\n return np.fft.fft(x, axis=-1)\n\n a = np.random.randn(10, 5)\n da_ = da.from_array(a, chunks=2)\n\n m = np.fft.fft(a, axis=0)\n dm = apply_gufunc(myfft, \"(i)->(i)\", da_, axis=0, allow_rechunk=True)\n assert_eq(m, dm)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axis_02b_test_apply_gufunc_axis_02b.assert_eq_m_dm_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axis_02b_test_apply_gufunc_axis_02b.assert_eq_m_dm_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 471, "end_line": 483, "span_ids": ["test_apply_gufunc_axis_02b"], "tokens": 140}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_gufunc_axis_02b():\n def myfilter(x, cn=10, axis=-1):\n y = np.fft.fft(x, axis=axis)\n y[cn:-cn] = 0\n nx = np.fft.ifft(y, axis=axis)\n return np.real(nx)\n\n a = np.random.randn(3, 6, 4)\n da_ = da.from_array(a, chunks=2)\n\n m = myfilter(a, axis=1)\n dm = apply_gufunc(myfilter, \"(i)->(i)\", da_, axis=1, allow_rechunk=True)\n assert_eq(m, dm)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axis_03_test_apply_gufunc_axis_03.assert_eq_m_dm_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axis_03_test_apply_gufunc_axis_03.assert_eq_m_dm_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 486, "end_line": 497, "span_ids": ["test_apply_gufunc_axis_03"], "tokens": 111}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_gufunc_axis_03():\n def mydiff(x):\n return np.diff(x, axis=-1)\n\n a = np.random.randn(3, 6, 4)\n da_ = da.from_array(a, chunks=2)\n\n m = np.diff(a, axis=1)\n dm = apply_gufunc(\n mydiff, \"(i)->(i)\", da_, axis=1, output_sizes={\"i\": 5}, allow_rechunk=True\n )\n assert_eq(m, dm)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axis_keepdims_test_apply_gufunc_axis_keepdims.assert_eq_m_dm_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axis_keepdims_test_apply_gufunc_axis_keepdims.assert_eq_m_dm_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 500, "end_line": 512, "span_ids": ["test_apply_gufunc_axis_keepdims"], "tokens": 126}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"axis\", [-2, -1, None])\ndef test_apply_gufunc_axis_keepdims(axis):\n def mymedian(x):\n return np.median(x, axis=-1)\n\n a = np.random.randn(10, 5)\n da_ = da.from_array(a, chunks=2)\n\n m = np.median(a, axis=-1 if not axis else axis, keepdims=True)\n dm = apply_gufunc(\n mymedian, \"(i)->()\", da_, axis=axis, keepdims=True, allow_rechunk=True\n )\n assert_eq(m, dm)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axes_01_test_apply_gufunc_axes_01.assert_eq_m_dm_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axes_01_test_apply_gufunc_axes_01.assert_eq_m_dm_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 515, "end_line": 527, "span_ids": ["test_apply_gufunc_axes_01"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"axes\", [[0, 1], [(0,), (1,)]])\ndef test_apply_gufunc_axes_01(axes):\n def mystats(x, y):\n return np.std(x, axis=-1) * np.mean(y, axis=-1)\n\n a = np.random.randn(10, 5)\n b = np.random.randn(5, 6)\n da_ = da.from_array(a, chunks=2)\n db_ = da.from_array(b, chunks=2)\n\n m = np.std(a, axis=0) * np.mean(b, axis=1)\n dm = 
apply_gufunc(mystats, \"(i),(j)->()\", da_, db_, axes=axes, allow_rechunk=True)\n assert_eq(m, dm)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axes_02_test_apply_gufunc_axes_02.assert_eq_m_dm_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axes_02_test_apply_gufunc_axes_02.assert_eq_m_dm_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 530, "end_line": 549, "span_ids": ["test_apply_gufunc_axes_02"], "tokens": 178}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_gufunc_axes_02():\n def matmul(x, y):\n return np.einsum(\"...ij,...jk->...ik\", x, y)\n\n a = np.random.randn(3, 2, 1)\n b = np.random.randn(3, 7, 5)\n\n da_ = da.from_array(a, chunks=2)\n db = da.from_array(b, chunks=3)\n\n m = np.einsum(\"jiu,juk->uik\", a, b)\n dm = apply_gufunc(\n matmul,\n \"(i,j),(j,k)->(i,k)\",\n da_,\n db,\n axes=[(1, 0), (0, -1), (-2, -1)],\n allow_rechunk=True,\n )\n assert_eq(m, dm)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axes_two_kept_coredims_test_apply_gufunc_axes_two_kept_coredims.assert_c_compute_shape_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axes_two_kept_coredims_test_apply_gufunc_axes_two_kept_coredims.assert_c_compute_shape_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 552, "end_line": 560, "span_ids": ["test_apply_gufunc_axes_two_kept_coredims"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_gufunc_axes_two_kept_coredims():\n a = da.random.normal(size=(20, 30), chunks=(10, 30))\n b = da.random.normal(size=(10, 1, 40), chunks=(5, 1, 40))\n\n def outer_product(x, y):\n return np.einsum(\"i,j->ij\", x, y)\n\n c = apply_gufunc(outer_product, \"(i),(j)->(i,j)\", a, b, vectorize=True)\n assert c.compute().shape == (10, 20, 30, 40)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_via_numba_01_test_apply_gufunc_via_numba_01.assert_eq_x_y_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_via_numba_01_test_apply_gufunc_via_numba_01.assert_eq_x_y_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 563, "end_line": 579, "span_ids": ["test_apply_gufunc_via_numba_01"], "tokens": 155}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_gufunc_via_numba_01():\n numba = pytest.importorskip(\"numba\")\n\n @numba.guvectorize(\n [(numba.float64[:], numba.float64[:], numba.float64[:])], \"(n),(n)->(n)\"\n )\n def g(x, y, res):\n for i in range(x.shape[0]):\n res[i] = x[i] + y[i]\n\n a = da.random.normal(size=(20, 30), chunks=30)\n b = da.random.normal(size=(20, 30), chunks=30)\n\n x = a + b\n y = g(a, b, axis=0)\n\n assert_eq(x, y)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_via_numba_02_test_apply_gufunc_via_numba_02.assert_eq_x_y_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_via_numba_02_test_apply_gufunc_via_numba_02.assert_eq_x_y_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 582, "end_line": 596, "span_ids": ["test_apply_gufunc_via_numba_02"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_gufunc_via_numba_02():\n numba = pytest.importorskip(\"numba\")\n\n @numba.guvectorize([(numba.float64[:], numba.float64[:])], \"(n)->()\")\n def mysum(x, res):\n res[0] = 0.0\n for i in range(x.shape[0]):\n res[0] += x[i]\n\n a = da.random.normal(size=(20, 30), chunks=5)\n\n x = a.sum(axis=0, keepdims=True)\n y = mysum(a, axis=0, keepdims=True, allow_rechunk=True)\n\n assert_eq(x, y)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_preserve_meta_type_test_preserve_meta_type.assert_eq_mean_mean_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_preserve_meta_type_test_preserve_meta_type.assert_eq_mean_mean_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 599, "end_line": 618, "span_ids": ["test_preserve_meta_type"], "tokens": 203}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n not IS_NEP18_ACTIVE, reason=\"NEP18 required for sparse meta propagation\"\n)\n@pytest.mark.xfail(_numpy_120, reason=\"https://github.com/pydata/sparse/issues/383\")\ndef test_preserve_meta_type():\n sparse = pytest.importorskip(\"sparse\")\n\n def stats(x):\n return np.sum(x, axis=-1), np.mean(x, axis=-1)\n\n a = da.random.normal(size=(10, 20, 30), chunks=(5, 5, 30))\n a = a.map_blocks(sparse.COO.from_numpy)\n sum, mean = apply_gufunc(stats, \"(i)->(),()\", a, output_dtypes=2 * (a.dtype,))\n\n assert isinstance(a._meta, sparse.COO)\n assert isinstance(sum._meta, sparse.COO)\n assert isinstance(mean._meta, sparse.COO)\n\n assert_eq(sum, sum)\n assert_eq(mean, mean)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_with_meta_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_with_meta_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 621, "end_line": 631, "span_ids": ["test_apply_gufunc_with_meta"], "tokens": 136}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_gufunc_with_meta():\n def stats(x):\n return np.mean(x, axis=-1), np.std(x, axis=-1, dtype=np.float32)\n\n a = da.random.normal(size=(10, 20, 30), chunks=(5, 5, 30))\n meta = (np.ones(0, dtype=np.float64), np.ones(0, dtype=np.float32))\n result = apply_gufunc(stats, \"(i)->(),()\", a, meta=meta)\n expected = stats(a.compute())\n assert_eq(expected[0], result[0])\n assert_eq(expected[1], result[1])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_image.py_from_contextlib_import_co_random_images.with_tmpdir_as_dirname_.yield_os_path_join_dirnam": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_image.py_from_contextlib_import_co_random_images.with_tmpdir_as_dirname_.yield_os_path_join_dirnam", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_image.py", "file_name": "test_image.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 22, "span_ids": ["imports", "random_images"], "tokens": 139}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from contextlib import contextmanager\nimport os\n\nimport pytest\n\npytest.importorskip(\"skimage\")\nfrom dask.array.image import imread as da_imread\nimport numpy as np\nfrom skimage.io import imsave\n\nfrom dask.utils import tmpdir\n\n\n@contextmanager\ndef random_images(n, shape):\n with tmpdir() as dirname:\n for i in range(n):\n fn = os.path.join(dirname, \"image.%d.png\" % i)\n x = np.random.randint(0, 255, size=shape).astype(\"u1\")\n imsave(fn, x, check_contrast=False)\n\n yield os.path.join(dirname, \"*.png\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_image.py_test_imread_test_imread.with_random_images_4_5_.assert_im_compute_dtype": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_image.py_test_imread_test_imread.with_random_images_4_5_.assert_im_compute_dtype", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_image.py", "file_name": "test_image.py", "file_type": "text/x-python", "category": "test", "start_line": 25, "end_line": 33, "span_ids": ["test_imread"], "tokens": 118}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_imread():\n with random_images(4, (5, 6, 3)) as globstring:\n im = da_imread(globstring)\n assert im.shape == (4, 5, 6, 3)\n assert im.chunks == ((1, 1, 1, 1), (5,), (6,), (3,))\n assert im.dtype == \"uint8\"\n\n assert im.compute().shape == (4, 5, 6, 3)\n assert im.compute().dtype == \"uint8\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_image.py_test_imread_with_custom_function_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_image.py_test_imread_with_custom_function_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_image.py", "file_name": "test_image.py", "file_type": "text/x-python", "category": "test", "start_line": 36, "end_line": 53, "span_ids": ["test_preprocess", 
"test_imread_with_custom_function"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_imread_with_custom_function():\n def imread2(fn):\n return np.ones((2, 3, 4), dtype=\"i1\")\n\n with random_images(4, (5, 6, 3)) as globstring:\n im = da_imread(globstring, imread=imread2)\n assert (im.compute() == np.ones((4, 2, 3, 4), dtype=\"u1\")).all()\n\n\ndef test_preprocess():\n def preprocess(x):\n x[:] = 1\n return x[:, :, 0]\n\n with random_images(4, (2, 3, 4)) as globstring:\n im = da_imread(globstring, preprocess=preprocess)\n assert (im.compute() == np.ones((4, 2, 3), dtype=\"u1\")).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_pytest_test_tsqr.if_error_type_is_None_.else_.None_1.u_s_vh_tsqr_data_com": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_pytest_test_tsqr.if_error_type_is_None_.else_.None_1.u_s_vh_tsqr_data_com", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 98, "span_ids": ["imports", "test_tsqr"], "tokens": 1104}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\npytest.importorskip(\"numpy\")\npytest.importorskip(\"scipy\")\n\nimport numpy as np\nimport scipy.linalg\n\nimport dask.array as da\nfrom dask.array.linalg import tsqr, sfqr, svd_compressed, qr, svd\nfrom dask.array.utils import assert_eq, same_keys, svd_flip\n\n\n@pytest.mark.parametrize(\n \"m,n,chunks,error_type\",\n [\n (20, 10, 10, None), # tall-skinny regular blocks\n (20, 10, (3, 10), None), # tall-skinny regular fat layers\n (20, 10, ((8, 4, 8), 10), None), # tall-skinny irregular fat layers\n (40, 10, ((15, 5, 5, 8, 7), 10), None), # tall-skinny non-uniform chunks (why?)\n (128, 2, (16, 2), None), # tall-skinny regular thin layers; recursion_depth=1\n (\n 129,\n 2,\n (16, 2),\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2 --> 17x2\n (\n 130,\n 2,\n (16, 2),\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2 --> 18x2 next\n (\n 131,\n 2,\n (16, 2),\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2 --> 18x2 next\n (300, 10, (40, 10), None), # tall-skinny regular thin layers; recursion_depth=2\n (300, 10, (30, 10), None), # tall-skinny regular thin layers; recursion_depth=3\n (300, 10, (20, 10), None), # tall-skinny regular thin layers; recursion_depth=4\n (10, 5, 10, None), # single block tall\n (5, 10, 10, None), # single block short\n (10, 10, 10, None), # single block square\n (10, 40, (10, 10), ValueError), # short-fat regular blocks\n (10, 40, (10, 15), 
ValueError), # short-fat irregular blocks\n (\n 10,\n 40,\n (10, (15, 5, 5, 8, 7)),\n ValueError,\n ), # short-fat non-uniform chunks (why?)\n (20, 20, 10, ValueError), # 2x2 regular blocks\n ],\n)\ndef test_tsqr(m, n, chunks, error_type):\n mat = np.random.rand(m, n)\n data = da.from_array(mat, chunks=chunks, name=\"A\")\n\n # qr\n m_q = m\n n_q = min(m, n)\n m_r = n_q\n n_r = n\n\n # svd\n m_u = m\n n_u = min(m, n)\n n_s = n_q\n m_vh = n_q\n n_vh = n\n d_vh = max(m_vh, n_vh) # full matrix returned\n\n if error_type is None:\n # test QR\n q, r = tsqr(data)\n assert_eq((m_q, n_q), q.shape) # shape check\n assert_eq((m_r, n_r), r.shape) # shape check\n assert_eq(mat, da.dot(q, r)) # accuracy check\n assert_eq(np.eye(n_q, n_q), da.dot(q.T, q)) # q must be orthonormal\n assert_eq(r, da.triu(r.rechunk(r.shape[0]))) # r must be upper triangular\n\n # test SVD\n u, s, vh = tsqr(data, compute_svd=True)\n s_exact = np.linalg.svd(mat)[1]\n assert_eq(s, s_exact) # s must contain the singular values\n assert_eq((m_u, n_u), u.shape) # shape check\n assert_eq((n_s,), s.shape) # shape check\n assert_eq((d_vh, d_vh), vh.shape) # shape check\n assert_eq(np.eye(n_u, n_u), da.dot(u.T, u)) # u must be orthonormal\n assert_eq(np.eye(d_vh, d_vh), da.dot(vh, vh.T)) # vh must be orthonormal\n assert_eq(mat, da.dot(da.dot(u, da.diag(s)), vh[:n_q])) # accuracy check\n else:\n with pytest.raises(error_type):\n q, r = tsqr(data)\n with pytest.raises(error_type):\n u, s, vh = tsqr(data, compute_svd=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_tsqr_uncertain_test_tsqr_uncertain.if_vary_rows_.m.mat_shape_0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_tsqr_uncertain_test_tsqr_uncertain.if_vary_rows_.m.mat_shape_0_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 101, "end_line": 196, "span_ids": ["test_tsqr_uncertain"], "tokens": 748}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"m_min,n_max,chunks,vary_rows,vary_cols,error_type\",\n [\n (10, 5, (10, 5), True, False, None), # single block tall\n (10, 5, (10, 5), False, True, None), # single block tall\n (10, 5, (10, 5), True, True, None), # single block tall\n (40, 5, (10, 5), True, False, None), # multiple blocks tall\n (40, 5, (10, 5), False, True, None), # multiple blocks tall\n (40, 5, (10, 5), True, True, None), # multiple blocks tall\n (\n 300,\n 10,\n (40, 10),\n True,\n False,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n True,\n False,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n True,\n False,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=4\n (\n 300,\n 10,\n (40, 10),\n False,\n True,\n None,\n ), # tall-skinny regular thin 
layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n False,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n False,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=4\n (\n 300,\n 10,\n (40, 10),\n True,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n True,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n True,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=4\n ],\n)\ndef test_tsqr_uncertain(m_min, n_max, chunks, vary_rows, vary_cols, error_type):\n mat = np.random.rand(m_min * 2, n_max)\n m, n = m_min * 2, n_max\n mat[0:m_min, 0] += 1\n _c0 = mat[:, 0]\n _r0 = mat[0, :]\n c0 = da.from_array(_c0, chunks=m_min, name=\"c\")\n r0 = da.from_array(_r0, chunks=n_max, name=\"r\")\n data = da.from_array(mat, chunks=chunks, name=\"A\")\n if vary_rows:\n data = data[c0 > 0.5, :]\n mat = mat[_c0 > 0.5, :]\n m = mat.shape[0]\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_tsqr_uncertain.if_vary_cols__test_tsqr_uncertain._full_matrix_returned": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_tsqr_uncertain.if_vary_cols__test_tsqr_uncertain._full_matrix_returned", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 197, "end_line": 214, "span_ids": ["test_tsqr_uncertain"], "tokens": 731}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"m_min,n_max,chunks,vary_rows,vary_cols,error_type\",\n [\n (10, 5, (10, 5), True, False, None), # single block tall\n (10, 5, (10, 5), False, True, None), # single block tall\n (10, 5, (10, 5), True, True, None), # single block tall\n (40, 5, (10, 5), True, False, None), # multiple blocks tall\n (40, 5, (10, 5), False, True, None), # multiple blocks tall\n (40, 5, (10, 5), True, True, None), # multiple blocks tall\n (\n 300,\n 10,\n (40, 10),\n True,\n False,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n True,\n False,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n True,\n False,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=4\n (\n 300,\n 10,\n (40, 10),\n False,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n False,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n False,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=4\n (\n 300,\n 10,\n (40, 10),\n True,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n True,\n True,\n None,\n ), # 
tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n True,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=4\n ],\n)\ndef test_tsqr_uncertain(m_min, n_max, chunks, vary_rows, vary_cols, error_type):\n # ... other code\n if vary_cols:\n data = data[:, r0 > 0.5]\n mat = mat[:, _r0 > 0.5]\n n = mat.shape[1]\n\n # qr\n m_q = m\n n_q = min(m, n)\n m_r = n_q\n n_r = n\n\n # svd\n m_u = m\n n_u = min(m, n)\n n_s = n_q\n m_vh = n_q\n n_vh = n\n d_vh = max(m_vh, n_vh) # full matrix returned\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_tsqr_uncertain.if_error_type_is_None__test_tsqr_uncertain.if_error_type_is_None_.else_.None_1.u_s_vh_tsqr_data_com": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_tsqr_uncertain.if_error_type_is_None__test_tsqr_uncertain.if_error_type_is_None_.else_.None_1.u_s_vh_tsqr_data_com", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 216, "end_line": 244, "span_ids": ["test_tsqr_uncertain"], "tokens": 978}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"m_min,n_max,chunks,vary_rows,vary_cols,error_type\",\n [\n (10, 5, (10, 5), True, False, None), # single block tall\n (10, 5, (10, 5), False, True, None), # single block tall\n (10, 5, (10, 5), True, True, None), # single block tall\n (40, 5, (10, 5), True, False, None), # multiple blocks tall\n (40, 5, (10, 5), False, True, None), # multiple blocks tall\n (40, 5, (10, 5), True, True, None), # multiple blocks tall\n (\n 300,\n 10,\n (40, 10),\n True,\n False,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n True,\n False,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n True,\n False,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=4\n (\n 300,\n 10,\n (40, 10),\n False,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n False,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n False,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=4\n (\n 300,\n 10,\n (40, 10),\n True,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n True,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n True,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=4\n ],\n)\ndef test_tsqr_uncertain(m_min, n_max, chunks, vary_rows, vary_cols, error_type):\n # ... 
other code\n\n if error_type is None:\n # test QR\n q, r = tsqr(data)\n q = q.compute() # because uncertainty\n r = r.compute()\n assert_eq((m_q, n_q), q.shape) # shape check\n assert_eq((m_r, n_r), r.shape) # shape check\n assert_eq(mat, np.dot(q, r)) # accuracy check\n assert_eq(np.eye(n_q, n_q), np.dot(q.T, q)) # q must be orthonormal\n assert_eq(r, np.triu(r)) # r must be upper triangular\n\n # test SVD\n u, s, vh = tsqr(data, compute_svd=True)\n u = u.compute() # because uncertainty\n s = s.compute()\n vh = vh.compute()\n s_exact = np.linalg.svd(mat)[1]\n assert_eq(s, s_exact) # s must contain the singular values\n assert_eq((m_u, n_u), u.shape) # shape check\n assert_eq((n_s,), s.shape) # shape check\n assert_eq((d_vh, d_vh), vh.shape) # shape check\n assert_eq(np.eye(n_u, n_u), np.dot(u.T, u)) # u must be orthonormal\n assert_eq(np.eye(d_vh, d_vh), np.dot(vh, vh.T)) # vh must be orthonormal\n assert_eq(mat, np.dot(np.dot(u, np.diag(s)), vh[:n_q])) # accuracy check\n else:\n with pytest.raises(error_type):\n q, r = tsqr(data)\n with pytest.raises(error_type):\n u, s, vh = tsqr(data, compute_svd=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_tsqr_zero_height_chunks_test_tsqr_zero_height_chunks.None_13": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_tsqr_zero_height_chunks_test_tsqr_zero_height_chunks.None_13", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 247, "end_line": 276, "span_ids": ["test_tsqr_zero_height_chunks"], "tokens": 400}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tsqr_zero_height_chunks():\n m_q = 10\n n_q = 5\n m_r = 5\n n_r = 5\n\n # certainty\n mat = np.random.rand(10, 5)\n x = da.from_array(mat, chunks=((4, 0, 1, 0, 5), (5,)))\n q, r = da.linalg.qr(x)\n assert_eq((m_q, n_q), q.shape) # shape check\n assert_eq((m_r, n_r), r.shape) # shape check\n assert_eq(mat, da.dot(q, r)) # accuracy check\n assert_eq(np.eye(n_q, n_q), da.dot(q.T, q)) # q must be orthonormal\n assert_eq(r, da.triu(r.rechunk(r.shape[0]))) # r must be upper triangular\n\n # uncertainty\n mat2 = np.vstack([mat, -(np.ones((10, 5)))])\n v2 = mat2[:, 0]\n x2 = da.from_array(mat2, chunks=5)\n c = da.from_array(v2, chunks=5)\n x = x2[c >= 0, :] # remove the ones added above to yield mat\n q, r = da.linalg.qr(x)\n q = q.compute() # because uncertainty\n r = r.compute()\n assert_eq((m_q, n_q), q.shape) # shape check\n assert_eq((m_r, n_r), r.shape) # shape check\n assert_eq(mat, np.dot(q, r)) # accuracy check\n assert_eq(np.eye(n_q, n_q), np.dot(q.T, q)) # q must be orthonormal\n assert_eq(r, np.triu(r)) # r must be upper triangular", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": 
"TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_sfqr_test_sfqr.if_error_type_is_None_.else_.with_pytest_raises_error_.q_r_sfqr_data_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_sfqr_test_sfqr.if_error_type_is_None_.else_.with_pytest_raises_error_.q_r_sfqr_data_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 279, "end_line": 360, "span_ids": ["test_sfqr"], "tokens": 793}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"m,n,chunks,error_type\",\n [\n (20, 10, 10, ValueError), # tall-skinny regular blocks\n (20, 10, (3, 10), ValueError), # tall-skinny regular fat layers\n (20, 10, ((8, 4, 8), 10), ValueError), # tall-skinny irregular fat layers\n (\n 40,\n 10,\n ((15, 5, 5, 8, 7), 10),\n ValueError,\n ), # tall-skinny non-uniform chunks (why?)\n (\n 128,\n 2,\n (16, 2),\n ValueError,\n ), # tall-skinny regular thin layers; recursion_depth=1\n (\n 129,\n 2,\n (16, 2),\n ValueError,\n ), # tall-skinny regular thin layers; recursion_depth=2 --> 17x2\n (\n 130,\n 2,\n (16, 2),\n ValueError,\n ), # tall-skinny regular thin layers; recursion_depth=2 --> 18x2 next\n (\n 131,\n 2,\n (16, 2),\n ValueError,\n ), # tall-skinny regular thin layers; recursion_depth=2 --> 18x2 next\n (\n 300,\n 10,\n (40, 10),\n ValueError,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n ValueError,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n ValueError,\n ), # tall-skinny regular thin layers; recursion_depth=4\n (10, 5, 10, None), # single block tall\n (5, 10, 10, None), # single block short\n (10, 10, 10, None), # single block square\n (10, 40, (10, 10), None), # short-fat regular blocks\n (10, 40, (10, 15), None), # short-fat irregular blocks\n (10, 40, (10, (15, 5, 5, 8, 7)), None), # short-fat non-uniform chunks (why?)\n (20, 20, 10, ValueError), # 2x2 regular blocks\n ],\n)\ndef test_sfqr(m, n, chunks, error_type):\n mat = np.random.rand(m, n)\n data = da.from_array(mat, chunks=chunks, name=\"A\")\n m_q = m\n n_q = min(m, n)\n m_r = n_q\n n_r = n\n m_qtq = n_q\n\n if error_type is None:\n q, r = sfqr(data)\n assert_eq((m_q, n_q), q.shape) # shape check\n assert_eq((m_r, n_r), r.shape) # shape check\n assert_eq(mat, da.dot(q, r)) # accuracy check\n assert_eq(np.eye(m_qtq, m_qtq), da.dot(q.T, q)) # q must be orthonormal\n assert_eq(r, da.triu(r.rechunk(r.shape[0]))) # r must be upper triangular\n else:\n with pytest.raises(error_type):\n q, r = sfqr(data)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_qr_test_qr.if_error_type_is_None_.else_.with_pytest_raises_error_.q_r_qr_data_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_qr_test_qr.if_error_type_is_None_.else_.with_pytest_raises_error_.q_r_qr_data_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 363, "end_line": 419, "span_ids": ["test_qr"], "tokens": 756}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"m,n,chunks,error_type\",\n [\n (20, 10, 10, None), # tall-skinny regular blocks\n (20, 10, (3, 10), None), # tall-skinny regular fat layers\n (20, 10, ((8, 4, 8), 10), None), # tall-skinny irregular fat layers\n (40, 10, ((15, 5, 5, 8, 7), 10), None), # tall-skinny non-uniform chunks (why?)\n (128, 2, (16, 2), None), # tall-skinny regular thin layers; recursion_depth=1\n (\n 129,\n 2,\n (16, 2),\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2 --> 17x2\n (\n 130,\n 2,\n (16, 2),\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2 --> 18x2 next\n (\n 131,\n 2,\n (16, 2),\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2 --> 18x2 next\n (300, 10, (40, 10), None), # tall-skinny regular thin layers; recursion_depth=2\n (300, 10, (30, 10), None), # tall-skinny regular thin layers; recursion_depth=3\n (300, 10, (20, 10), None), # tall-skinny regular thin layers; recursion_depth=4\n (10, 5, 10, None), # single block tall\n (5, 10, 10, None), # single block short\n (10, 10, 10, None), # single block square\n (10, 40, (10, 10), None), # short-fat regular blocks\n (10, 40, (10, 15), None), # short-fat irregular blocks\n (10, 40, (10, (15, 5, 5, 8, 7)), None), # short-fat non-uniform chunks (why?)\n (20, 20, 10, NotImplementedError), # 2x2 regular blocks\n ],\n)\ndef test_qr(m, n, chunks, error_type):\n mat = np.random.rand(m, n)\n data = da.from_array(mat, chunks=chunks, name=\"A\")\n m_q = m\n n_q = min(m, n)\n m_r = n_q\n n_r = n\n m_qtq = n_q\n\n if error_type is None:\n q, r = qr(data)\n assert_eq((m_q, n_q), q.shape) # shape check\n assert_eq((m_r, n_r), r.shape) # shape check\n assert_eq(mat, da.dot(q, r)) # accuracy check\n assert_eq(np.eye(m_qtq, m_qtq), da.dot(q.T, q)) # q must be orthonormal\n assert_eq(r, da.triu(r.rechunk(r.shape[0]))) # r must be upper triangular\n else:\n with pytest.raises(error_type):\n q, r = qr(data)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_linalg_consistent_names_test_linalg_consistent_names.assert_same_keys_v1_v2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_linalg_consistent_names_test_linalg_consistent_names.assert_same_keys_v1_v2_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 422, "end_line": 436, "span_ids": 
["test_linalg_consistent_names"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_linalg_consistent_names():\n m, n = 20, 10\n mat = np.random.rand(m, n)\n data = da.from_array(mat, chunks=(10, n), name=\"A\")\n\n q1, r1 = qr(data)\n q2, r2 = qr(data)\n assert same_keys(q1, q2)\n assert same_keys(r1, r2)\n\n u1, s1, v1 = svd(data)\n u2, s2, v2 = svd(data)\n assert same_keys(u1, u2)\n assert same_keys(s1, s2)\n assert same_keys(v1, v2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_dask_svd_self_consistent_test_dask_svd_self_consistent.for_d_e_e_in_zip_d_u_d.assert_d_e_dtype_e_dty": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_dask_svd_self_consistent_test_dask_svd_self_consistent.for_d_e_e_in_zip_d_u_d.assert_d_e_dtype_e_dty", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 439, "end_line": 449, "span_ids": ["test_dask_svd_self_consistent"], "tokens": 147}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"m,n\", [(10, 20), (15, 15), (20, 10)])\ndef test_dask_svd_self_consistent(m, n):\n a = np.random.rand(m, n)\n d_a = da.from_array(a, chunks=(3, n), name=\"A\")\n\n d_u, d_s, d_vt = da.linalg.svd(d_a)\n u, s, vt = da.compute(d_u, d_s, d_vt)\n\n for d_e, e in zip([d_u, d_s, d_vt], [u, s, vt]):\n assert d_e.shape == e.shape\n assert d_e.dtype == e.dtype", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_compressed_test_svd_compressed._s_must_contain_the_sing": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_compressed_test_svd_compressed._s_must_contain_the_sing", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 452, "end_line": 480, "span_ids": ["test_svd_compressed"], "tokens": 279}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, 
"text": "@pytest.mark.slow\ndef test_svd_compressed():\n m, n = 2000, 250\n r = 10\n np.random.seed(4321)\n mat1 = np.random.randn(m, r)\n mat2 = np.random.randn(r, n)\n mat = mat1.dot(mat2)\n data = da.from_array(mat, chunks=(500, 50))\n\n u, s, vt = svd_compressed(data, r, seed=4321, n_power_iter=2)\n\n usvt = da.dot(u, da.dot(da.diag(s), vt))\n\n tol = 0.2\n assert_eq(\n da.linalg.norm(usvt), np.linalg.norm(mat), rtol=tol, atol=tol\n ) # average accuracy check\n\n u = u[:, :r]\n s = s[:r]\n vt = vt[:r, :]\n\n s_exact = np.linalg.svd(mat)[1]\n s_exact = s_exact[:r]\n\n assert_eq(np.eye(r, r), da.dot(u.T, u)) # u must be orthonormal\n assert_eq(np.eye(r, r), da.dot(vt, vt.T)) # v must be orthonormal\n assert_eq(s, s_exact) # s must contain the singular values", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_compressed_shapes_test_svd_compressed_shapes.assert_v_shape_r_n_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_compressed_shapes_test_svd_compressed_shapes.assert_v_shape_r_n_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 492, "end_line": 503, "span_ids": ["test_svd_compressed_shapes"], "tokens": 176}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"m\", [5, 10, 15, 20])\n@pytest.mark.parametrize(\"n\", [5, 10, 15, 20])\n@pytest.mark.parametrize(\"k\", [5])\n@pytest.mark.parametrize(\"chunks\", [(5, 10), (10, 5)])\ndef test_svd_compressed_shapes(m, n, k, chunks):\n x = da.random.random(size=(m, n), chunks=chunks)\n u, s, v = svd_compressed(x, k=k, n_power_iter=1, compute=True, seed=1)\n u, s, v = da.compute(u, s, v)\n r = min(m, n, k)\n assert u.shape == (m, r)\n assert s.shape == (r,)\n assert v.shape == (r, n)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_compressed_compute__check_lu_result.assert_eq_u_da_triu_u_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_compressed_compute__check_lu_result.assert_eq_u_da_triu_u_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 506, "end_line": 521, "span_ids": ["_check_lu_result", "test_svd_compressed_compute"], "tokens": 170}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_svd_compressed_compute():\n x = da.ones((100, 100), chunks=(10, 10))\n u, s, v = da.linalg.svd_compressed(x, k=2, n_power_iter=0, compute=True, seed=123)\n uu, ss, vv = da.linalg.svd_compressed(x, k=2, n_power_iter=0, seed=123)\n\n assert len(v.dask) < len(vv.dask)\n\n assert_eq(v, vv)\n\n\ndef _check_lu_result(p, l, u, A):\n assert np.allclose(p.dot(l).dot(u), A)\n\n # check triangulars\n assert_eq(l, da.tril(l), check_graph=False)\n assert_eq(u, da.triu(u), check_graph=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_lu_1_test_lu_1.for_A_chunk_in_zip_A3_._check_lu_result_dp_dl_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_lu_1_test_lu_1.for_A_chunk_in_zip_A3_._check_lu_result_dp_dl_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 524, "end_line": 563, "span_ids": ["test_lu_1"], "tokens": 500}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_lu_1():\n A1 = np.array([[7, 3, -1, 2], [3, 8, 1, -4], [-1, 1, 4, -1], [2, -4, -1, 6]])\n\n A2 = np.array(\n [\n [7, 0, 0, 0, 0, 0],\n [0, 8, 0, 0, 0, 0],\n [0, 0, 4, 0, 0, 0],\n [0, 0, 0, 6, 0, 0],\n [0, 0, 0, 0, 3, 0],\n [0, 0, 0, 0, 0, 5],\n ]\n )\n # without shuffle\n for A, chunk in zip([A1, A2], [2, 2]):\n dA = da.from_array(A, chunks=(chunk, chunk))\n p, l, u = scipy.linalg.lu(A)\n dp, dl, du = da.linalg.lu(dA)\n assert_eq(p, dp, check_graph=False)\n assert_eq(l, dl, check_graph=False)\n assert_eq(u, du, check_graph=False)\n _check_lu_result(dp, dl, du, A)\n\n A3 = np.array(\n [\n [7, 3, 2, 1, 4, 1],\n [7, 11, 5, 2, 5, 2],\n [21, 25, 16, 10, 16, 5],\n [21, 41, 18, 13, 16, 11],\n [14, 46, 23, 24, 21, 22],\n [0, 56, 29, 17, 14, 8],\n ]\n )\n\n # with shuffle\n for A, chunk in zip([A3], [2]):\n dA = da.from_array(A, chunks=(chunk, chunk))\n p, l, u = scipy.linalg.lu(A)\n dp, dl, du = da.linalg.lu(dA)\n _check_lu_result(dp, dl, du, A)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_lu_2_test_lu_3._check_lu_result_dp_dl_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_lu_2_test_lu_3._check_lu_result_dp_dl_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 566, "end_line": 586, "span_ids": ["test_lu_2", "test_lu_3"], "tokens": 209}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.slow\n@pytest.mark.parametrize(\"size\", [10, 20, 30, 50])\n@pytest.mark.filterwarnings(\"ignore:Increasing:dask.array.core.PerformanceWarning\")\ndef test_lu_2(size):\n np.random.seed(10)\n A = np.random.randint(0, 10, (size, size))\n\n dA = da.from_array(A, chunks=(5, 5))\n dp, dl, du = da.linalg.lu(dA)\n _check_lu_result(dp, dl, du, A)\n\n\n@pytest.mark.slow\n@pytest.mark.parametrize(\"size\", [50, 100, 200])\ndef test_lu_3(size):\n np.random.seed(10)\n A = np.random.randint(0, 10, (size, size))\n\n dA = da.from_array(A, chunks=(25, 25))\n dp, dl, du = da.linalg.lu(dA)\n _check_lu_result(dp, dl, du, A)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_lu_errors_test_lu_errors.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_lu_errors_test_lu_errors.None_2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 589, "end_line": 600, "span_ids": ["test_lu_errors"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_lu_errors():\n A = np.random.randint(0, 11, (10, 10, 10))\n dA = da.from_array(A, chunks=(5, 5, 5))\n pytest.raises(ValueError, lambda: da.linalg.lu(dA))\n\n A = np.random.randint(0, 11, (10, 8))\n dA = da.from_array(A, chunks=(5, 4))\n pytest.raises(ValueError, lambda: da.linalg.lu(dA))\n\n A = np.random.randint(0, 11, (20, 20))\n dA = da.from_array(A, chunks=(5, 4))\n pytest.raises(ValueError, lambda: da.linalg.lu(dA))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_solve_triangular_vector_test_solve_triangular_vector.assert_eq_dAl_dot_res_b": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_solve_triangular_vector_test_solve_triangular_vector.assert_eq_dAl_dot_res_b", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 603, "end_line": 624, "span_ids": ["test_solve_triangular_vector"], "tokens": 235}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize((\"shape\", \"chunk\"), [(20, 10), (50, 10), (70, 20)])\ndef test_solve_triangular_vector(shape, chunk):\n np.random.seed(1)\n\n A = np.random.randint(1, 11, (shape, shape))\n b = np.random.randint(1, 11, shape)\n\n # upper\n Au = np.triu(A)\n dAu = da.from_array(Au, (chunk, chunk))\n db = da.from_array(b, chunk)\n res = da.linalg.solve_triangular(dAu, db)\n assert_eq(res, scipy.linalg.solve_triangular(Au, b))\n assert_eq(dAu.dot(res), b.astype(float))\n\n # lower\n Al = np.tril(A)\n dAl = da.from_array(Al, (chunk, chunk))\n db = da.from_array(b, chunk)\n res = da.linalg.solve_triangular(dAl, db, lower=True)\n assert_eq(res, scipy.linalg.solve_triangular(Al, b, lower=True))\n assert_eq(dAl.dot(res), b.astype(float))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_solve_triangular_matrix_test_solve_triangular_matrix.assert_eq_dAl_dot_res_b": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_solve_triangular_matrix_test_solve_triangular_matrix.assert_eq_dAl_dot_res_b", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 627, "end_line": 648, "span_ids": ["test_solve_triangular_matrix"], "tokens": 247}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize((\"shape\", \"chunk\"), [(20, 10), (50, 10), (50, 20)])\ndef test_solve_triangular_matrix(shape, chunk):\n np.random.seed(1)\n\n A = np.random.randint(1, 10, (shape, shape))\n b = np.random.randint(1, 10, (shape, 5))\n\n # upper\n Au = np.triu(A)\n dAu = da.from_array(Au, (chunk, chunk))\n db = da.from_array(b, (chunk, 5))\n res = da.linalg.solve_triangular(dAu, db)\n assert_eq(res, scipy.linalg.solve_triangular(Au, b))\n assert_eq(dAu.dot(res), b.astype(float))\n\n # lower\n Al = np.tril(A)\n dAl = da.from_array(Al, (chunk, chunk))\n db = da.from_array(b, (chunk, 5))\n res = da.linalg.solve_triangular(dAl, db, lower=True)\n assert_eq(res, scipy.linalg.solve_triangular(Al, b, lower=True))\n assert_eq(dAl.dot(res), b.astype(float))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_solve_triangular_matrix2_test_solve_triangular_matrix2.assert_eq_dAl_dot_res_b": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_solve_triangular_matrix2_test_solve_triangular_matrix2.assert_eq_dAl_dot_res_b", "embedding": null, "metadata": 
{"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 651, "end_line": 672, "span_ids": ["test_solve_triangular_matrix2"], "tokens": 245}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize((\"shape\", \"chunk\"), [(20, 10), (50, 10), (50, 20)])\ndef test_solve_triangular_matrix2(shape, chunk):\n np.random.seed(1)\n\n A = np.random.randint(1, 10, (shape, shape))\n b = np.random.randint(1, 10, (shape, shape))\n\n # upper\n Au = np.triu(A)\n dAu = da.from_array(Au, (chunk, chunk))\n db = da.from_array(b, (chunk, chunk))\n res = da.linalg.solve_triangular(dAu, db)\n assert_eq(res, scipy.linalg.solve_triangular(Au, b))\n assert_eq(dAu.dot(res), b.astype(float))\n\n # lower\n Al = np.tril(A)\n dAl = da.from_array(Al, (chunk, chunk))\n db = da.from_array(b, (chunk, chunk))\n res = da.linalg.solve_triangular(dAl, db, lower=True)\n assert_eq(res, scipy.linalg.solve_triangular(Al, b, lower=True))\n assert_eq(dAl.dot(res), b.astype(float))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_solve_triangular_errors_test_solve_triangular_errors.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_solve_triangular_errors_test_solve_triangular_errors.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 675, "end_line": 686, "span_ids": ["test_solve_triangular_errors"], "tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_solve_triangular_errors():\n A = np.random.randint(0, 10, (10, 10, 10))\n b = np.random.randint(1, 10, 10)\n dA = da.from_array(A, chunks=(5, 5, 5))\n db = da.from_array(b, chunks=5)\n pytest.raises(ValueError, lambda: da.linalg.solve_triangular(dA, db))\n\n A = np.random.randint(0, 10, (10, 10))\n b = np.random.randint(1, 10, 10)\n dA = da.from_array(A, chunks=(3, 3))\n db = da.from_array(b, chunks=5)\n pytest.raises(ValueError, lambda: da.linalg.solve_triangular(dA, db))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_solve_test_solve.None_6": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_solve_test_solve.None_6", "embedding": null, "metadata": 
{"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 689, "end_line": 718, "span_ids": ["test_solve"], "tokens": 298}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize((\"shape\", \"chunk\"), [(20, 10), (50, 10)])\ndef test_solve(shape, chunk):\n np.random.seed(1)\n\n A = np.random.randint(1, 10, (shape, shape))\n dA = da.from_array(A, (chunk, chunk))\n\n # vector\n b = np.random.randint(1, 10, shape)\n db = da.from_array(b, chunk)\n\n res = da.linalg.solve(dA, db)\n assert_eq(res, scipy.linalg.solve(A, b), check_graph=False)\n assert_eq(dA.dot(res), b.astype(float), check_graph=False)\n\n # tall-and-skinny matrix\n b = np.random.randint(1, 10, (shape, 5))\n db = da.from_array(b, (chunk, 5))\n\n res = da.linalg.solve(dA, db)\n assert_eq(res, scipy.linalg.solve(A, b), check_graph=False)\n assert_eq(dA.dot(res), b.astype(float), check_graph=False)\n\n # matrix\n b = np.random.randint(1, 10, (shape, shape))\n db = da.from_array(b, (chunk, chunk))\n\n res = da.linalg.solve(dA, db)\n assert_eq(res, scipy.linalg.solve(A, b), check_graph=False)\n assert_eq(dA.dot(res), b.astype(float), check_graph=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_inv__get_symmat.return.lA_dot_lA_T_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_inv__get_symmat.return.lA_dot_lA_T_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 721, "end_line": 737, "span_ids": ["test_inv", "_get_symmat"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize((\"shape\", \"chunk\"), [(20, 10), (50, 10)])\ndef test_inv(shape, chunk):\n np.random.seed(1)\n\n A = np.random.randint(1, 10, (shape, shape))\n dA = da.from_array(A, (chunk, chunk))\n\n res = da.linalg.inv(dA)\n assert_eq(res, scipy.linalg.inv(A), check_graph=False)\n assert_eq(dA.dot(res), np.eye(shape, dtype=float), check_graph=False)\n\n\ndef _get_symmat(size):\n np.random.seed(1)\n A = np.random.randint(1, 21, (size, size))\n lA = np.tril(A)\n return lA.dot(lA.T)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_solve_sym_pos_test_solve_sym_pos.None_6": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_solve_sym_pos_test_solve_sym_pos.None_6", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 740, "end_line": 769, "span_ids": ["test_solve_sym_pos"], "tokens": 316}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize((\"shape\", \"chunk\"), [(20, 10), (30, 6)])\ndef test_solve_sym_pos(shape, chunk):\n np.random.seed(1)\n\n A = _get_symmat(shape)\n dA = da.from_array(A, (chunk, chunk))\n\n # vector\n b = np.random.randint(1, 10, shape)\n db = da.from_array(b, chunk)\n\n res = da.linalg.solve(dA, db, sym_pos=True)\n assert_eq(res, scipy.linalg.solve(A, b, sym_pos=True), check_graph=False)\n assert_eq(dA.dot(res), b.astype(float), check_graph=False)\n\n # tall-and-skinny matrix\n b = np.random.randint(1, 10, (shape, 5))\n db = da.from_array(b, (chunk, 5))\n\n res = da.linalg.solve(dA, db, sym_pos=True)\n assert_eq(res, scipy.linalg.solve(A, b, sym_pos=True), check_graph=False)\n assert_eq(dA.dot(res), b.astype(float), check_graph=False)\n\n # matrix\n b = np.random.randint(1, 10, (shape, shape))\n db = da.from_array(b, (chunk, chunk))\n\n res = da.linalg.solve(dA, db, sym_pos=True)\n assert_eq(res, scipy.linalg.solve(A, b, sym_pos=True), check_graph=False)\n assert_eq(dA.dot(res), b.astype(float), check_graph=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_cholesky_test_cholesky.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_cholesky_test_cholesky.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 772, "end_line": 782, "span_ids": ["test_cholesky"], "tokens": 126}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize((\"shape\", \"chunk\"), [(20, 10), (12, 3), (30, 3), (30, 6)])\ndef test_cholesky(shape, chunk):\n\n A = _get_symmat(shape)\n dA = da.from_array(A, (chunk, chunk))\n assert_eq(da.linalg.cholesky(dA), scipy.linalg.cholesky(A), check_graph=False)\n assert_eq(\n da.linalg.cholesky(dA, lower=True),\n scipy.linalg.cholesky(A, lower=True),\n check_graph=False,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_no_chunks_svd_test_no_chunks_svd.for_chunks_in_np_nan_.assert_eq_abs_u_abs_du_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_no_chunks_svd_test_no_chunks_svd.for_chunks_in_np_nan_.assert_eq_abs_u_abs_du_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 814, "end_line": 832, "span_ids": ["test_no_chunks_svd"], "tokens": 213}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_no_chunks_svd():\n x = np.random.random((100, 10))\n u, s, v = np.linalg.svd(x, full_matrices=False)\n\n for chunks in [((np.nan,) * 10, (10,)), ((np.nan,) * 10, (np.nan,))]:\n dx = da.from_array(x, chunks=(10, 10))\n dx._chunks = chunks\n\n du, ds, dv = da.linalg.svd(dx)\n\n assert_eq(s, ds)\n assert_eq(u.dot(np.diag(s)).dot(v), du.dot(da.diag(ds)).dot(dv))\n assert_eq(du.T.dot(du), np.eye(10))\n assert_eq(dv.T.dot(dv), np.eye(10))\n\n dx = da.from_array(x, chunks=(10, 10))\n dx._chunks = ((np.nan,) * 10, (np.nan,))\n assert_eq(abs(v), abs(dv))\n assert_eq(abs(u), abs(du))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_supported_array_shapes_test_svd_supported_array_shapes.assert_eq_dv_nv_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_supported_array_shapes_test_svd_supported_array_shapes.assert_eq_dv_nv_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 852, "end_line": 873, "span_ids": ["test_svd_supported_array_shapes"], "tokens": 262}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"chunks\", [(10, -1), (-1, 10), (9, -1), (-1, 9)])\n@pytest.mark.parametrize(\"shape\", [(10, 100), (100, 10), (10, 10)])\ndef test_svd_supported_array_shapes(chunks, shape):\n # Test the following cases for tall-skinny, short-fat and square arrays:\n # - no chunking\n # - chunking that contradicts shape (e.g. a 10x100 array with 9x100 chunks)\n # - chunking that aligns with shape (e.g. 
a 10x100 array with 10x9 chunks)\n x = np.random.random(shape)\n dx = da.from_array(x, chunks=chunks)\n\n du, ds, dv = da.linalg.svd(dx)\n du, dv = da.compute(du, dv)\n\n nu, ns, nv = np.linalg.svd(x, full_matrices=False)\n\n # Correct signs before comparison\n du, dv = svd_flip(du, dv)\n nu, nv = svd_flip(nu, nv)\n\n assert_eq(du, nu)\n assert_eq(ds, ns)\n assert_eq(dv, nv)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_incompatible_chunking_test_svd_incompatible_dimensions.with_pytest_raises_ValueE.da_linalg_svd_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_incompatible_chunking_test_svd_incompatible_dimensions.with_pytest_raises_ValueE.da_linalg_svd_x_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 876, "end_line": 888, "span_ids": ["test_svd_incompatible_chunking", "test_svd_incompatible_dimensions"], "tokens": 126}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_svd_incompatible_chunking():\n with pytest.raises(\n NotImplementedError, match=\"Array must be chunked in one dimension only\"\n ):\n x = da.random.random((10, 10), chunks=(5, 5))\n da.linalg.svd(x)\n\n\n@pytest.mark.parametrize(\"ndim\", [0, 1, 3])\ndef test_svd_incompatible_dimensions(ndim):\n with pytest.raises(ValueError, match=\"Array must be 2D\"):\n x = da.random.random((10,) * ndim, chunks=(-1,) * ndim)\n da.linalg.svd(x)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_norm_any_ndim_test_norm_any_ndim.assert_eq_a_r_d_r_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_norm_any_ndim_test_norm_any_ndim.assert_eq_a_r_d_r_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 891, "end_line": 904, "span_ids": ["test_norm_any_ndim"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"shape, chunks, axis\",\n [[(5,), (2,), None], [(5,), (2,), 0], [(5,), (2,), (0,)], [(5, 6), (2, 2), None]],\n)\n@pytest.mark.parametrize(\"norm\", [None, 1, -1, np.inf, -np.inf])\n@pytest.mark.parametrize(\"keepdims\", [False, True])\ndef 
test_norm_any_ndim(shape, chunks, axis, norm, keepdims):\n a = np.random.random(shape)\n d = da.from_array(a, chunks=chunks)\n\n a_r = np.linalg.norm(a, ord=norm, axis=axis, keepdims=keepdims)\n d_r = da.linalg.norm(d, ord=norm, axis=axis, keepdims=keepdims)\n\n assert_eq(a_r, d_r)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_norm_any_slice_test_norm_any_slice.for_firstaxis_in_range_le.for_secondaxis_in_range_l.assert_eq_a_r_d_r_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_norm_any_slice_test_norm_any_slice.for_firstaxis_in_range_le.for_secondaxis_in_range_l.assert_eq_a_r_d_r_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 907, "end_line": 932, "span_ids": ["test_norm_any_slice"], "tokens": 284}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.slow\n@pytest.mark.parametrize(\n \"shape, chunks\",\n [\n [(5,), (2,)],\n [(5, 3), (2, 2)],\n [(4, 5, 3), (2, 2, 2)],\n [(4, 5, 2, 3), (2, 2, 2, 2)],\n [(2, 5, 2, 4, 3), (2, 2, 2, 2, 2)],\n ],\n)\n@pytest.mark.parametrize(\"norm\", [None, 1, -1, np.inf, -np.inf])\n@pytest.mark.parametrize(\"keepdims\", [False, True])\ndef test_norm_any_slice(shape, chunks, norm, keepdims):\n a = np.random.random(shape)\n d = da.from_array(a, chunks=chunks)\n\n for firstaxis in range(len(shape)):\n for secondaxis in range(len(shape)):\n if firstaxis != secondaxis:\n axis = (firstaxis, secondaxis)\n else:\n axis = firstaxis\n a_r = np.linalg.norm(a, ord=norm, axis=axis, keepdims=keepdims)\n d_r = da.linalg.norm(d, ord=norm, axis=axis, keepdims=keepdims)\n assert_eq(a_r, d_r)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_norm_1dim_test_norm_1dim.assert_eq_a_r_d_r_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_norm_1dim_test_norm_1dim.assert_eq_a_r_d_r_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 935, "end_line": 946, "span_ids": ["test_norm_1dim"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"shape, chunks, axis\", [[(5,), (2,), None], [(5,), (2,), 0], 
[(5,), (2,), (0,)]]\n)\n@pytest.mark.parametrize(\"norm\", [0, 2, -2, 0.5])\n@pytest.mark.parametrize(\"keepdims\", [False, True])\ndef test_norm_1dim(shape, chunks, axis, norm, keepdims):\n a = np.random.random(shape)\n d = da.from_array(a, chunks=chunks)\n\n a_r = np.linalg.norm(a, ord=norm, axis=axis, keepdims=keepdims)\n d_r = da.linalg.norm(d, ord=norm, axis=axis, keepdims=keepdims)\n assert_eq(a_r, d_r)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_norm_2dim_test_norm_2dim.assert_eq_a_r_d_r_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_norm_2dim_test_norm_2dim.assert_eq_a_r_d_r_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 949, "end_line": 966, "span_ids": ["test_norm_2dim"], "tokens": 234}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"shape, chunks, axis\",\n [[(5, 6), (2, 2), None], [(5, 6), (2, 2), (0, 1)], [(5, 6), (2, 2), (1, 0)]],\n)\n@pytest.mark.parametrize(\"norm\", [\"fro\", \"nuc\", 2, -2])\n@pytest.mark.parametrize(\"keepdims\", [False, True])\ndef test_norm_2dim(shape, chunks, axis, norm, keepdims):\n a = np.random.random(shape)\n d = da.from_array(a, chunks=chunks)\n\n # Need one chunk on last dimension for svd.\n if norm == \"nuc\" or norm == 2 or norm == -2:\n d = d.rechunk({-1: -1})\n\n a_r = np.linalg.norm(a, ord=norm, axis=axis, keepdims=keepdims)\n d_r = da.linalg.norm(d, ord=norm, axis=axis, keepdims=keepdims)\n\n assert_eq(a_r, d_r)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_norm_implemented_errors_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_norm_implemented_errors_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 969, "end_line": 981, "span_ids": ["test_norm_implemented_errors"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"shape, chunks, axis\",\n [[(3, 2, 4), (2, 2, 2), (1, 2)], [(2, 3, 4, 5), (2, 2, 2, 2), (-1, -2)]],\n)\n@pytest.mark.parametrize(\"norm\", [\"nuc\", 2, -2])\n@pytest.mark.parametrize(\"keepdims\", [False, 
True])\ndef test_norm_implemented_errors(shape, chunks, axis, norm, keepdims):\n a = np.random.random(shape)\n d = da.from_array(a, chunks=chunks)\n if len(shape) > 2 and len(axis) == 2:\n with pytest.raises(NotImplementedError):\n da.linalg.norm(d, ord=norm, axis=axis, keepdims=keepdims)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linearoperator.py_pytest_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linearoperator.py_pytest_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linearoperator.py", "file_name": "test_linearoperator.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 31, "span_ids": ["imports", "test_LinearOperator"], "tokens": 238}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\npytest.importorskip(\"scipy\")\n\nimport numpy as np\nimport dask.array as da\nimport scipy.sparse.linalg\n\n\ndef test_LinearOperator():\n X = np.random.random(size=(3, 2))\n y = np.random.random(size=(2, 1))\n w = np.random.random(size=(3, 1))\n square = np.random.random(size=(2, 2))\n\n dX = da.from_array(X, chunks=(2, 1))\n\n npLO = scipy.sparse.linalg.aslinearoperator(X)\n daLO = scipy.sparse.linalg.interface.MatrixLinearOperator(dX)\n\n functions = [lambda x, y: x.matvec(y), lambda x, y: x * y, lambda x, y: x.dot(y)]\n for func in functions:\n assert np.allclose(func(npLO, y), func(daLO, y))\n\n assert np.allclose(npLO.matmat(square), daLO.matmat(square))\n\n assert np.allclose(npLO.rmatvec(w), daLO.rmatvec(w))\n\n assert npLO.dtype == daLO.dtype\n assert npLO.shape == daLO.shape", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_random_test_tokenize_masked_array.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_random_test_tokenize_masked_array.None_4", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_masked.py", "file_name": "test_masked.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 23, "span_ids": ["imports", "test_tokenize_masked_array"], "tokens": 199}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import random\nfrom itertools import product\n\nimport numpy as np\nimport pytest\n\nimport dask.array as da\nfrom dask.base import tokenize\nfrom dask.array.utils import assert_eq\nfrom copy import 
deepcopy\n\npytest.importorskip(\"dask.array.ma\")\n\n\ndef test_tokenize_masked_array():\n m = np.ma.masked_array([1, 2, 3], mask=[True, True, False], fill_value=10)\n m2 = np.ma.masked_array([1, 2, 3], mask=[True, True, False], fill_value=0)\n m3 = np.ma.masked_array([1, 2, 3], mask=False, fill_value=10)\n assert tokenize(m) == tokenize(m)\n assert tokenize(m2) == tokenize(m2)\n assert tokenize(m3) == tokenize(m3)\n assert tokenize(m) != tokenize(m2)\n assert tokenize(m) != tokenize(m3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_from_array_masked_array_test_copy_deepcopy.assert_isinstance_y2_comp": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_from_array_masked_array_test_copy_deepcopy.assert_isinstance_y2_comp", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_masked.py", "file_name": "test_masked.py", "file_type": "text/x-python", "category": "test", "start_line": 26, "end_line": 46, "span_ids": ["test_copy_deepcopy", "test_from_array_masked_array"], "tokens": 204}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_array_masked_array():\n m = np.ma.masked_array([1, 2, 3], mask=[True, True, False], fill_value=10)\n dm = da.from_array(m, chunks=(2,), asarray=False)\n assert_eq(dm, m)\n\n\ndef test_copy_deepcopy():\n t = np.ma.masked_array([1, 2], mask=[0, 1])\n x = da.from_array(t, chunks=t.shape, asarray=False)\n # x = da.arange(5, chunks=(2,))\n y = x.copy()\n memo = {}\n y2 = deepcopy(x, memo=memo)\n\n xx = da.ma.masked_where([False, True], [1, 2])\n assert_eq(x, xx)\n\n assert_eq(y, t)\n assert isinstance(y.compute(), np.ma.masked_array)\n assert_eq(y2, t)\n assert isinstance(y2.compute(), np.ma.masked_array)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_functions_functions._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_functions_functions._", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_masked.py", "file_name": "test_masked.py", "file_type": "text/x-python", "category": "test", "start_line": 49, "end_line": 77, "span_ids": ["impl:2"], "tokens": 326}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "functions = [\n lambda x: x,\n lambda x: da.expm1(x),\n lambda x: 2 * x,\n lambda x: x / 2,\n lambda x: x ** 2,\n lambda x: x + x,\n lambda x: x * 
x,\n lambda x: x[0],\n lambda x: x[:, 1],\n lambda x: x[:1, None, 1:3],\n lambda x: x.T,\n lambda x: da.transpose(x, (1, 2, 0)),\n lambda x: x.sum(),\n lambda x: x.dot(np.arange(x.shape[-1])),\n lambda x: x.dot(np.eye(x.shape[-1])),\n lambda x: da.tensordot(x, np.ones(x.shape[:2]), axes=[(0, 1), (0, 1)]),\n lambda x: x.sum(axis=0),\n lambda x: x.max(axis=0),\n lambda x: x.sum(axis=(1, 2)),\n lambda x: x.astype(np.complex128),\n lambda x: x.map_blocks(lambda x: x * 2),\n lambda x: x.round(1),\n lambda x: x.reshape((x.shape[0] * x.shape[1], x.shape[2])),\n lambda x: abs(x),\n lambda x: x > 0.5,\n lambda x: x.rechunk((4, 4, 4)),\n lambda x: x.rechunk((2, 2, 1)),\n]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_basic_test_basic.if_yy_shape_.assert_isinstance_zz_np_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_basic_test_basic.if_yy_shape_.assert_isinstance_zz_np_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_masked.py", "file_name": "test_masked.py", "file_type": "text/x-python", "category": "test", "start_line": 80, "end_line": 94, "span_ids": ["test_basic"], "tokens": 113}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"func\", functions)\ndef test_basic(func):\n x = da.random.random((2, 3, 4), chunks=(1, 2, 2))\n x[x < 0.4] = 0\n\n y = da.ma.masked_equal(x, 0)\n\n xx = func(x)\n yy = func(y)\n\n assert_eq(xx, da.ma.filled(yy, 0))\n\n if yy.shape:\n zz = yy.compute()\n assert isinstance(zz, np.ma.masked_array)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_tensordot_test_tensordot.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_tensordot_test_tensordot.None_2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_masked.py", "file_name": "test_masked.py", "file_type": "text/x-python", "category": "test", "start_line": 97, "end_line": 117, "span_ids": ["test_tensordot"], "tokens": 262}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tensordot():\n x = da.random.random((2, 3, 4), chunks=(1, 2, 2))\n x[x < 0.4] = 0\n y = da.random.random((4, 3, 2), chunks=(2, 2, 1))\n y[y < 0.4] = 0\n\n xx = da.ma.masked_equal(x, 0)\n yy = da.ma.masked_equal(y, 0)\n\n assert_eq(\n da.tensordot(x, y, axes=(2, 0)),\n 
da.ma.filled(da.tensordot(xx, yy, axes=(2, 0)), 0),\n )\n assert_eq(\n da.tensordot(x, y, axes=(1, 1)),\n da.ma.filled(da.tensordot(xx, yy, axes=(1, 1)), 0),\n )\n assert_eq(\n da.tensordot(x, y, axes=((1, 2), (1, 0))),\n da.ma.filled(da.tensordot(xx, yy, axes=((1, 2), (1, 0))), 0),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_mixed_concatenate_test_mixed_concatenate.assert_eq_dd_ss_check_m": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_mixed_concatenate_test_mixed_concatenate.assert_eq_dd_ss_check_m", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_masked.py", "file_name": "test_masked.py", "file_type": "text/x-python", "category": "test", "start_line": 120, "end_line": 134, "span_ids": ["test_mixed_concatenate"], "tokens": 163}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"func\", functions)\n@pytest.mark.filterwarnings(\"ignore::numpy.ComplexWarning\") # abs() in assert_eq\ndef test_mixed_concatenate(func):\n x = da.random.random((2, 3, 4), chunks=(1, 2, 2))\n y = da.random.random((2, 3, 4), chunks=(1, 2, 2))\n\n y[y < 0.4] = 0\n yy = da.ma.masked_equal(y, 0)\n\n d = da.concatenate([x, y], axis=0)\n s = da.concatenate([x, yy], axis=0)\n\n dd = func(d)\n ss = func(s)\n assert_eq(dd, ss, check_meta=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_mixed_random_test_mixed_random.assert_eq_dd_ss_check_m": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_mixed_random_test_mixed_random.assert_eq_dd_ss_check_m", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_masked.py", "file_name": "test_masked.py", "file_type": "text/x-python", "category": "test", "start_line": 137, "end_line": 149, "span_ids": ["test_mixed_random"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"func\", functions)\n@pytest.mark.filterwarnings(\"ignore::numpy.ComplexWarning\") # abs() in assert_eq\ndef test_mixed_random(func):\n d = da.random.random((4, 3, 4), chunks=(1, 2, 2))\n d[d < 0.4] = 0\n\n fn = lambda x: np.ma.masked_equal(x, 0) if random.random() < 0.5 else x\n s = d.map_blocks(fn)\n\n dd = func(d)\n ss = func(s)\n\n assert_eq(dd, ss, check_meta=False)", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_mixed_output_type_test_mixed_output_type.assert_isinstance_zz_np_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_mixed_output_type_test_mixed_output_type.assert_isinstance_zz_np_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_masked.py", "file_name": "test_masked.py", "file_type": "text/x-python", "category": "test", "start_line": 152, "end_line": 162, "span_ids": ["test_mixed_output_type"], "tokens": 113}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_mixed_output_type():\n y = da.random.random((10, 10), chunks=(5, 5))\n y[y < 0.4] = 0\n\n y = da.ma.masked_equal(y, 0)\n x = da.zeros((10, 1), chunks=(5, 1))\n\n z = da.concatenate([x, y], axis=1)\n assert z.shape == (10, 11)\n zz = z.compute()\n assert isinstance(zz, np.ma.masked_array)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_creation_functions_test_creation_functions.assert_eq_da_ma_fix_inval": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_creation_functions_test_creation_functions.assert_eq_da_ma_fix_inval", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_masked.py", "file_name": "test_masked.py", "file_type": "text/x-python", "category": "test", "start_line": 165, "end_line": 212, "span_ids": ["test_creation_functions"], "tokens": 644}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_creation_functions():\n x = np.array([-2, -1, 0, 1, 2] * 20).reshape((10, 10))\n y = np.array([-2, 0, 1, 1, 0] * 2)\n dx = da.from_array(x, chunks=5)\n dy = da.from_array(y, chunks=4)\n\n sol = np.ma.masked_greater(x, y)\n for (a, b) in product([dx, x], [dy, y]):\n assert_eq(da.ma.masked_greater(a, b), sol)\n\n # These are all the same as masked_greater, just check for correct op\n assert_eq(da.ma.masked_greater(dx, 0), np.ma.masked_greater(x, 0))\n assert_eq(da.ma.masked_greater_equal(dx, 0), np.ma.masked_greater_equal(x, 0))\n assert_eq(da.ma.masked_less(dx, 0), np.ma.masked_less(x, 0))\n assert_eq(da.ma.masked_less_equal(dx, 0), np.ma.masked_less_equal(x, 0))\n assert_eq(da.ma.masked_equal(dx, 0), np.ma.masked_equal(x, 0))\n assert_eq(da.ma.masked_not_equal(dx, 0), np.ma.masked_not_equal(x, 0))\n\n # masked_where\n assert_eq(da.ma.masked_where(False, dx), np.ma.masked_where(False, x))\n 
assert_eq(da.ma.masked_where(dx > 2, dx), np.ma.masked_where(x > 2, x))\n\n with pytest.raises(IndexError):\n da.ma.masked_where((dx > 2)[:, 0], dx)\n\n assert_eq(da.ma.masked_inside(dx, -1, 1), np.ma.masked_inside(x, -1, 1))\n assert_eq(da.ma.masked_outside(dx, -1, 1), np.ma.masked_outside(x, -1, 1))\n assert_eq(da.ma.masked_values(dx, -1), np.ma.masked_values(x, -1))\n\n # masked_equal and masked_values in numpy sets the fill_value to `value`,\n # which can sometimes be an array. This is hard to support in dask, so we\n # forbid it. Check that this isn't supported:\n with pytest.raises(ValueError):\n da.ma.masked_equal(dx, dy)\n\n with pytest.raises(ValueError):\n da.ma.masked_values(dx, dy)\n\n y = x.astype(\"f8\")\n y[0, 0] = y[7, 5] = np.nan\n dy = da.from_array(y, chunks=5)\n\n assert_eq(da.ma.masked_invalid(dy), np.ma.masked_invalid(y))\n\n my = np.ma.masked_greater(y, 0)\n dmy = da.ma.masked_greater(dy, 0)\n\n assert_eq(da.ma.fix_invalid(dmy, fill_value=0), np.ma.fix_invalid(my, fill_value=0))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_filled_assert_eq_ma.if_res_is_np_ma_masked_.else_.assert_eq_a_b_equal_nan": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_filled_assert_eq_ma.if_res_is_np_ma_masked_.else_.assert_eq_a_b_equal_nan", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_masked.py", "file_name": "test_masked.py", "file_type": "text/x-python", "category": "test", "start_line": 215, "end_line": 236, "span_ids": ["assert_eq_ma", "test_filled"], "tokens": 201}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_filled():\n x = np.array([-2, -1, 0, 1, 2] * 20).reshape((10, 10))\n dx = da.from_array(x, chunks=5)\n\n mx = np.ma.masked_equal(x, 0)\n mdx = da.ma.masked_equal(dx, 0)\n\n assert_eq(da.ma.filled(mdx), np.ma.filled(mx))\n assert_eq(da.ma.filled(mdx, -5), np.ma.filled(mx, -5))\n\n\ndef assert_eq_ma(a, b):\n res = a.compute()\n if res is np.ma.masked:\n assert res is b\n else:\n assert type(res) == type(b)\n if hasattr(res, \"mask\"):\n np.testing.assert_equal(res.mask, b.mask)\n a = da.ma.filled(a)\n b = np.ma.filled(b)\n assert_eq(a, b, equal_nan=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_reductions_test_reductions.None_6": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_reductions_test_reductions.None_6", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_masked.py", "file_name": "test_masked.py", "file_type": "text/x-python", "category": "test", "start_line": 239, "end_line": 264, "span_ids": 
["test_reductions"], "tokens": 327}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"dtype\", (\"i8\", \"f8\"))\n@pytest.mark.parametrize(\n \"reduction\", [\"sum\", \"prod\", \"mean\", \"var\", \"std\", \"min\", \"max\", \"any\", \"all\"]\n)\ndef test_reductions(dtype, reduction):\n x = (np.random.RandomState(42).rand(11, 11) * 10).astype(dtype)\n dx = da.from_array(x, chunks=(4, 4))\n mx = np.ma.masked_greater(x, 5)\n mdx = da.ma.masked_greater(dx, 5)\n\n dfunc = getattr(da, reduction)\n func = getattr(np, reduction)\n\n assert_eq_ma(dfunc(mdx), func(mx))\n assert_eq_ma(dfunc(mdx, axis=0), func(mx, axis=0))\n assert_eq_ma(dfunc(mdx, keepdims=True, split_every=4), func(mx, keepdims=True))\n assert_eq_ma(dfunc(mdx, axis=0, split_every=2), func(mx, axis=0))\n assert_eq_ma(\n dfunc(mdx, axis=0, keepdims=True, split_every=2),\n func(mx, axis=0, keepdims=True),\n )\n assert_eq_ma(dfunc(mdx, axis=1, split_every=2), func(mx, axis=1))\n assert_eq_ma(\n dfunc(mdx, axis=1, keepdims=True, split_every=2),\n func(mx, axis=1, keepdims=True),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_reductions_allmasked_test_reductions_allmasked.assert_eq_ma_dfunc_dx_f": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_reductions_allmasked_test_reductions_allmasked.assert_eq_ma_dfunc_dx_f", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_masked.py", "file_name": "test_masked.py", "file_type": "text/x-python", "category": "test", "start_line": 267, "end_line": 278, "span_ids": ["test_reductions_allmasked"], "tokens": 119}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"dtype\", (\"i8\", \"f8\"))\n@pytest.mark.parametrize(\n \"reduction\", [\"sum\", \"prod\", \"mean\", \"var\", \"std\", \"min\", \"max\", \"any\", \"all\"]\n)\ndef test_reductions_allmasked(dtype, reduction):\n x = np.ma.masked_array([1, 2], mask=True)\n dx = da.from_array(x, asarray=False)\n\n dfunc = getattr(da, reduction)\n func = getattr(np, reduction)\n\n assert_eq_ma(dfunc(dx), func(x))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_arg_reductions_test_arg_reductions.assert_eq_ma_dfunc_dmx_2": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_arg_reductions_test_arg_reductions.assert_eq_ma_dfunc_dmx_2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_masked.py", "file_name": "test_masked.py", "file_type": "text/x-python", "category": "test", "start_line": 281, "end_line": 294, "span_ids": ["test_arg_reductions"], "tokens": 171}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"reduction\", [\"argmin\", \"argmax\"])\ndef test_arg_reductions(reduction):\n x = np.random.random((10, 10, 10))\n dx = da.from_array(x, chunks=(3, 4, 5))\n mx = np.ma.masked_greater(x, 0.4)\n dmx = da.ma.masked_greater(dx, 0.4)\n\n dfunc = getattr(da, reduction)\n func = getattr(np, reduction)\n\n assert_eq_ma(dfunc(dmx), func(mx))\n assert_eq_ma(dfunc(dmx, 0), func(mx, 0))\n assert_eq_ma(dfunc(dmx, 1), func(mx, 1))\n assert_eq_ma(dfunc(dmx, 2), func(mx, 2))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_cumulative_test_cumulative.for_axis_in_0_1_2_.assert_eq_ma_dmx_cumprod_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_cumulative_test_cumulative.for_axis_in_0_1_2_.assert_eq_ma_dmx_cumprod_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_masked.py", "file_name": "test_masked.py", "file_type": "text/x-python", "category": "test", "start_line": 297, "end_line": 305, "span_ids": ["test_cumulative"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_cumulative():\n x = np.random.RandomState(0).rand(20, 24, 13)\n dx = da.from_array(x, chunks=(6, 5, 4))\n mx = np.ma.masked_greater(x, 0.4)\n dmx = da.ma.masked_greater(dx, 0.4)\n\n for axis in [0, 1, 2]:\n assert_eq_ma(dmx.cumsum(axis=axis), mx.cumsum(axis=axis))\n assert_eq_ma(dmx.cumprod(axis=axis), mx.cumprod(axis=axis))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_accessors_test_accessors.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_accessors_test_accessors.None_3", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_masked.py", "file_name": "test_masked.py", "file_type": "text/x-python", "category": "test", "start_line": 308, "end_line": 317, "span_ids": ["test_accessors"], "tokens": 
129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_accessors():\n x = np.random.random((10, 10))\n dx = da.from_array(x, chunks=(3, 4))\n mx = np.ma.masked_greater(x, 0.4)\n dmx = da.ma.masked_greater(dx, 0.4)\n\n assert_eq(da.ma.getmaskarray(dmx), np.ma.getmaskarray(mx))\n assert_eq(da.ma.getmaskarray(dx), np.ma.getmaskarray(x))\n assert_eq(da.ma.getdata(dmx), np.ma.getdata(mx))\n assert_eq(da.ma.getdata(dx), np.ma.getdata(x))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_masked_array_test_masked_array.with_pytest_raises_np_ma_.da_ma_masked_array_dx_ma": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_masked_array_test_masked_array.with_pytest_raises_np_ma_.da_ma_masked_array_dx_ma", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_masked.py", "file_name": "test_masked.py", "file_type": "text/x-python", "category": "test", "start_line": 320, "end_line": 347, "span_ids": ["test_masked_array"], "tokens": 303}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_masked_array():\n x = np.random.random((10, 10)).astype(\"f4\")\n dx = da.from_array(x, chunks=(3, 4))\n f1 = da.from_array(np.array(1), chunks=())\n\n fill_values = [(None, None), (0.5, 0.5), (1, f1)]\n for data, (df, f) in product([x, dx], fill_values):\n assert_eq(\n da.ma.masked_array(data, fill_value=df), np.ma.masked_array(x, fill_value=f)\n )\n assert_eq(\n da.ma.masked_array(data, mask=data > 0.4, fill_value=df),\n np.ma.masked_array(x, mask=x > 0.4, fill_value=f),\n )\n assert_eq(\n da.ma.masked_array(data, mask=data > 0.4, fill_value=df),\n np.ma.masked_array(x, mask=x > 0.4, fill_value=f),\n )\n assert_eq(\n da.ma.masked_array(data, fill_value=df, dtype=\"f8\"),\n np.ma.masked_array(x, fill_value=f, dtype=\"f8\"),\n )\n\n with pytest.raises(ValueError):\n da.ma.masked_array(dx, fill_value=dx)\n\n with pytest.raises(np.ma.MaskError):\n da.ma.masked_array(dx, mask=dx[:3, :3])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_set_fill_value_test_set_fill_value.with_pytest_raises_ValueE.da_ma_set_fill_value_dmx_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_set_fill_value_test_set_fill_value.with_pytest_raises_ValueE.da_ma_set_fill_value_dmx_", "embedding": null, "metadata": {"file_path": 
"dask/array/tests/test_masked.py", "file_name": "test_masked.py", "file_type": "text/x-python", "category": "test", "start_line": 350, "end_line": 368, "span_ids": ["test_set_fill_value"], "tokens": 165}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_fill_value():\n x = np.random.randint(0, 10, (10, 10))\n dx = da.from_array(x, chunks=(3, 4))\n mx = np.ma.masked_greater(x, 3)\n dmx = da.ma.masked_greater(dx, 3)\n\n da.ma.set_fill_value(dmx, -10)\n np.ma.set_fill_value(mx, -10)\n assert_eq_ma(dmx, mx)\n\n da.ma.set_fill_value(dx, -10)\n np.ma.set_fill_value(x, -10)\n assert_eq_ma(dx, x)\n\n with pytest.raises(TypeError):\n da.ma.set_fill_value(dmx, 1e20)\n\n with pytest.raises(ValueError):\n da.ma.set_fill_value(dmx, dx)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_average_weights_with_masked_array_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_average_weights_with_masked_array_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_masked.py", "file_name": "test_masked.py", "file_type": "text/x-python", "category": "test", "start_line": 371, "end_line": 396, "span_ids": ["test_arithmetic_results_in_masked", "test_average_weights_with_masked_array"], "tokens": 252}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_average_weights_with_masked_array():\n mask = np.array([[True, False], [True, True], [False, True]])\n data = np.arange(6).reshape((3, 2))\n a = np.ma.array(data, mask=mask)\n d_a = da.ma.masked_array(data=data, mask=mask, chunks=2)\n\n weights = np.array([0.25, 0.75])\n d_weights = da.from_array(weights, chunks=2)\n\n np_avg = np.ma.average(a, weights=weights, axis=1)\n da_avg = da.ma.average(d_a, weights=d_weights, axis=1)\n\n assert_eq(np_avg, da_avg)\n\n\ndef test_arithmetic_results_in_masked():\n mask = np.array([[True, False], [True, True], [False, True]])\n x = np.arange(6).reshape((3, 2))\n masked = np.ma.array(x, mask=mask)\n dx = da.from_array(x, chunks=(2, 2))\n\n res = dx + masked\n sol = x + masked\n assert_eq(res, sol)\n assert isinstance(res.compute(), np.ma.masked_array)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_numpy_compat.py_pytest_test_slice_dtype.assert_result_expected": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_numpy_compat.py_pytest_test_slice_dtype.assert_result_expected", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_numpy_compat.py", "file_name": "test_numpy_compat.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 37, "span_ids": ["imports", "test_slice_dtype", "test_basic", "index", "dtype"], "tokens": 299}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\nimport numpy as np\n\nimport dask.array as da\nfrom dask.array.numpy_compat import _make_sliced_dtype\nfrom dask.array.utils import assert_eq\n\n\n@pytest.fixture(\n params=[\n [(\"A\", (\"f4\", (3, 2))), (\"B\", (\"f4\", 3)), (\"C\", (\"f8\", 3))],\n [(\"A\", (\"i4\", (3, 2))), (\"B\", (\"f4\", 3)), (\"C\", (\"S4\", 3))],\n ]\n)\ndef dtype(request):\n return np.dtype(request.param)\n\n\n@pytest.fixture(params=[[\"A\"], [\"A\", \"B\"], [\"A\", \"B\", \"C\"]])\ndef index(request):\n return request.param\n\n\ndef test_basic():\n # sanity check\n dtype = [(\"a\", \"f8\"), (\"b\", \"f8\"), (\"c\", \"f8\")]\n x = np.ones((5, 3), dtype=dtype)\n dx = da.ones((5, 3), dtype=dtype, chunks=3)\n result = dx[[\"a\", \"b\"]]\n expected = x[[\"a\", \"b\"]]\n assert_eq(result, expected)\n\n\ndef test_slice_dtype(dtype, index):\n result = _make_sliced_dtype(dtype, index)\n expected = np.ones((5, len(dtype)), dtype=dtype)[index].dtype\n assert result == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_numpy_compat.py_test_min_max_round_funcs_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_numpy_compat.py_test_min_max_round_funcs_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_numpy_compat.py", "file_name": "test_numpy_compat.py", "file_type": "text/x-python", "category": "test", "start_line": 40, "end_line": 48, "span_ids": ["test_min_max_round_funcs"], "tokens": 112}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_min_max_round_funcs():\n # Regression test for gh-5031\n image = da.from_array(np.array([[0, 1], [1, 2]]), chunks=(1, 2))\n # These use __array_function__ (and min/max/round are aliased,\n # to amin/amax/round_ in numpy)\n assert int(np.min(image)) == 0\n assert int(np.max(image)) == 2\n assert np.round(image)[1, 1] == 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_pytest_test_fuse_getitem.for_inp_expected_in_pair.assert_result_y_ex": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_pytest_test_fuse_getitem.for_inp_expected_in_pair.assert_result_y_ex", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 101, "span_ids": ["test_fuse_getitem", "imports"], "tokens": 968}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\npytest.importorskip(\"numpy\")\n\nimport numpy as np\nimport dask\nimport dask.array as da\nfrom dask.highlevelgraph import HighLevelGraph\nfrom dask.optimization import fuse\nfrom dask.utils import SerializableLock\nfrom dask.array.core import getter, getter_nofancy\nfrom dask.array.optimization import getitem, optimize, optimize_slices, fuse_slice\nfrom dask.array.utils import assert_eq\n\n\ndef test_fuse_getitem():\n pairs = [\n (\n (getter, (getter, \"x\", slice(1000, 2000)), slice(15, 20)),\n (getter, \"x\", slice(1015, 1020)),\n ),\n (\n (\n getitem,\n (getter, \"x\", (slice(1000, 2000), slice(100, 200))),\n (slice(15, 20), slice(50, 60)),\n ),\n (getter, \"x\", (slice(1015, 1020), slice(150, 160))),\n ),\n (\n (\n getitem,\n (getter_nofancy, \"x\", (slice(1000, 2000), slice(100, 200))),\n (slice(15, 20), slice(50, 60)),\n ),\n (getter_nofancy, \"x\", (slice(1015, 1020), slice(150, 160))),\n ),\n ((getter, (getter, \"x\", slice(1000, 2000)), 10), (getter, \"x\", 1010)),\n (\n (getitem, (getter, \"x\", (slice(1000, 2000), 10)), (slice(15, 20),)),\n (getter, \"x\", (slice(1015, 1020), 10)),\n ),\n (\n (getitem, (getter_nofancy, \"x\", (slice(1000, 2000), 10)), (slice(15, 20),)),\n (getter_nofancy, \"x\", (slice(1015, 1020), 10)),\n ),\n (\n (getter, (getter, \"x\", (10, slice(1000, 2000))), (slice(15, 20),)),\n (getter, \"x\", (10, slice(1015, 1020))),\n ),\n (\n (\n getter,\n (getter, \"x\", (slice(1000, 2000), slice(100, 200))),\n (slice(None, None), slice(50, 60)),\n ),\n (getter, \"x\", (slice(1000, 2000), slice(150, 160))),\n ),\n (\n (getter, (getter, \"x\", (None, slice(None, None))), (slice(None, None), 5)),\n (getter, \"x\", (None, 5)),\n ),\n (\n (\n getter,\n (getter, \"x\", (slice(1000, 2000), slice(10, 20))),\n (slice(5, 10),),\n ),\n (getter, \"x\", (slice(1005, 1010), slice(10, 20))),\n ),\n (\n (\n getitem,\n (getitem, \"x\", (slice(1000, 2000),)),\n (slice(5, 10), slice(10, 20)),\n ),\n (getitem, \"x\", (slice(1005, 1010), slice(10, 20))),\n ),\n (\n (getter, (getter, \"x\", slice(1000, 2000), False, False), slice(15, 20)),\n (getter, \"x\", slice(1015, 1020)),\n ),\n (\n (getter, (getter, \"x\", slice(1000, 2000)), slice(15, 20), False, False),\n (getter, \"x\", slice(1015, 1020)),\n ),\n (\n (\n getter,\n (getter_nofancy, \"x\", slice(1000, 2000), False, False),\n slice(15, 20),\n False,\n False,\n ),\n (getter_nofancy, \"x\", slice(1015, 1020), False, False),\n ),\n ]\n\n for inp, expected in pairs:\n result = optimize_slices({\"y\": inp})\n assert result == {\"y\": 
expected}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_fuse_getitem_lock_test_fuse_getitem_lock.for_inp_expected_in_pair.assert_result_y_ex": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_fuse_getitem_lock_test_fuse_getitem_lock.for_inp_expected_in_pair.assert_result_y_ex", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 103, "end_line": 154, "span_ids": ["test_fuse_getitem_lock"], "tokens": 386}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_getitem_lock():\n lock1 = SerializableLock()\n lock2 = SerializableLock()\n\n pairs = [\n (\n (getter, (getter, \"x\", slice(1000, 2000), True, lock1), slice(15, 20)),\n (getter, \"x\", slice(1015, 1020), True, lock1),\n ),\n (\n (\n getitem,\n (getter, \"x\", (slice(1000, 2000), slice(100, 200)), True, lock1),\n (slice(15, 20), slice(50, 60)),\n ),\n (getter, \"x\", (slice(1015, 1020), slice(150, 160)), True, lock1),\n ),\n (\n (\n getitem,\n (\n getter_nofancy,\n \"x\",\n (slice(1000, 2000), slice(100, 200)),\n True,\n lock1,\n ),\n (slice(15, 20), slice(50, 60)),\n ),\n (getter_nofancy, \"x\", (slice(1015, 1020), slice(150, 160)), True, lock1),\n ),\n (\n (\n getter,\n (getter, \"x\", slice(1000, 2000), True, lock1),\n slice(15, 20),\n True,\n lock2,\n ),\n (\n getter,\n (getter, \"x\", slice(1000, 2000), True, lock1),\n slice(15, 20),\n True,\n lock2,\n ),\n ),\n ]\n\n for inp, expected in pairs:\n result = optimize_slices({\"y\": inp})\n assert result == {\"y\": expected}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_optimize_with_getitem_fusion_test_optimize_with_getitem_fusion.assert_len_result_len_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_optimize_with_getitem_fusion_test_optimize_with_getitem_fusion.assert_len_result_len_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 157, "end_line": 167, "span_ids": ["test_optimize_with_getitem_fusion"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def 
test_optimize_with_getitem_fusion():\n dsk = {\n \"a\": \"some-array\",\n \"b\": (getter, \"a\", (slice(10, 20), slice(100, 200))),\n \"c\": (getter, \"b\", (5, slice(50, 60))),\n }\n\n result = optimize(dsk, [\"c\"])\n expected_task = (getter, \"some-array\", (15, slice(150, 160)))\n assert any(v == expected_task for v in result.values())\n assert len(result) < len(dsk)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_optimize_slicing_test_optimize_slicing.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_optimize_slicing_test_optimize_slicing.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 170, "end_line": 191, "span_ids": ["test_optimize_slicing"], "tokens": 256}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_optimize_slicing():\n dsk = {\n \"a\": (range, 10),\n \"b\": (getter, \"a\", (slice(None, None, None),)),\n \"c\": (getter, \"b\", (slice(None, None, None),)),\n \"d\": (getter, \"c\", (slice(0, 5, None),)),\n \"e\": (getter, \"d\", (slice(None, None, None),)),\n }\n\n expected = {\"e\": (getter, (range, 10), (slice(0, 5, None),))}\n result = optimize_slices(fuse(dsk, [], rename_keys=False)[0])\n assert result == expected\n\n # protect output keys\n expected = {\n \"c\": (getter, (range, 10), (slice(0, None, None),)),\n \"d\": (getter, \"c\", (slice(0, 5, None),)),\n \"e\": (getter, \"d\", (slice(None, None, None),)),\n }\n result = optimize_slices(fuse(dsk, [\"c\", \"d\", \"e\"], rename_keys=False)[0])\n\n assert result == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_fuse_slice_test_fuse_slice.None_1.fuse_slice_None_np_array": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_fuse_slice_test_fuse_slice.None_1.fuse_slice_None_np_array", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 194, "end_line": 218, "span_ids": ["test_fuse_slice"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_slice():\n assert fuse_slice(slice(10, 15), slice(0, 5, 
2)) == slice(10, 15, 2)\n\n assert fuse_slice((slice(100, 200),), (None, slice(10, 20))) == (\n None,\n slice(110, 120),\n )\n assert fuse_slice((slice(100, 200),), (slice(10, 20), None)) == (\n slice(110, 120),\n None,\n )\n assert fuse_slice((1,), (None,)) == (1, None)\n assert fuse_slice((1, slice(10, 20)), (None, None, 3, None)) == (\n 1,\n None,\n None,\n 13,\n None,\n )\n\n with pytest.raises(NotImplementedError):\n fuse_slice(slice(10, 15, 2), -1)\n # Regression test for #3076\n with pytest.raises(NotImplementedError):\n fuse_slice(None, np.array([0, 0]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_fuse_slice_with_lists_test_fuse_slice_with_lists.None_6": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_fuse_slice_with_lists_test_fuse_slice_with_lists.None_6", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 221, "end_line": 232, "span_ids": ["test_fuse_slice_with_lists"], "tokens": 288}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_slice_with_lists():\n assert fuse_slice(slice(10, 20, 2), [1, 2, 3]) == [12, 14, 16]\n assert fuse_slice([10, 20, 30, 40, 50], [3, 1, 2]) == [40, 20, 30]\n assert fuse_slice([10, 20, 30, 40, 50], 3) == 40\n assert fuse_slice([10, 20, 30, 40, 50], -1) == 50\n assert fuse_slice([10, 20, 30, 40, 50], slice(1, None, 2)) == [20, 40]\n assert fuse_slice(\n (slice(None), slice(0, 10), [1, 2, 3]), (slice(None), slice(1, 5), slice(None))\n ) == (slice(0, None), slice(1, 5), [1, 2, 3])\n assert fuse_slice(\n (slice(None), slice(None), [1, 2, 3]), (slice(None), slice(1, 5), 1)\n ) == (slice(0, None), slice(1, 5), 2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_nonfusible_fancy_indexing_test_nonfusible_fancy_indexing.for_a_b_in_cases_.with_pytest_raises_NotImp.fuse_slice_a_b_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_nonfusible_fancy_indexing_test_nonfusible_fancy_indexing.for_a_b_in_cases_.with_pytest_raises_NotImp.fuse_slice_a_b_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 235, "end_line": 247, "span_ids": ["test_nonfusible_fancy_indexing"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_nonfusible_fancy_indexing():\n nil = slice(None)\n cases = [ # x[:, list, :][int, :, :]\n ((nil, [1, 2, 3], nil), (0, nil, nil)),\n # x[int, :, :][:, list, :]\n ((0, nil, nil), (nil, [1, 2, 3], nil)),\n # x[:, list, :, :][:, :, :, int]\n ((nil, [1, 2], nil, nil), (nil, nil, nil, 0)),\n ]\n\n for a, b in cases:\n with pytest.raises(NotImplementedError):\n fuse_slice(a, b)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_hard_fuse_slice_cases_test_dont_fuse_numpy_arrays.for_chunks_in_5_10_.assert_sum_isinstance_v_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_hard_fuse_slice_cases_test_dont_fuse_numpy_arrays.for_chunks_in_5_10_.assert_sum_isinstance_v_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 250, "end_line": 263, "span_ids": ["test_hard_fuse_slice_cases", "test_dont_fuse_numpy_arrays"], "tokens": 152}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_hard_fuse_slice_cases():\n dsk = {\n \"x\": (getter, (getter, \"x\", (None, slice(None, None))), (slice(None, None), 5))\n }\n assert optimize_slices(dsk) == {\"x\": (getter, \"x\", (None, 5))}\n\n\ndef test_dont_fuse_numpy_arrays():\n x = np.ones(10)\n for chunks in [(5,), (10,)]:\n y = da.from_array(x, chunks=(10,))\n\n dsk = y.__dask_optimize__(y.dask, y.__dask_keys__())\n assert sum(isinstance(v, np.ndarray) for v in dsk.values()) == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_minimize_data_transfer_test_minimize_data_transfer.for_dep_in_deps_.assert_dsk_dep_1_big": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_minimize_data_transfer_test_minimize_data_transfer.for_dep_in_deps_.assert_dsk_dep_1_big", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 266, "end_line": 282, "span_ids": ["test_minimize_data_transfer"], "tokens": 177}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, 
"text": "def test_minimize_data_transfer():\n zarr = pytest.importorskip(\"zarr\")\n x = zarr.ones((100,))\n y = da.from_array(x, chunks=25)\n z = y + 1\n dsk = z.__dask_optimize__(z.dask, z.__dask_keys__())\n\n keys = list(dsk)\n results = dask.get(dsk, keys)\n big_key = [k for k, r in zip(keys, results) if r is x][0]\n dependencies, dependents = dask.core.get_deps(dsk)\n deps = dependents[big_key]\n\n assert len(deps) == 4\n for dep in deps:\n assert dsk[dep][0] in (getitem, getter)\n assert dsk[dep][1] == big_key", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_fuse_slices_with_alias_test_fuse_slices_with_alias.assert_dsk2_fused_key_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_fuse_slices_with_alias_test_fuse_slices_with_alias.assert_dsk2_fused_key_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 285, "end_line": 296, "span_ids": ["test_fuse_slices_with_alias"], "tokens": 190}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_slices_with_alias():\n dsk = {\n \"x\": np.arange(16).reshape((4, 4)),\n (\"dx\", 0, 0): (getter, \"x\", (slice(0, 4), slice(0, 4))),\n (\"alias\", 0, 0): (\"dx\", 0, 0),\n (\"dx2\", 0): (getitem, (\"alias\", 0, 0), (slice(None), 0)),\n }\n keys = [(\"dx2\", 0)]\n dsk2 = optimize(dsk, keys)\n assert len(dsk2) == 3\n fused_key = set(dsk2).difference([\"x\", (\"dx2\", 0)]).pop()\n assert dsk2[fused_key] == (getter, \"x\", (slice(0, 4), 0))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_dont_fuse_fancy_indexing_in_getter_nofancy_test_dont_fuse_fancy_indexing_in_getter_nofancy.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_dont_fuse_fancy_indexing_in_getter_nofancy_test_dont_fuse_fancy_indexing_in_getter_nofancy.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 299, "end_line": 310, "span_ids": ["test_dont_fuse_fancy_indexing_in_getter_nofancy"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def 
test_dont_fuse_fancy_indexing_in_getter_nofancy():\n dsk = {\n \"a\": (\n getitem,\n (getter_nofancy, \"x\", (slice(10, 20, None), slice(100, 200, None))),\n ([1, 3], slice(50, 60, None)),\n )\n }\n assert optimize_slices(dsk) == dsk\n\n dsk = {\"a\": (getitem, (getter_nofancy, \"x\", [1, 2, 3]), 0)}\n assert optimize_slices(dsk) == dsk", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_fuse_getter_with_asarray_test_fuse_getter_with_asarray.assert_eq_z_x_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_fuse_getter_with_asarray_test_fuse_getter_with_asarray.assert_eq_z_x_1_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 313, "end_line": 331, "span_ids": ["test_fuse_getter_with_asarray"], "tokens": 215}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"chunks\", [10, 5, 3])\ndef test_fuse_getter_with_asarray(chunks):\n x = np.ones(10) * 1234567890\n y = da.ones(10, chunks=chunks)\n z = x + y\n dsk = z.__dask_optimize__(z.dask, z.__dask_keys__())\n assert any(v is x for v in dsk.values())\n for v in dsk.values():\n s = str(v)\n assert s.count(\"getitem\") + s.count(\"getter\") <= 1\n if v is not x:\n assert \"1234567890\" not in s\n n_getters = len([v for v in dsk.values() if v[0] in (getitem, getter)])\n if y.npartitions > 1:\n assert n_getters == y.npartitions\n else:\n assert n_getters == 0\n\n assert_eq(z, x + 1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_remove_no_op_slices_if_get_is_not_getter_or_getter_nofancy_test_remove_no_op_slices_if_get_is_not_getter_or_getter_nofancy.for_orig_final_in_opts_.assert_optimize_slices_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_remove_no_op_slices_if_get_is_not_getter_or_getter_nofancy_test_remove_no_op_slices_if_get_is_not_getter_or_getter_nofancy.for_orig_final_in_opts_.assert_optimize_slices_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 334, "end_line": 357, "span_ids": ["test_remove_no_op_slices_if_get_is_not_getter_or_getter_nofancy"], "tokens": 264}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"get,remove\", [(getter, False), (getter_nofancy, False), (getitem, True)]\n)\ndef test_remove_no_op_slices_if_get_is_not_getter_or_getter_nofancy(get, remove):\n # Test that no-op slices are removed as long as get is not getter or\n # getter_nofancy. This ensures that `get` calls are always made in all\n # tasks created by `from_array`, even after optimization\n null = slice(0, None)\n opts = [\n (\n (get, \"x\", null, False, False),\n \"x\" if remove else (get, \"x\", null, False, False),\n ),\n (\n (getitem, (get, \"x\", null, False, False), null),\n \"x\" if remove else (get, \"x\", null, False, False),\n ),\n (\n (getitem, (get, \"x\", (null, null), False, False), ()),\n \"x\" if remove else (get, \"x\", (null, null), False, False),\n ),\n ]\n for orig, final in opts:\n assert optimize_slices({\"a\": orig}) == {\"a\": final}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_turn_off_fusion_test_turn_off_fusion.assert_len_a_len_b_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_turn_off_fusion_test_turn_off_fusion.assert_len_a_len_b_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 360, "end_line": 371, "span_ids": ["test_turn_off_fusion"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(reason=\"blockwise fusion does not respect this, which is ok\")\ndef test_turn_off_fusion():\n x = da.ones(10, chunks=(5,))\n y = da.sum(x + 1 + 2 + 3)\n\n a = y.__dask_optimize__(y.dask, y.__dask_keys__())\n\n with dask.config.set({\"optimization.fuse.ave-width\": 0}):\n b = y.__dask_optimize__(y.dask, y.__dask_keys__())\n\n assert dask.get(a, y.__dask_keys__()) == dask.get(b, y.__dask_keys__())\n assert len(a) < len(b)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_gh3937_test_gh3937.y_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_gh3937_test_gh3937.y_compute_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 374, "end_line": 383, "span_ids": ["test_gh3937"], "tokens": 115}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", 
"tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_gh3937():\n # test for github issue #3937\n x = da.from_array([1, 2, 3.0], (2,))\n x = da.concatenate((x, [x[-1]]))\n y = x.rechunk((2,))\n # This will produce Integral type indices that are not ints (np.int64), failing\n # the optimizer\n y = da.coarsen(np.sum, y, {0: 2})\n # How to trigger the optimizer explicitly?\n y.compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_double_dependencies_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_double_dependencies_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 386, "end_line": 403, "span_ids": ["test_double_dependencies", "test_fuse_roots"], "tokens": 170}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_double_dependencies():\n x = np.arange(56).reshape((7, 8))\n d = da.from_array(x, chunks=(4, 4))\n X = d + 1\n X = da.dot(X, X.T)\n\n assert_eq(X.compute(optimize_graph=False), X)\n\n\ndef test_fuse_roots():\n x = da.ones(10, chunks=(2,))\n y = da.zeros(10, chunks=(2,))\n z = (x + 1) + (2 * y ** 2)\n (zz,) = dask.optimize(z)\n # assert len(zz.dask) == 5\n assert sum(map(dask.istask, zz.dask.values())) == 5 # there are some aliases\n assert_eq(zz, z)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_pytest_test_fractional_slice.assert_isinstance_fs_1_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_pytest_test_fractional_slice.assert_isinstance_fs_1_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 40, "span_ids": ["imports", "test_fractional_slice"], "tokens": 285}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\npytest.importorskip(\"numpy\")\n\nimport numpy as np\nfrom numpy.testing import assert_array_almost_equal, assert_array_equal\n\nimport dask.array as da\nfrom dask.array.overlap import (\n fractional_slice,\n getitem,\n trim_internal,\n 
overlap_internal,\n nearest,\n constant,\n boundaries,\n reflect,\n periodic,\n overlap,\n)\nfrom dask.array.utils import assert_eq, same_keys\n\n\ndef test_fractional_slice():\n assert fractional_slice((\"x\", 4.9), {0: 2}) == (getitem, (\"x\", 5), (slice(0, 2),))\n\n assert fractional_slice((\"x\", 3, 5.1), {0: 2, 1: 3}) == (\n getitem,\n (\"x\", 3, 5),\n (slice(None, None, None), slice(-3, None)),\n )\n\n assert fractional_slice((\"x\", 2.9, 5.1), {0: 2, 1: 3}) == (\n getitem,\n (\"x\", 3, 5),\n (slice(0, 2), slice(-3, None)),\n )\n\n fs = fractional_slice((\"x\", 4.9), {0: 2})\n assert isinstance(fs[1][1], int)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_internal_test_overlap_internal.assert_same_keys_overlap_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_internal_test_overlap_internal.assert_same_keys_overlap_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 43, "end_line": 69, "span_ids": ["test_overlap_internal"], "tokens": 496}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_overlap_internal():\n x = np.arange(64).reshape((8, 8))\n d = da.from_array(x, chunks=(4, 4))\n\n g = overlap_internal(d, {0: 2, 1: 1})\n result = g.compute(scheduler=\"sync\")\n assert g.chunks == ((6, 6), (5, 5))\n\n expected = np.array(\n [\n [0, 1, 2, 3, 4, 3, 4, 5, 6, 7],\n [8, 9, 10, 11, 12, 11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20, 19, 20, 21, 22, 23],\n [24, 25, 26, 27, 28, 27, 28, 29, 30, 31],\n [32, 33, 34, 35, 36, 35, 36, 37, 38, 39],\n [40, 41, 42, 43, 44, 43, 44, 45, 46, 47],\n [16, 17, 18, 19, 20, 19, 20, 21, 22, 23],\n [24, 25, 26, 27, 28, 27, 28, 29, 30, 31],\n [32, 33, 34, 35, 36, 35, 36, 37, 38, 39],\n [40, 41, 42, 43, 44, 43, 44, 45, 46, 47],\n [48, 49, 50, 51, 52, 51, 52, 53, 54, 55],\n [56, 57, 58, 59, 60, 59, 60, 61, 62, 63],\n ]\n )\n\n assert_eq(result, expected)\n assert same_keys(overlap_internal(d, {0: 2, 1: 1}), g)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_internal_asymmetric_test_overlap_internal_asymmetric.assert_same_keys_overlap_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_internal_asymmetric_test_overlap_internal_asymmetric.assert_same_keys_overlap_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 72, "end_line": 94, "span_ids": 
["test_overlap_internal_asymmetric"], "tokens": 409}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_overlap_internal_asymmetric():\n x = np.arange(64).reshape((8, 8))\n d = da.from_array(x, chunks=(4, 4))\n\n result = overlap_internal(d, {0: (2, 0), 1: (1, 0)})\n assert result.chunks == ((4, 6), (4, 5))\n\n expected = np.array(\n [\n [0, 1, 2, 3, 3, 4, 5, 6, 7],\n [8, 9, 10, 11, 11, 12, 13, 14, 15],\n [16, 17, 18, 19, 19, 20, 21, 22, 23],\n [24, 25, 26, 27, 27, 28, 29, 30, 31],\n [16, 17, 18, 19, 19, 20, 21, 22, 23],\n [24, 25, 26, 27, 27, 28, 29, 30, 31],\n [32, 33, 34, 35, 35, 36, 37, 38, 39],\n [40, 41, 42, 43, 43, 44, 45, 46, 47],\n [48, 49, 50, 51, 51, 52, 53, 54, 55],\n [56, 57, 58, 59, 59, 60, 61, 62, 63],\n ]\n )\n assert_eq(result, expected)\n assert same_keys(overlap_internal(d, {0: (2, 0), 1: (1, 0)}), result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_internal_asymmetric_small_test_overlap_internal_asymmetric_small.assert_same_keys_overlap_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_internal_asymmetric_small_test_overlap_internal_asymmetric_small.assert_same_keys_overlap_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 97, "end_line": 135, "span_ids": ["test_overlap_internal_asymmetric_small"], "tokens": 292}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_overlap_internal_asymmetric_small():\n x = np.arange(32).reshape((2, 16))\n d = da.from_array(x, chunks=(2, 4))\n\n result = overlap_internal(d, {0: (0, 0), 1: (1, 1)})\n assert result.chunks == ((2,), (5, 6, 6, 5))\n\n expected = np.array(\n [\n [0, 1, 2, 3, 4, 3, 4, 5, 6, 7, 8, 7, 8, 9, 10, 11, 12, 11, 12, 13, 14, 15],\n [\n 16,\n 17,\n 18,\n 19,\n 20,\n 19,\n 20,\n 21,\n 22,\n 23,\n 24,\n 23,\n 24,\n 25,\n 26,\n 27,\n 28,\n 27,\n 28,\n 29,\n 30,\n 31,\n ],\n ]\n )\n\n assert_eq(result, expected)\n assert same_keys(overlap_internal(d, {0: (0, 0), 1: (1, 1)}), result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_trim_internal_test_periodic.assert_eq_e_0_d_2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_trim_internal_test_periodic.assert_eq_e_0_d_2_", 
"embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 138, "end_line": 154, "span_ids": ["test_periodic", "test_trim_internal"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_trim_internal():\n d = da.ones((40, 60), chunks=(10, 10))\n e = trim_internal(d, axes={0: 1, 1: 2})\n\n assert e.chunks == ((8, 8, 8, 8), (6, 6, 6, 6, 6, 6))\n\n\ndef test_periodic():\n x = np.arange(64).reshape((8, 8))\n d = da.from_array(x, chunks=(4, 4))\n\n e = periodic(d, axis=0, depth=2)\n assert e.shape[0] == d.shape[0] + 4\n assert e.shape[1] == d.shape[1]\n\n assert_eq(e[1, :], d[-1, :])\n assert_eq(e[0, :], d[-2, :])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_reflect_test_reflect.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_reflect_test_reflect.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 157, "end_line": 167, "span_ids": ["test_reflect"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_reflect():\n x = np.arange(10)\n d = da.from_array(x, chunks=(5, 5))\n\n e = reflect(d, axis=0, depth=2)\n expected = np.array([1, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 8])\n assert_eq(e, expected)\n\n e = reflect(d, axis=0, depth=1)\n expected = np.array([0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9])\n assert_eq(e, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_nearest_test_nearest.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_nearest_test_nearest.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 170, "end_line": 180, "span_ids": ["test_nearest"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": "def test_nearest():\n x = np.arange(10)\n d = da.from_array(x, chunks=(5, 5))\n\n e = nearest(d, axis=0, depth=2)\n expected = np.array([0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9])\n assert_eq(e, expected)\n\n e = nearest(d, axis=0, depth=1)\n expected = np.array([0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9])\n assert_eq(e, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_constant_test_constant.assert_eq_e_1_np_on": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_constant_test_constant.assert_eq_e_1_np_on", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 183, "end_line": 192, "span_ids": ["test_constant"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_constant():\n x = np.arange(64).reshape((8, 8))\n d = da.from_array(x, chunks=(4, 4))\n\n e = constant(d, axis=0, depth=2, value=10)\n assert e.shape[0] == d.shape[0] + 4\n assert e.shape[1] == d.shape[1]\n\n assert_eq(e[1, :], np.ones(8, dtype=x.dtype) * 10)\n assert_eq(e[-1, :], np.ones(8, dtype=x.dtype) * 10)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_boundaries_test_boundaries.assert_eq_e_expected_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_boundaries_test_boundaries.assert_eq_e_expected_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 195, "end_line": 217, "span_ids": ["test_boundaries"], "tokens": 458}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_boundaries():\n x = np.arange(64).reshape((8, 8))\n d = da.from_array(x, chunks=(4, 4))\n\n e = boundaries(d, {0: 2, 1: 1}, {0: 0, 1: \"periodic\"})\n\n expected = np.array(\n [\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [7, 0, 1, 2, 3, 4, 5, 6, 7, 0],\n [15, 8, 9, 10, 11, 12, 13, 14, 15, 8],\n [23, 16, 17, 18, 19, 20, 21, 22, 23, 16],\n [31, 24, 25, 26, 27, 28, 29, 30, 31, 24],\n [39, 32, 33, 34, 35, 36, 37, 38, 39, 32],\n [47, 40, 41, 42, 43, 44, 45, 46, 47, 40],\n [55, 48, 49, 50, 51, 52, 53, 54, 55, 48],\n [63, 56, 57, 58, 59, 60, 61, 62, 63, 
56],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n ]\n )\n assert_eq(e, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_test_overlap.assert_same_keys_g_overl": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_test_overlap.assert_same_keys_g_overl", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 220, "end_line": 246, "span_ids": ["test_overlap"], "tokens": 736}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_overlap():\n x = np.arange(64).reshape((8, 8))\n d = da.from_array(x, chunks=(4, 4))\n g = overlap(d, depth={0: 2, 1: 1}, boundary={0: 100, 1: \"reflect\"})\n assert g.chunks == ((8, 8), (6, 6))\n expected = np.array(\n [\n [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100],\n [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100],\n [0, 0, 1, 2, 3, 4, 3, 4, 5, 6, 7, 7],\n [8, 8, 9, 10, 11, 12, 11, 12, 13, 14, 15, 15],\n [16, 16, 17, 18, 19, 20, 19, 20, 21, 22, 23, 23],\n [24, 24, 25, 26, 27, 28, 27, 28, 29, 30, 31, 31],\n [32, 32, 33, 34, 35, 36, 35, 36, 37, 38, 39, 39],\n [40, 40, 41, 42, 43, 44, 43, 44, 45, 46, 47, 47],\n [16, 16, 17, 18, 19, 20, 19, 20, 21, 22, 23, 23],\n [24, 24, 25, 26, 27, 28, 27, 28, 29, 30, 31, 31],\n [32, 32, 33, 34, 35, 36, 35, 36, 37, 38, 39, 39],\n [40, 40, 41, 42, 43, 44, 43, 44, 45, 46, 47, 47],\n [48, 48, 49, 50, 51, 52, 51, 52, 53, 54, 55, 55],\n [56, 56, 57, 58, 59, 60, 59, 60, 61, 62, 63, 63],\n [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100],\n [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100],\n ]\n )\n assert_eq(g, expected)\n assert same_keys(g, overlap(d, depth={0: 2, 1: 1}, boundary={0: 100, 1: \"reflect\"}))\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap.g_4_test_overlap.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap.g_4_test_overlap.None_2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 248, "end_line": 270, "span_ids": ["test_overlap"], "tokens": 575}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_overlap():\n # ... other code\n\n g = overlap(d, depth={0: 2, 1: 1}, boundary={0: 100, 1: \"none\"})\n expected = np.array(\n [\n [100, 100, 100, 100, 100, 100, 100, 100, 100, 100],\n [100, 100, 100, 100, 100, 100, 100, 100, 100, 100],\n [0, 1, 2, 3, 4, 3, 4, 5, 6, 7],\n [8, 9, 10, 11, 12, 11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20, 19, 20, 21, 22, 23],\n [24, 25, 26, 27, 28, 27, 28, 29, 30, 31],\n [32, 33, 34, 35, 36, 35, 36, 37, 38, 39],\n [40, 41, 42, 43, 44, 43, 44, 45, 46, 47],\n [16, 17, 18, 19, 20, 19, 20, 21, 22, 23],\n [24, 25, 26, 27, 28, 27, 28, 29, 30, 31],\n [32, 33, 34, 35, 36, 35, 36, 37, 38, 39],\n [40, 41, 42, 43, 44, 43, 44, 45, 46, 47],\n [48, 49, 50, 51, 52, 51, 52, 53, 54, 55],\n [56, 57, 58, 59, 60, 59, 60, 61, 62, 63],\n [100, 100, 100, 100, 100, 100, 100, 100, 100, 100],\n [100, 100, 100, 100, 100, 100, 100, 100, 100, 100],\n ]\n )\n assert_eq(g, expected)\n assert g.chunks == ((8, 8), (5, 5))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_asymmetric_overlap_boundary_exception_test_map_overlap.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_asymmetric_overlap_boundary_exception_test_map_overlap.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 273, "end_line": 317, "span_ids": ["test_map_overlap", "test_asymmetric_overlap_boundary_exception"], "tokens": 498}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_asymmetric_overlap_boundary_exception():\n x = da.arange(10, chunks=5)\n with pytest.raises(NotImplementedError):\n x.map_overlap(\n lambda x: x + len(x), depth={0: (0, 2)}, boundary=\"reflect\", dtype=x.dtype\n )\n\n\ndef test_map_overlap():\n x = 
da.arange(10, chunks=5)\n y = x.map_overlap(lambda x: x + len(x), depth=2, dtype=x.dtype)\n assert_eq(y, np.arange(10) + 5 + 2 + 2)\n\n x = da.arange(10, chunks=5)\n y = x.map_overlap(lambda x: x + len(x), depth=np.int64(2), dtype=x.dtype)\n assert all([(type(s) is int) for s in y.shape])\n assert_eq(y, np.arange(10) + 5 + 2 + 2)\n\n x = np.arange(16).reshape((4, 4))\n d = da.from_array(x, chunks=(2, 2))\n exp1 = d.map_overlap(lambda x: x + x.size, depth=1, dtype=d.dtype)\n exp2 = d.map_overlap(\n lambda x: x + x.size,\n depth={0: 1, 1: 1},\n boundary={0: \"reflect\", 1: \"none\"},\n dtype=d.dtype,\n )\n exp3 = d.map_overlap(\n lambda x: x + x.size, depth={1: 1}, boundary={1: \"reflect\"}, dtype=d.dtype\n )\n exp4 = d.map_overlap(\n lambda x: x + x.size,\n depth={1: (1, 0)},\n boundary={0: \"none\", 1: \"none\"},\n dtype=d.dtype,\n )\n assert_eq(exp1, x + 16)\n assert_eq(exp2, x + 12)\n assert_eq(exp3, x + 8)\n assert_eq(\n exp4,\n np.block(\n [[x[0:2, 0:2] + 4, x[0:2, 2:4] + 6], [x[2:4, 0:2] + 4, x[2:4, 2:4] + 6]]\n ),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_no_depth_test_map_overlap_multiarray._are_not_somehow_shifted": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_no_depth_test_map_overlap_multiarray._are_not_somehow_shifted", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 320, "end_line": 360, "span_ids": ["test_map_overlap_no_depth", "test_map_overlap_multiarray"], "tokens": 520}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"boundary\", [None, \"reflect\", \"periodic\", \"nearest\", \"none\", 0]\n)\ndef test_map_overlap_no_depth(boundary):\n x = da.arange(10, chunks=5)\n y = x.map_overlap(lambda i: i, depth=0, boundary=boundary, dtype=x.dtype)\n assert_eq(y, x)\n\n\ndef test_map_overlap_multiarray():\n # Same ndim, same numblocks, same chunks\n x = da.arange(10, chunks=5)\n y = da.arange(10, chunks=5)\n z = da.map_overlap(lambda x, y: x + y, x, y, depth=1)\n assert_eq(z, 2 * np.arange(10))\n\n # Same ndim, same numblocks, different chunks\n x = da.arange(10, chunks=(2, 3, 5))\n y = da.arange(10, chunks=(5, 3, 2))\n z = da.map_overlap(lambda x, y: x + y, x, y, depth=1)\n assert z.chunks == ((2, 3, 3, 2),)\n assert_eq(z, 2 * np.arange(10))\n\n # Same ndim, different numblocks, different chunks\n x = da.arange(10, chunks=(10,))\n y = da.arange(10, chunks=(4, 4, 2))\n z = da.map_overlap(lambda x, y: x + y, x, y, depth=1)\n assert z.chunks == ((4, 4, 2),)\n assert_eq(z, 2 * np.arange(10))\n\n # Different ndim, different numblocks, different chunks\n x = da.arange(10, chunks=(10,))\n y = da.arange(10).reshape(1, 10).rechunk((1, (4, 4, 2)))\n z = da.map_overlap(lambda x, y: x + y, x, y, depth=1)\n assert z.chunks == ((1,), (4, 4, 2))\n assert z.shape 
== (1, 10)\n assert_eq(z, 2 * np.arange(10)[np.newaxis])\n\n # Note: checks on arange equality in all of the above help ensure that\n # trimming is applied appropriately to result chunks (i.e. results\n # are not somehow shifted)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_multiarray_defaults_test_map_overlap_multiarray_defaults.assert_eq_z_sum_20_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_multiarray_defaults_test_map_overlap_multiarray_defaults.assert_eq_z_sum_20_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 363, "end_line": 372, "span_ids": ["test_map_overlap_multiarray_defaults"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_overlap_multiarray_defaults():\n # Check that by default, chunk alignment and arrays of varying dimensionality\n # are supported by with no effect on result shape\n # (i.e. defaults are pass-through to map_blocks)\n x = da.ones((10,), chunks=10)\n y = da.ones((1, 10), chunks=5)\n z = da.map_overlap(lambda x, y: x + y, x, y)\n # func should be called twice and get (5,) and (1, 5) arrays of ones each time\n assert_eq(z.shape, (1, 10))\n assert_eq(z.sum(), 20)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_multiarray_different_depths_test_map_overlap_multiarray_uneven_numblocks_exception.with_pytest_raises_ValueE.da_map_overlap_lambda_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_multiarray_different_depths_test_map_overlap_multiarray_uneven_numblocks_exception.with_pytest_raises_ValueE.da_map_overlap_lambda_x_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 375, "end_line": 404, "span_ids": ["test_map_overlap_multiarray_uneven_numblocks_exception", "test_map_overlap_multiarray_different_depths"], "tokens": 289}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_overlap_multiarray_different_depths():\n x = da.ones(5, dtype=\"int\")\n y = da.ones(5, dtype=\"int\")\n\n def run(depth):\n return da.map_overlap(\n lambda 
x, y: x.sum() + y.sum(), x, y, depth=depth, chunks=(0,), trim=False\n ).compute()\n\n # Check that the number of elements added\n # to arrays in overlap works as expected\n # when depths differ for each array\n assert_eq(run([0, 0]), 10)\n assert_eq(run([0, 1]), 12)\n assert_eq(run([1, 1]), 14)\n assert_eq(run([1, 2]), 16)\n assert_eq(run([0, 5]), 20)\n assert_eq(run([5, 5]), 30)\n\n # Ensure that depth > chunk size results in error\n with pytest.raises(ValueError):\n run([0, 6])\n\n\ndef test_map_overlap_multiarray_uneven_numblocks_exception():\n x = da.arange(10, chunks=(10,))\n y = da.arange(10, chunks=(5, 5))\n with pytest.raises(ValueError):\n # Fail with chunk alignment explicitly disabled\n da.map_overlap(lambda x, y: x + y, x, y, align_arrays=False).compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_multiarray_block_broadcast_test_map_overlap_multiarray_block_broadcast.assert_eq_z_sum_4_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_multiarray_block_broadcast_test_map_overlap_multiarray_block_broadcast.assert_eq_z_sum_4_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 407, "end_line": 421, "span_ids": ["test_map_overlap_multiarray_block_broadcast"], "tokens": 222}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_overlap_multiarray_block_broadcast():\n def func(x, y):\n # Return result with expected padding\n z = x.size + y.size\n return np.ones((3, 3)) * z\n\n # Chunks in trailing dimension will be unified to two chunks of size 6\n # and block broadcast will allow chunks from x to repeat\n x = da.ones((12,), chunks=12) # numblocks = (1,) -> (2, 2) after broadcast\n y = da.ones((16, 12), chunks=(8, 6)) # numblocks = (2, 2)\n z = da.map_overlap(func, x, y, chunks=(3, 3), depth=1, trim=True)\n assert_eq(z, z)\n assert z.shape == (2, 2)\n # func call will receive (8,) and (10, 8) arrays for each of 4 blocks\n assert_eq(z.sum(), 4 * (10 * 8 + 8))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_multiarray_variadic_test_map_overlap_multiarray_variadic.assert_all_x_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_multiarray_variadic_test_map_overlap_multiarray_variadic.assert_all_x_compute_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 424, 
"end_line": 441, "span_ids": ["test_map_overlap_multiarray_variadic"], "tokens": 233}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_overlap_multiarray_variadic():\n # Test overlapping row slices from 3D arrays\n xs = [\n # Dim 0 will unify to chunks of size 4 for all:\n da.ones((12, 1, 1), chunks=((12,), 1, 1)),\n da.ones((12, 8, 1), chunks=((8, 4), 8, 1)),\n da.ones((12, 8, 4), chunks=((4, 8), 8, 4)),\n ]\n\n def func(*args):\n return np.array([sum([x.size for x in args])])\n\n x = da.map_overlap(func, *xs, chunks=(1,), depth=1, trim=False, drop_axis=[1, 2])\n\n # Each func call should get 4 rows from each array padded by 1 in each dimension\n size_per_slice = sum([np.pad(x[:4], 1, mode=\"constant\").size for x in xs])\n assert x.shape == (3,)\n assert all(x.compute() == size_per_slice)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_nearest_overlap_test_nearest_overlap.assert_array_almost_equal": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_nearest_overlap_test_nearest_overlap.assert_array_almost_equal", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 467, "end_line": 473, "span_ids": ["test_nearest_overlap"], "tokens": 105}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_nearest_overlap():\n a = np.arange(144).reshape(12, 12).astype(float)\n\n darr = da.from_array(a, chunks=(6, 6))\n garr = overlap(darr, depth={0: 5, 1: 5}, boundary={0: \"nearest\", 1: \"nearest\"})\n tarr = trim_internal(garr, {0: 5, 1: 5})\n assert_array_almost_equal(tarr, a)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_0_depth_test_0_depth.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_0_depth_test_0_depth.None_3", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 476, "end_line": 497, "span_ids": ["test_0_depth"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_0_depth():\n expected = np.arange(100).reshape(10, 10)\n darr = da.from_array(expected, chunks=(5, 2))\n\n depth = {0: 0, 1: 0}\n\n reflected = overlap(darr, depth=depth, boundary=\"reflect\")\n nearest = overlap(darr, depth=depth, boundary=\"nearest\")\n periodic = overlap(darr, depth=depth, boundary=\"periodic\")\n constant = overlap(darr, depth=depth, boundary=42)\n\n result = trim_internal(reflected, depth)\n assert_array_equal(result, expected)\n\n result = trim_internal(nearest, depth)\n assert_array_equal(result, expected)\n\n result = trim_internal(periodic, depth)\n assert_array_equal(result, expected)\n\n result = trim_internal(constant, depth)\n assert_array_equal(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_some_0_depth_test_some_0_depth.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_some_0_depth_test_some_0_depth.None_3", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 500, "end_line": 521, "span_ids": ["test_some_0_depth"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_some_0_depth():\n expected = np.arange(100).reshape(10, 10)\n darr = da.from_array(expected, chunks=(5, 5))\n\n depth = {0: 4, 1: 0}\n\n reflected = overlap(darr, depth=depth, boundary=\"reflect\")\n nearest = overlap(darr, depth=depth, boundary=\"nearest\")\n periodic = overlap(darr, depth=depth, boundary=\"periodic\")\n constant = overlap(darr, depth=depth, boundary=42)\n\n result = trim_internal(reflected, depth)\n assert_array_equal(result, expected)\n\n result = trim_internal(nearest, depth)\n assert_array_equal(result, expected)\n\n result = trim_internal(periodic, depth)\n assert_array_equal(result, expected)\n\n result = trim_internal(constant, depth)\n assert_array_equal(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_one_chunk_along_axis_test_constant_boundaries.assert_b_chunks_darr_c": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_one_chunk_along_axis_test_constant_boundaries.assert_b_chunks_darr_c", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 524, "end_line": 535, "span_ids": ["test_constant_boundaries", "test_one_chunk_along_axis"], 
"tokens": 164}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_one_chunk_along_axis():\n a = np.arange(2 * 9).reshape(2, 9)\n darr = da.from_array(a, chunks=((2,), (2, 2, 2, 3)))\n g = overlap(darr, depth=0, boundary=0)\n assert a.shape == g.shape\n\n\ndef test_constant_boundaries():\n a = np.arange(1 * 9).reshape(1, 9)\n darr = da.from_array(a, chunks=((1,), (2, 2, 2, 3)))\n b = boundaries(darr, {0: 0, 1: 0}, {0: 0, 1: 0})\n assert b.chunks == darr.chunks", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_depth_equals_boundary_length_test_depth_equals_boundary_length.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_depth_equals_boundary_length_test_depth_equals_boundary_length.None_3", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 538, "end_line": 559, "span_ids": ["test_depth_equals_boundary_length"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_depth_equals_boundary_length():\n expected = np.arange(100).reshape(10, 10)\n darr = da.from_array(expected, chunks=(5, 5))\n\n depth = {0: 5, 1: 5}\n\n reflected = overlap(darr, depth=depth, boundary=\"reflect\")\n nearest = overlap(darr, depth=depth, boundary=\"nearest\")\n periodic = overlap(darr, depth=depth, boundary=\"periodic\")\n constant = overlap(darr, depth=depth, boundary=42)\n\n result = trim_internal(reflected, depth)\n assert_array_equal(result, expected)\n\n result = trim_internal(nearest, depth)\n assert_array_equal(result, expected)\n\n result = trim_internal(periodic, depth)\n assert_array_equal(result, expected)\n\n result = trim_internal(constant, depth)\n assert_array_equal(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_depth_greater_than_boundary_length_test_depth_greater_than_boundary_length.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_depth_greater_than_boundary_length_test_depth_greater_than_boundary_length.None_3", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 562, "end_line": 584, "span_ids": 
["test_depth_greater_than_boundary_length"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail\ndef test_depth_greater_than_boundary_length():\n expected = np.arange(100).reshape(10, 10)\n darr = da.from_array(expected, chunks=(5, 5))\n\n depth = {0: 8, 1: 7}\n\n reflected = overlap(darr, depth=depth, boundary=\"reflect\")\n nearest = overlap(darr, depth=depth, boundary=\"nearest\")\n periodic = overlap(darr, depth=depth, boundary=\"periodic\")\n constant = overlap(darr, depth=depth, boundary=42)\n\n result = trim_internal(reflected, depth)\n assert_array_equal(result, expected)\n\n result = trim_internal(nearest, depth)\n assert_array_equal(result, expected)\n\n result = trim_internal(periodic, depth)\n assert_array_equal(result, expected)\n\n result = trim_internal(constant, depth)\n assert_array_equal(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_bad_depth_raises_test_none_boundaries.assert_eq_exp_res_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_bad_depth_raises_test_none_boundaries.assert_eq_exp_res_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 587, "end_line": 607, "span_ids": ["test_none_boundaries", "test_bad_depth_raises"], "tokens": 241}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_bad_depth_raises():\n expected = np.arange(144).reshape(12, 12)\n darr = da.from_array(expected, chunks=(5, 5))\n\n depth = {0: 4, 1: 2}\n\n pytest.raises(ValueError, overlap, darr, depth=depth, boundary=1)\n\n\ndef test_none_boundaries():\n x = da.from_array(np.arange(16).reshape(4, 4), chunks=(2, 2))\n exp = boundaries(x, 2, {0: \"none\", 1: 33})\n res = np.array(\n [\n [33, 33, 0, 1, 2, 3, 33, 33],\n [33, 33, 4, 5, 6, 7, 33, 33],\n [33, 33, 8, 9, 10, 11, 33, 33],\n [33, 33, 12, 13, 14, 15, 33, 33],\n ]\n )\n assert_eq(exp, res)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_small_test_no_shared_keys_with_different_depths.da_compute_r_scheduler_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_small_test_no_shared_keys_with_different_depths.da_compute_r_scheduler_", "embedding": null, "metadata": 
{"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 610, "end_line": 639, "span_ids": ["test_no_shared_keys_with_different_depths", "test_overlap_small"], "tokens": 237}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_overlap_small():\n x = da.ones((10, 10), chunks=(5, 5))\n\n y = x.map_overlap(lambda x: x, depth=1)\n assert len(y.dask) < 200\n\n y = x.map_overlap(lambda x: x, depth=1, boundary=\"none\")\n assert len(y.dask) < 100\n\n\ndef test_no_shared_keys_with_different_depths():\n da.random.seed(0)\n a = da.random.random((9, 9), chunks=(3, 3))\n\n def check(x):\n assert x.shape == (3, 3)\n return x\n\n r = [\n a.map_overlap(\n lambda a: a + 1,\n dtype=a.dtype,\n depth={j: int(i == j) for j in range(a.ndim)},\n boundary=\"none\",\n ).map_blocks(check, dtype=a.dtype)\n for i in range(a.ndim)\n ]\n\n assert set(r[0].dask) & set(r[1].dask) == set(a.dask)\n da.compute(*r, scheduler=\"single-threaded\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_few_dimensions_small_test_overlap_few_dimensions_small.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_few_dimensions_small_test_overlap_few_dimensions_small.None_5", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 642, "end_line": 658, "span_ids": ["test_overlap_few_dimensions_small"], "tokens": 209}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_overlap_few_dimensions_small():\n x = da.ones((20, 20), chunks=(10, 10))\n\n a = x.map_overlap(lambda x: x, depth={0: 1})\n assert_eq(x, a)\n assert any(isinstance(k[1], float) for k in a.dask)\n assert all(isinstance(k[2], int) for k in a.dask)\n\n b = x.map_overlap(lambda x: x, depth={1: 1})\n assert_eq(x, b)\n assert all(isinstance(k[1], int) for k in b.dask)\n assert any(isinstance(k[2], float) for k in b.dask)\n\n c = x.map_overlap(lambda x: x, depth={0: 1, 1: 1})\n assert_eq(x, c)\n assert any(isinstance(k[1], float) for k in c.dask)\n assert any(isinstance(k[2], float) for k in c.dask)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_few_dimensions_test_overlap_few_dimensions.assert_len_c_dask_10_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_few_dimensions_test_overlap_few_dimensions.assert_len_c_dask_10_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 661, "end_line": 671, "span_ids": ["test_overlap_few_dimensions"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_overlap_few_dimensions():\n x = da.ones((100, 100), chunks=(10, 10))\n\n a = x.map_overlap(lambda x: x, depth={0: 1})\n b = x.map_overlap(lambda x: x, depth={1: 1})\n c = x.map_overlap(lambda x: x, depth={0: 1, 1: 1})\n\n assert len(a.dask) == len(b.dask)\n assert len(a.dask) < len(c.dask)\n\n assert len(c.dask) < 10 * len(a.dask)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_trim_boundry_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_trim_boundry_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 674, "end_line": 690, "span_ids": ["test_trim_boundry"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"boundary\", [\"reflect\", \"periodic\", \"nearest\", \"none\"])\ndef test_trim_boundry(boundary):\n x = da.from_array(np.arange(24).reshape(4, 6), chunks=(2, 3))\n x_overlaped = da.overlap.overlap(x, 2, boundary={0: \"reflect\", 1: boundary})\n x_trimmed = da.overlap.trim_overlap(\n x_overlaped, 2, boundary={0: \"reflect\", 1: boundary}\n )\n assert np.all(x == x_trimmed)\n\n x_overlaped = da.overlap.overlap(x, 2, boundary={1: boundary})\n x_trimmed = da.overlap.trim_overlap(x_overlaped, 2, boundary={1: boundary})\n assert np.all(x == x_trimmed)\n\n x_overlaped = da.overlap.overlap(x, 2, boundary=boundary)\n x_trimmed = da.overlap.trim_overlap(x_overlaped, 2, boundary=boundary)\n assert np.all(x == x_trimmed)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_percentiles.py_pytest_test_percentile.if_method_tdigest_.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_percentiles.py_pytest_test_percentile.if_method_tdigest_.assert_eq_", "embedding": null, 
"metadata": {"file_path": "dask/array/tests/test_percentiles.py", "file_name": "test_percentiles.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 52, "span_ids": ["test_percentile", "imports"], "tokens": 383}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\npytest.importorskip(\"numpy\")\n\nimport numpy as np\n\nimport dask.array as da\nfrom dask.array.utils import assert_eq, same_keys\n\ntry:\n import crick\nexcept ImportError:\n crick = None\n\n\npercentile_methods = pytest.mark.parametrize(\n \"method\",\n [\n pytest.param(\n \"tdigest\", marks=pytest.mark.skipif(not crick, reason=\"Requires crick\")\n ),\n \"dask\",\n ],\n)\n\n\n@percentile_methods\ndef test_percentile(method):\n d = da.ones((16,), chunks=(4,))\n qs = [0, 50, 100]\n\n assert_eq(da.percentile(d, qs, method=method), np.array([1, 1, 1], dtype=d.dtype))\n\n x = np.array([0, 0, 5, 5, 5, 5, 20, 20])\n d = da.from_array(x, chunks=(3,))\n\n result = da.percentile(d, qs, method=method)\n assert_eq(result, np.array([0, 5, 20], dtype=result.dtype))\n\n assert same_keys(\n da.percentile(d, qs, method=method), da.percentile(d, qs, method=method)\n )\n assert not same_keys(\n da.percentile(d, qs, method=method), da.percentile(d, [0, 50], method=method)\n )\n\n if method != \"tdigest\":\n x = np.array([\"a\", \"a\", \"d\", \"d\", \"d\", \"e\"])\n d = da.from_array(x, chunks=(3,))\n assert_eq(\n da.percentile(d, [0, 50, 100]), np.array([\"a\", \"d\", \"e\"], dtype=x.dtype)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_percentiles.py_test_percentile_with_categoricals_test_percentile_with_categoricals.assert_same_keys_da_perce": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_percentiles.py_test_percentile_with_categoricals_test_percentile_with_categoricals.assert_same_keys_da_perce", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_percentiles.py", "file_name": "test_percentiles.py", "file_type": "text/x-python", "category": "test", "start_line": 55, "end_line": 71, "span_ids": ["test_percentile_with_categoricals"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skip\ndef test_percentile_with_categoricals():\n try:\n import pandas as pd\n except ImportError:\n return\n x0 = pd.Categorical([\"Alice\", \"Bob\", \"Charlie\", \"Dennis\", \"Alice\", \"Alice\"])\n x1 = pd.Categorical([\"Alice\", \"Bob\", \"Charlie\", \"Dennis\", \"Alice\", \"Alice\"])\n\n dsk = {(\"x\", 0): x0, (\"x\", 1): x1}\n\n x = da.Array(dsk, \"x\", chunks=((6, 6),))\n\n p = da.percentile(x, [50])\n assert (p.compute().categories == x0.categories).all()\n assert 
(p.compute().codes == [0]).all()\n assert same_keys(da.percentile(x, [50]), da.percentile(x, [50]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_percentiles.py_test_percentiles_with_empty_arrays_test_percentiles_with_scaler_percentile.assert_eq_da_percentile_d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_percentiles.py_test_percentiles_with_empty_arrays_test_percentiles_with_scaler_percentile.assert_eq_da_percentile_d", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_percentiles.py", "file_name": "test_percentiles.py", "file_type": "text/x-python", "category": "test", "start_line": 74, "end_line": 98, "span_ids": ["test_percentiles_with_empty_q", "test_percentiles_with_scaler_percentile", "test_percentiles_with_empty_arrays"], "tokens": 233}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@percentile_methods\ndef test_percentiles_with_empty_arrays(method):\n x = da.ones(10, chunks=((5, 0, 5),))\n assert_eq(\n da.percentile(x, [10, 50, 90], method=method),\n np.array([1, 1, 1], dtype=x.dtype),\n )\n\n\n@percentile_methods\ndef test_percentiles_with_empty_q(method):\n x = da.ones(10, chunks=((5, 0, 5),))\n assert_eq(\n da.percentile(x, [], method=method),\n np.array([], dtype=x.dtype),\n )\n\n\n@percentile_methods\n@pytest.mark.parametrize(\"q\", [5, 5.0, np.int64(5), np.float64(5)])\ndef test_percentiles_with_scaler_percentile(method, q):\n # Regression test to ensure da.percentile works with scalar percentiles\n # See #3020\n d = da.ones((16,), chunks=(4,))\n assert_eq(da.percentile(d, q, method=method), np.array([1], dtype=d.dtype))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_percentiles.py_test_unknown_chunk_sizes_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_percentiles.py_test_unknown_chunk_sizes_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_percentiles.py", "file_name": "test_percentiles.py", "file_type": "text/x-python", "category": "test", "start_line": 101, "end_line": 113, "span_ids": ["test_unknown_chunk_sizes"], "tokens": 126}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@percentile_methods\ndef test_unknown_chunk_sizes(method):\n x = da.random.random(1000, chunks=(100,))\n x._chunks = ((np.nan,) * 10,)\n\n result = da.percentile(x, 50, method=method).compute()\n assert 0.1 < result < 
0.9\n\n a, b = da.percentile(x, [40, 60], method=method).compute()\n assert 0.1 < a < 0.9\n assert 0.1 < b < 0.9\n assert a < b", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_pytest_test_determinisim_through_dask_values.assert_eq_samples_1_samp": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_pytest_test_determinisim_through_dask_values.assert_eq_samples_1_samp", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_random.py", "file_name": "test_random.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 53, "span_ids": ["test_doc_randomstate", "imports", "test_RandomState", "test_serializability", "test_concurrency", "test_determinisim_through_dask_values"], "tokens": 399}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\npytest.importorskip(\"numpy\")\n\nimport numpy as np\n\nimport dask\nimport dask.array as da\nfrom dask.utils import key_split\nfrom dask.array.core import Array\nfrom dask.array.random import random, exponential, normal\nfrom dask.array.utils import assert_eq\nfrom dask.multiprocessing import _dumps, _loads\n\n\ndef test_RandomState():\n state = da.random.RandomState(5)\n x = state.normal(10, 1, size=10, chunks=5)\n assert_eq(x, x)\n\n state = da.random.RandomState(5)\n y = state.normal(10, 1, size=10, chunks=5)\n assert_eq(x, y)\n\n\ndef test_concurrency():\n state = da.random.RandomState(5)\n x = state.normal(10, 1, size=10, chunks=2)\n\n state = da.random.RandomState(5)\n y = state.normal(10, 1, size=10, chunks=2)\n assert (x.compute(scheduler=\"processes\") == y.compute(scheduler=\"processes\")).all()\n\n\ndef test_doc_randomstate():\n assert \"mean\" in da.random.RandomState(5).normal.__doc__\n\n\ndef test_serializability():\n state = da.random.RandomState(5)\n x = state.normal(10, 1, size=10, chunks=5)\n\n y = _loads(_dumps(x))\n\n assert_eq(x, y)\n\n\ndef test_determinisim_through_dask_values():\n samples_1 = da.random.RandomState(42).normal(size=1000, chunks=10)\n samples_2 = da.random.RandomState(42).normal(size=1000, chunks=10)\n\n assert set(samples_1.dask) == set(samples_2.dask)\n assert_eq(samples_1, samples_2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_randomstate_consistent_names_test_randomstate_consistent_names.assert_sorted_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_randomstate_consistent_names_test_randomstate_consistent_names.assert_sorted_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_random.py", "file_name": "test_random.py", "file_type": "text/x-python", 
"category": "test", "start_line": 56, "end_line": 64, "span_ids": ["test_randomstate_consistent_names"], "tokens": 140}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_randomstate_consistent_names():\n state1 = da.random.RandomState(42)\n state2 = da.random.RandomState(42)\n assert sorted(state1.normal(size=(100, 100), chunks=(10, 10)).dask) == sorted(\n state2.normal(size=(100, 100), chunks=(10, 10)).dask\n )\n assert sorted(\n state1.normal(size=100, loc=4.5, scale=5.0, chunks=10).dask\n ) == sorted(state2.normal(size=100, loc=4.5, scale=5.0, chunks=10).dask)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_random_test_parametrized_random_function.assert_len_y_90": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_random_test_parametrized_random_function.assert_len_y_90", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_random.py", "file_name": "test_random.py", "file_type": "text/x-python", "category": "test", "start_line": 67, "end_line": 90, "span_ids": ["test_parametrized_random_function", "test_random"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_random():\n a = random((10, 10), chunks=(5, 5))\n assert isinstance(a, Array)\n assert isinstance(a.name, str) and a.name\n assert a.shape == (10, 10)\n assert a.chunks == ((5, 5), (5, 5))\n\n x = set(np.array(a).flat)\n\n assert len(x) > 90\n\n\ndef test_parametrized_random_function():\n a = exponential(1000, (10, 10), chunks=(5, 5))\n assert isinstance(a, Array)\n assert isinstance(a.name, str) and a.name\n assert a.shape == (10, 10)\n assert a.chunks == ((5, 5), (5, 5))\n\n x = np.array(a)\n assert 10 < x.mean() < 100000\n\n y = set(x.flat)\n assert len(y) > 90", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_kwargs_test_consistent_across_sizes.assert_eq_x1_x3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_kwargs_test_consistent_across_sizes.assert_eq_x1_x3_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_random.py", "file_name": "test_random.py", "file_type": "text/x-python", "category": "test", "start_line": 93, "end_line": 135, "span_ids": ["test_consistent_across_sizes", "test_kwargs", "test_docs", "test_can_make_really_big_random_array", "test_random_seed", 
"test_unique_names"], "tokens": 361}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_kwargs():\n a = normal(loc=10.0, scale=0.1, size=(10, 10), chunks=(5, 5))\n assert isinstance(a, Array)\n x = np.array(a)\n assert 8 < x.mean() < 12\n\n\ndef test_unique_names():\n a = random((10, 10), chunks=(5, 5))\n b = random((10, 10), chunks=(5, 5))\n\n assert a.name != b.name\n\n\ndef test_docs():\n assert \"exponential\" in exponential.__doc__\n assert \"exponential\" in exponential.__name__\n assert \"# doctest: +SKIP\" in normal.__doc__\n\n\ndef test_can_make_really_big_random_array():\n normal(10, 1, (1000000, 1000000), chunks=(100000, 100000))\n\n\ndef test_random_seed():\n da.random.seed(123)\n x = da.random.normal(size=10, chunks=5)\n y = da.random.normal(size=10, chunks=5)\n\n da.random.seed(123)\n a = da.random.normal(size=10, chunks=5)\n b = da.random.normal(size=10, chunks=5)\n\n assert_eq(x, a)\n assert_eq(y, b)\n\n\ndef test_consistent_across_sizes():\n x1 = da.random.RandomState(123).random(20, chunks=20)\n x2 = da.random.RandomState(123).random(100, chunks=20)[:20]\n x3 = da.random.RandomState(123).random(200, chunks=20)[:20]\n assert_eq(x1, x2)\n assert_eq(x1, x3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_random_all_test_random_all.da_random_standard_t_2_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_random_all_test_random_all.da_random_standard_t_2_s", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_random.py", "file_name": "test_random.py", "file_type": "text/x-python", "category": "test", "start_line": 138, "end_line": 177, "span_ids": ["test_random_all"], "tokens": 666}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_random_all():\n da.random.beta(1, 2, size=5, chunks=3).compute()\n da.random.binomial(10, 0.5, size=5, chunks=3).compute()\n da.random.chisquare(1, size=5, chunks=3).compute()\n da.random.exponential(1, size=5, chunks=3).compute()\n da.random.f(1, 2, size=5, chunks=3).compute()\n da.random.gamma(5, 1, size=5, chunks=3).compute()\n da.random.geometric(1, size=5, chunks=3).compute()\n da.random.gumbel(1, size=5, chunks=3).compute()\n da.random.hypergeometric(1, 2, 3, size=5, chunks=3).compute()\n da.random.laplace(size=5, chunks=3).compute()\n da.random.logistic(size=5, chunks=3).compute()\n da.random.lognormal(size=5, chunks=3).compute()\n da.random.logseries(0.5, size=5, chunks=3).compute()\n da.random.multinomial(20, [1 / 6.0] * 6, size=5, chunks=3).compute()\n da.random.negative_binomial(5, 0.5, size=5, chunks=3).compute()\n da.random.noncentral_chisquare(2, 2, size=5, chunks=3).compute()\n\n 
da.random.noncentral_f(2, 2, 3, size=5, chunks=3).compute()\n da.random.normal(2, 2, size=5, chunks=3).compute()\n da.random.pareto(1, size=5, chunks=3).compute()\n da.random.poisson(size=5, chunks=3).compute()\n\n da.random.power(1, size=5, chunks=3).compute()\n da.random.rayleigh(size=5, chunks=3).compute()\n da.random.random_sample(size=5, chunks=3).compute()\n\n da.random.triangular(1, 2, 3, size=5, chunks=3).compute()\n da.random.uniform(size=5, chunks=3).compute()\n da.random.vonmises(2, 3, size=5, chunks=3).compute()\n da.random.wald(1, 2, size=5, chunks=3).compute()\n\n da.random.weibull(2, size=5, chunks=3).compute()\n da.random.zipf(2, size=5, chunks=3).compute()\n\n da.random.standard_cauchy(size=5, chunks=3).compute()\n da.random.standard_exponential(size=5, chunks=3).compute()\n da.random.standard_gamma(2, size=5, chunks=3).compute()\n da.random.standard_normal(size=5, chunks=3).compute()\n da.random.standard_t(2, size=5, chunks=3).compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_array_broadcasting_test_multinomial.for_size_chunks_in_5_.assert_x_shape_y_shape": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_array_broadcasting_test_multinomial.for_size_chunks_in_5_.assert_x_shape_y_shape", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_random.py", "file_name": "test_random.py", "file_type": "text/x-python", "category": "test", "start_line": 180, "end_line": 231, "span_ids": ["test_multinomial", "test_array_broadcasting"], "tokens": 590}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n not hasattr(np, \"broadcast_to\"), reason='requires numpy 1.10 method \"broadcast_to\"'\n)\ndef test_array_broadcasting():\n arr = np.arange(6).reshape((2, 3))\n daones = da.ones((2, 3, 4), chunks=3)\n assert da.random.poisson(arr, chunks=3).compute().shape == (2, 3)\n\n for x in (arr, daones):\n y = da.random.normal(x, 2, chunks=3)\n assert y.shape == x.shape\n assert y.compute().shape == x.shape\n\n y = da.random.normal(daones, 2, chunks=3)\n assert set(daones.dask).issubset(set(y.dask))\n\n assert da.random.normal(\n np.ones((1, 4)), da.ones((2, 3, 4), chunks=(2, 3, 4)), chunks=(2, 3, 4)\n ).compute().shape == (2, 3, 4)\n assert (\n da.random.normal(\n scale=np.ones((1, 4)),\n loc=da.ones((2, 3, 4), chunks=(2, 3, 4)),\n size=(2, 2, 3, 4),\n chunks=(2, 2, 3, 4),\n )\n .compute()\n .shape\n == (2, 2, 3, 4)\n )\n\n with pytest.raises(ValueError):\n da.random.normal(arr, np.ones((3, 1)), size=(2, 3, 4), chunks=3)\n\n for o in (np.ones(100), da.ones(100, chunks=(50,)), 1):\n a = da.random.normal(1000 * o, 0.01, chunks=(50,))\n assert 800 < a.mean().compute() < 1200\n\n # ensure that mis-matched chunks align well\n x = np.arange(10) ** 3\n y = da.from_array(x, chunks=(1,))\n z = da.random.normal(y, 0.01, chunks=(10,))\n\n assert 0.8 < z.mean().compute() / x.mean() < 1.2\n\n\ndef test_multinomial():\n for 
size, chunks in [(5, 3), ((5, 4), (2, 3))]:\n x = da.random.multinomial(20, [1 / 6.0] * 6, size=size, chunks=chunks)\n y = np.random.multinomial(20, [1 / 6.0] * 6, size=size)\n\n assert x.shape == y.shape == x.compute().shape", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_choice_test_choice.assert_len_res_len_np": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_choice_test_choice.assert_len_res_len_np", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_random.py", "file_name": "test_random.py", "file_type": "text/x-python", "category": "test", "start_line": 234, "end_line": 292, "span_ids": ["test_choice"], "tokens": 623}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_choice():\n np_dtype = np.random.choice(1, size=()).dtype\n size = (10, 3)\n chunks = 4\n x = da.random.choice(3, size=size, chunks=chunks)\n assert x.dtype == np_dtype\n assert x.shape == size\n res = x.compute()\n assert res.dtype == np_dtype\n assert res.shape == size\n\n py_a = [1, 3, 5, 7, 9]\n np_a = np.array(py_a, dtype=\"f8\")\n da_a = da.from_array(np_a, chunks=2)\n\n for a in [py_a, np_a, da_a]:\n x = da.random.choice(a, size=size, chunks=chunks)\n res = x.compute()\n expected_dtype = np.asarray(a).dtype\n assert x.dtype == expected_dtype\n assert res.dtype == expected_dtype\n assert set(np.unique(res)).issubset(np_a)\n\n np_p = np.array([0, 0.2, 0.2, 0.3, 0.3])\n da_p = da.from_array(np_p, chunks=2)\n\n for a, p in [(da_a, np_p), (np_a, da_p)]:\n x = da.random.choice(a, size=size, chunks=chunks, p=p)\n res = x.compute()\n assert x.dtype == np_a.dtype\n assert res.dtype == np_a.dtype\n assert set(np.unique(res)).issubset(np_a[1:])\n\n np_dtype = np.random.choice(1, size=(), p=np.array([1])).dtype\n x = da.random.choice(5, size=size, chunks=chunks, p=np_p)\n res = x.compute()\n assert x.dtype == np_dtype\n assert res.dtype == np_dtype\n\n errs = [\n (-1, None), # negative a\n (np_a[:, None], None), # a must be 1D\n (np_a, np_p[:, None]), # p must be 1D\n (np_a, np_p[:-2]), # a and p must match\n (3, np_p), # a and p must match\n (4, [0.2, 0.2, 0.3]),\n ] # p must sum to 1\n\n for (a, p) in errs:\n with pytest.raises(ValueError):\n da.random.choice(a, size=size, chunks=chunks, p=p)\n\n with pytest.raises(NotImplementedError):\n da.random.choice(da_a, size=size, chunks=chunks, replace=False)\n\n # Want to make sure replace=False works for a single-partition output array\n x = da.random.choice(da_a, size=da_a.shape[0], chunks=-1, replace=False)\n res = x.compute()\n assert len(res) == len(np.unique(res))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_create_with_auto_dimensions_test_permutation.assert_x_shape_100_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_create_with_auto_dimensions_test_permutation.assert_x_shape_100_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_random.py", "file_name": "test_random.py", "file_type": "text/x-python", "category": "test", "start_line": 295, "end_line": 327, "span_ids": ["test_create_with_auto_dimensions", "test_names", "test_permutation"], "tokens": 265}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_create_with_auto_dimensions():\n with dask.config.set({\"array.chunk-size\": \"128MiB\"}):\n x = da.random.random((10000, 10000), chunks=(-1, \"auto\"))\n assert x.chunks == ((10000,), (1250,) * 8)\n\n y = da.random.random((10000, 10000), chunks=\"auto\")\n assert y.chunks == ((2500,) * 4, (2500,) * 4)\n\n\ndef test_names():\n name = da.random.normal(0, 1, size=(1000,), chunks=(500,)).name\n\n assert name.startswith(\"normal\")\n assert len(key_split(name)) < 10\n\n\ndef test_permutation():\n x = da.arange(12, chunks=3)\n y = da.random.permutation(x)\n\n assert y.shape == x.shape\n assert y.dtype == x.dtype\n\n y.compute() # smoke test\n\n a = da.random.RandomState(0)\n b = da.random.RandomState(0)\n r1 = a.permutation(x)\n r2 = b.permutation(x)\n assert_eq(r1, r2)\n\n x = da.random.permutation(100)\n assert x.shape == (100,)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_external_randomstate_class_test_external_randomstate_class.assert_eq_a_b_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_external_randomstate_class_test_external_randomstate_class.assert_eq_a_b_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_random.py", "file_name": "test_random.py", "file_type": "text/x-python", "category": "test", "start_line": 330, "end_line": 350, "span_ids": ["test_external_randomstate_class"], "tokens": 189}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_external_randomstate_class():\n randomgen = pytest.importorskip(\"randomgen\")\n\n rs = da.random.RandomState(\n RandomState=lambda seed: randomgen.RandomGenerator(randomgen.DSFMT(seed))\n )\n x = rs.normal(0, 1, size=10, chunks=(5,))\n assert_eq(x, x)\n\n rs = da.random.RandomState(\n RandomState=lambda seed: randomgen.RandomGenerator(randomgen.DSFMT(seed)),\n seed=123,\n )\n a = rs.normal(0, 1, size=10, chunks=(5,))\n rs = da.random.RandomState(\n 
RandomState=lambda seed: randomgen.RandomGenerator(randomgen.DSFMT(seed)),\n seed=123,\n )\n b = rs.normal(0, 1, size=10, chunks=(5,))\n assert a.name == b.name\n assert_eq(a, b)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_auto_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_auto_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_random.py", "file_name": "test_random.py", "file_type": "text/x-python", "category": "test", "start_line": 353, "end_line": 387, "span_ids": ["test_randint_dtype", "test_doc_wraps_deprecated", "test_auto_chunks", "test_raises_bad_kwarg", "test_randomstate_kwargs"], "tokens": 236}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_auto_chunks():\n with dask.config.set({\"array.chunk-size\": \"50 MiB\"}):\n x = da.random.random((10000, 10000))\n assert 4 < x.npartitions < 32\n\n\ndef test_randint_dtype():\n x = da.random.randint(0, 255, size=10, dtype=\"uint8\")\n assert_eq(x, x)\n assert x.dtype == \"uint8\"\n assert x.compute().dtype == \"uint8\"\n\n\ndef test_doc_wraps_deprecated():\n with pytest.warns(FutureWarning):\n\n @da.random.doc_wraps(np.random.normal)\n def f():\n pass\n\n\ndef test_raises_bad_kwarg():\n with pytest.raises(Exception) as info:\n da.random.standard_normal(size=(10,), dtype=\"float64\")\n\n assert \"dtype\" in str(info.value)\n\n\ndef test_randomstate_kwargs():\n cupy = pytest.importorskip(\"cupy\")\n\n rs = da.random.RandomState(RandomState=cupy.random.RandomState)\n x = rs.standard_normal((10, 5), dtype=np.float32)\n assert x.dtype == np.float32", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_from_itertools_import_pro_test_rechunk_internals_1.assert_i1d_1_answer4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_from_itertools_import_pro_test_rechunk_internals_1.assert_i1d_1_answer4", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 52, "span_ids": ["imports", "test_rechunk_internals_1"], "tokens": 513}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from itertools import product\nimport warnings\n\nimport pytest\n\nnp = pytest.importorskip(\"numpy\")\n\nimport dask\nfrom 
dask.utils import funcname\nfrom dask.array.utils import assert_eq\nfrom dask.array.rechunk import intersect_chunks, rechunk, normalize_chunks\nfrom dask.array.rechunk import cumdims_label, _breakpoints, _intersect_1d, _old_to_new\nfrom dask.array.rechunk import plan_rechunk, divide_to_width, merge_to_number\nimport dask.array as da\n\n\ndef test_rechunk_internals_1():\n \"\"\"Test the cumdims_label and _breakpoints and\n _intersect_1d internal funcs to rechunk.\"\"\"\n new = cumdims_label(((1, 1, 2), (1, 5, 1)), \"n\")\n old = cumdims_label(((4,), (1,) * 5), \"o\")\n breaks = tuple(_breakpoints(o, n) for o, n in zip(old, new))\n answer = ((\"o\", 0), (\"n\", 0), (\"n\", 1), (\"n\", 2), (\"o\", 4), (\"n\", 4))\n assert breaks[0] == answer\n answer2 = (\n (\"o\", 0),\n (\"n\", 0),\n (\"o\", 1),\n (\"n\", 1),\n (\"o\", 2),\n (\"o\", 3),\n (\"o\", 4),\n (\"o\", 5),\n (\"n\", 6),\n (\"n\", 7),\n )\n assert breaks[1] == answer2\n i1d = [_intersect_1d(b) for b in breaks]\n answer3 = [[(0, slice(0, 1))], [(0, slice(1, 2))], [(0, slice(2, 4))]]\n assert i1d[0] == answer3\n answer4 = [\n [(0, slice(0, 1))],\n [\n (1, slice(0, 1)),\n (2, slice(0, 1)),\n (3, slice(0, 1)),\n (4, slice(0, 1)),\n (5, slice(0, 1)),\n ],\n [(5, slice(1, 2))],\n ]\n assert i1d[1] == answer4", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_1_test_intersect_1.assert_answer_cross": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_1_test_intersect_1.assert_answer_cross", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 55, "end_line": 65, "span_ids": ["test_intersect_1"], "tokens": 143}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_intersect_1():\n \"\"\" Convert 1 D chunks\"\"\"\n old = ((10, 10, 10, 10, 10),)\n new = ((25, 5, 20),)\n answer = [\n (((0, slice(0, 10)),), ((1, slice(0, 10)),), ((2, slice(0, 5)),)),\n (((2, slice(5, 10)),),),\n (((3, slice(0, 10)),), ((4, slice(0, 10)),)),\n ]\n cross = list(intersect_chunks(old_chunks=old, new_chunks=new))\n assert answer == cross", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_2_test_intersect_2.assert_answer_cross": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_2_test_intersect_2.assert_answer_cross", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 68, "end_line": 
79, "span_ids": ["test_intersect_2"], "tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_intersect_2():\n \"\"\" Convert 1 D chunks\"\"\"\n old = ((20, 20, 20, 20, 20),)\n new = ((58, 4, 20, 18),)\n answer = [\n (((0, slice(0, 20)),), ((1, slice(0, 20)),), ((2, slice(0, 18)),)),\n (((2, slice(18, 20)),), ((3, slice(0, 2)),)),\n (((3, slice(2, 20)),), ((4, slice(0, 2)),)),\n (((4, slice(2, 20)),),),\n ]\n cross = list(intersect_chunks(old_chunks=old, new_chunks=new))\n assert answer == cross", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_1d_test_rechunk_2d.assert_np_all_x2_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_1d_test_rechunk_2d.assert_np_all_x2_compute_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 82, "end_line": 99, "span_ids": ["test_rechunk_2d", "test_rechunk_1d"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rechunk_1d():\n \"\"\"Try rechunking a random 1d matrix\"\"\"\n a = np.random.uniform(0, 1, 30)\n x = da.from_array(a, chunks=((10,) * 3,))\n new = ((5,) * 6,)\n x2 = rechunk(x, chunks=new)\n assert x2.chunks == new\n assert np.all(x2.compute() == a)\n\n\ndef test_rechunk_2d():\n \"\"\"Try rechunking a random 2d matrix\"\"\"\n a = np.random.uniform(0, 1, 300).reshape((10, 30))\n x = da.from_array(a, chunks=((1, 2, 3, 4), (5,) * 6))\n new = ((5, 5), (15,) * 2)\n x2 = rechunk(x, chunks=new)\n assert x2.chunks == new\n assert np.all(x2.compute() == a)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_4d_test_rechunk_expand.assert_np_all_y_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_4d_test_rechunk_expand.assert_np_all_y_compute_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 102, "end_line": 117, "span_ids": ["test_rechunk_expand", "test_rechunk_4d"], "tokens": 198}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", 
"tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rechunk_4d():\n \"\"\"Try rechunking a random 4d matrix\"\"\"\n old = ((5, 5),) * 4\n a = np.random.uniform(0, 1, 10000).reshape((10,) * 4)\n x = da.from_array(a, chunks=old)\n new = ((10,),) * 4\n x2 = rechunk(x, chunks=new)\n assert x2.chunks == new\n assert np.all(x2.compute() == a)\n\n\ndef test_rechunk_expand():\n a = np.random.uniform(0, 1, 100).reshape((10, 10))\n x = da.from_array(a, chunks=(5, 5))\n y = x.rechunk(chunks=((3, 3, 3, 1), (3, 3, 3, 1)))\n assert np.all(y.compute() == a)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_expand2_test_rechunk_expand2.for_off_off2_in_product_.if_a_off_off2_0_.assert_np_all_y_orig_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_expand2_test_rechunk_expand2.for_off_off2_in_product_.if_a_off_off2_0_.assert_np_all_y_orig_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 120, "end_line": 131, "span_ids": ["test_rechunk_expand2"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rechunk_expand2():\n (a, b) = (3, 2)\n orig = np.random.uniform(0, 1, a ** b).reshape((a,) * b)\n for off, off2 in product(range(1, a - 1), range(1, a - 1)):\n old = ((a - off, off),) * b\n x = da.from_array(orig, chunks=old)\n new = ((a - off2, off2),) * b\n assert np.all(x.rechunk(chunks=new).compute() == orig)\n if a - off - off2 > 0:\n new = ((off, a - off2 - off, off2),) * b\n y = x.rechunk(chunks=new).compute()\n assert np.all(y == orig)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_method_test_rechunk_method.assert_np_all_x2_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_method_test_rechunk_method.assert_np_all_x2_compute_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 134, "end_line": 142, "span_ids": ["test_rechunk_method"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": "def test_rechunk_method():\n \"\"\" Test rechunking can be done as a method of dask array.\"\"\"\n old = ((5, 2, 3),) * 4\n new = ((3, 3, 3, 1),) * 4\n a = np.random.uniform(0, 1, 10000).reshape((10,) * 4)\n x = da.from_array(a, chunks=old)\n x2 = x.rechunk(chunks=new)\n assert x2.chunks == new\n assert np.all(x2.compute() == a)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_blockshape_test_dtype.assert_x_rechunk_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_blockshape_test_dtype.assert_x_rechunk_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 145, "end_line": 159, "span_ids": ["test_rechunk_blockshape", "test_dtype"], "tokens": 174}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rechunk_blockshape():\n \"\"\" Test that blockshape can be used.\"\"\"\n new_shape, new_chunks = (10, 10), (4, 3)\n new_blockdims = normalize_chunks(new_chunks, new_shape)\n old_chunks = ((4, 4, 2), (3, 3, 3, 1))\n a = np.random.uniform(0, 1, 100).reshape((10, 10))\n x = da.from_array(a, chunks=old_chunks)\n check1 = rechunk(x, chunks=new_chunks)\n assert check1.chunks == new_blockdims\n assert np.all(check1.compute() == a)\n\n\ndef test_dtype():\n x = da.ones(5, chunks=(2,))\n assert x.rechunk(chunks=(1,)).dtype == x.dtype", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_with_dict_test_rechunk_with_dict.assert_y_chunks_24_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_with_dict_test_rechunk_with_dict.assert_y_chunks_24_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 162, "end_line": 173, "span_ids": ["test_rechunk_with_dict"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rechunk_with_dict():\n x = da.ones((24, 24), chunks=(4, 8))\n y = x.rechunk(chunks={0: 12})\n assert y.chunks == ((12, 12), (8, 8, 8))\n\n x = da.ones((24, 24), chunks=(4, 8))\n y = x.rechunk(chunks={0: (12, 12)})\n assert y.chunks == ((12, 12), (8, 8, 8))\n\n x = da.ones((24, 24), 
chunks=(4, 8))\n y = x.rechunk(chunks={0: -1})\n assert y.chunks == ((24,), (8, 8, 8))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_with_empty_input_test_rechunk_intermediates.assert_len_y_dask_30": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_with_empty_input_test_rechunk_intermediates.assert_len_y_dask_30", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 176, "end_line": 254, "span_ids": ["test_rechunk_zero_dim_array_II", "test_rechunk_empty_array", "test_rechunk_with_null_dimensions", "test_rechunk_with_integer", "test_rechunk_with_empty_input", "test_rechunk_minus_one", "test_rechunk_with_zero_placeholders", "test_rechunk_same", "test_rechunk_0d", "test_rechunk_intermediates", "test_rechunk_zero_dim_array", "test_rechunk_empty"], "tokens": 717}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rechunk_with_empty_input():\n x = da.ones((24, 24), chunks=(4, 8))\n assert x.rechunk(chunks={}).chunks == x.chunks\n pytest.raises(ValueError, lambda: x.rechunk(chunks=()))\n\n\ndef test_rechunk_with_null_dimensions():\n x = da.from_array(np.ones((24, 24)), chunks=(4, 8))\n assert x.rechunk(chunks=(None, 4)).chunks == da.ones((24, 24), chunks=(4, 4)).chunks\n\n\ndef test_rechunk_with_integer():\n x = da.from_array(np.arange(5), chunks=4)\n y = x.rechunk(3)\n assert y.chunks == ((3, 2),)\n assert (x.compute() == y.compute()).all()\n\n\ndef test_rechunk_0d():\n a = np.array(42)\n x = da.from_array(a, chunks=())\n y = x.rechunk(())\n assert y.chunks == ()\n assert y.compute() == a\n\n\n@pytest.mark.parametrize(\n \"arr\", [da.array([]), da.array([[], []]), da.array([[[]], [[]]])]\n)\ndef test_rechunk_empty_array(arr):\n arr.rechunk()\n assert arr.size == 0\n\n\ndef test_rechunk_empty():\n x = da.ones((0, 10), chunks=(5, 5))\n y = x.rechunk((2, 2))\n assert y.chunks == ((0,), (2,) * 5)\n assert_eq(x, y)\n\n\ndef test_rechunk_zero_dim_array():\n x = da.zeros((4, 0), chunks=3)\n y = x.rechunk({0: 4})\n assert y.chunks == ((4,), (0,))\n assert_eq(x, y)\n\n\ndef test_rechunk_zero_dim_array_II():\n x = da.zeros((4, 0, 6, 10), chunks=3)\n y = x.rechunk({0: 4, 2: 2})\n assert y.chunks == ((4,), (0,), (2, 2, 2), (3, 3, 3, 1))\n assert_eq(x, y)\n\n\ndef test_rechunk_same():\n x = da.ones((24, 24), chunks=(4, 8))\n y = x.rechunk(x.chunks)\n assert x is y\n\n\ndef test_rechunk_with_zero_placeholders():\n x = da.ones((24, 24), chunks=((12, 12), (24, 0)))\n y = da.ones((24, 24), chunks=((12, 12), (12, 12)))\n y = y.rechunk(((12, 12), (24, 0)))\n assert x.chunks == y.chunks\n\n\ndef test_rechunk_minus_one():\n x = da.ones((24, 24), chunks=(4, 8))\n y = x.rechunk((-1, 8))\n assert y.chunks == ((24,), (8, 8, 8))\n assert_eq(x, y)\n\n\ndef test_rechunk_intermediates():\n x = da.random.normal(10, 0.1, (10, 10), 
chunks=(10, 1))\n y = x.rechunk((1, 10))\n assert len(y.dask) > 30", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_divide_to_width_test_divide_to_width.assert_chunks_4_4_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_divide_to_width_test_divide_to_width.assert_chunks_4_4_2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 257, "end_line": 262, "span_ids": ["test_divide_to_width"], "tokens": 143}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_divide_to_width():\n chunks = divide_to_width((8, 9, 10), 10)\n assert chunks == (8, 9, 10)\n chunks = divide_to_width((8, 2, 9, 10, 11, 12), 4)\n # Note how 9 gives (3, 3, 3), not (4, 4, 1) or whatever\n assert chunks == (4, 4, 2, 3, 3, 3, 3, 3, 4, 3, 4, 4, 4, 4, 4)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_merge_to_number__assert_steps.assert_steps_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_merge_to_number__assert_steps.assert_steps_expected", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 265, "end_line": 317, "span_ids": ["_assert_steps", "test_merge_to_number", "_plan"], "tokens": 700}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_merge_to_number():\n chunks = merge_to_number((10,) * 4, 5)\n assert chunks == (10, 10, 10, 10)\n chunks = merge_to_number((10,) * 4, 4)\n assert chunks == (10, 10, 10, 10)\n chunks = merge_to_number((10,) * 4, 3)\n assert chunks == (20, 10, 10)\n chunks = merge_to_number((10,) * 4, 2)\n assert chunks == (20, 20)\n chunks = merge_to_number((10,) * 4, 1)\n assert chunks == (40,)\n\n chunks = merge_to_number((10,) * 10, 2)\n assert chunks == (50,) * 2\n chunks = merge_to_number((10,) * 10, 3)\n assert chunks == (40, 30, 30)\n\n chunks = merge_to_number((5, 1, 1, 15, 10), 4)\n assert chunks == (5, 2, 15, 10)\n chunks = merge_to_number((5, 1, 1, 15, 10), 3)\n assert chunks == (7, 15, 10)\n chunks = merge_to_number((5, 1, 1, 15, 10), 2)\n assert chunks == (22, 10)\n chunks = merge_to_number((5, 1, 1, 15, 10), 1)\n assert chunks == 
(32,)\n\n chunks = merge_to_number((1, 1, 1, 1, 3, 1, 1), 6)\n assert chunks == (2, 1, 1, 3, 1, 1)\n chunks = merge_to_number((1, 1, 1, 1, 3, 1, 1), 5)\n assert chunks == (2, 2, 3, 1, 1)\n chunks = merge_to_number((1, 1, 1, 1, 3, 1, 1), 4)\n assert chunks == (2, 2, 3, 2)\n chunks = merge_to_number((1, 1, 1, 1, 3, 1, 1), 3)\n assert chunks == (4, 3, 2)\n chunks = merge_to_number((1, 1, 1, 1, 3, 1, 1), 2)\n assert chunks == (4, 5)\n chunks = merge_to_number((1, 1, 1, 1, 3, 1, 1), 1)\n assert chunks == (9,)\n\n\ndef _plan(old_chunks, new_chunks, itemsize=1, block_size_limit=1e7, threshold=4):\n return plan_rechunk(\n old_chunks,\n new_chunks,\n itemsize=itemsize,\n block_size_limit=block_size_limit,\n threshold=threshold,\n )\n\n\ndef _assert_steps(steps, expected):\n assert len(steps) == len(expected)\n assert steps == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_plan_rechunk_test_plan_rechunk.for_i_in_range_len_steps_.assert_len_succ_1_le": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_plan_rechunk_test_plan_rechunk.for_i_in_range_len_steps_.assert_len_succ_1_le", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 320, "end_line": 386, "span_ids": ["test_plan_rechunk"], "tokens": 765}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_plan_rechunk():\n c = (20,) * 2 # coarse\n f = (2,) * 20 # fine\n nc = (float(\"nan\"),) * 2 # nan-coarse\n nf = (float(\"nan\"),) * 20 # nan-fine\n\n # Trivial cases\n steps = _plan((), ())\n _assert_steps(steps, [()])\n steps = _plan((c, ()), (f, ()))\n _assert_steps(steps, [(f, ())])\n\n # No intermediate required\n steps = _plan((c,), (f,))\n _assert_steps(steps, [(f,)])\n steps = _plan((f,), (c,))\n _assert_steps(steps, [(c,)])\n steps = _plan((c, c), (f, f))\n _assert_steps(steps, [(f, f)])\n steps = _plan((f, f), (c, c))\n _assert_steps(steps, [(c, c)])\n steps = _plan((f, c), (c, c))\n _assert_steps(steps, [(c, c)])\n steps = _plan((c, c, c, c), (c, f, c, c))\n _assert_steps(steps, [(c, f, c, c)])\n\n # An intermediate is used to reduce graph size\n steps = _plan((f, c), (c, f))\n _assert_steps(steps, [(c, c), (c, f)])\n\n steps = _plan((c + c, c + f), (f + f, c + c))\n _assert_steps(steps, [(c + c, c + c), (f + f, c + c)])\n\n # Same, with unknown dim\n steps = _plan((nc + nf, c + c, c + f), (nc + nf, f + f, c + c))\n _assert_steps(steps, steps)\n\n # Regression test for #5908\n steps = _plan((c, c), (f, f), threshold=1)\n _assert_steps(steps, [(f, f)])\n\n # Just at the memory limit => an intermediate is used\n steps = _plan((f, c), (c, f), block_size_limit=400)\n _assert_steps(steps, [(c, c), (c, f)])\n\n # Hitting the memory limit => partial merge\n m = (10,) * 4 # mid\n\n steps = _plan((f, c), (c, f), block_size_limit=399)\n _assert_steps(steps, [(m, c), (c, 
f)])\n\n steps2 = _plan((f, c), (c, f), block_size_limit=3999, itemsize=10)\n _assert_steps(steps2, steps)\n\n # Larger problem size => more intermediates\n c = (1000,) * 2 # coarse\n f = (2,) * 1000 # fine\n\n steps = _plan((f, c), (c, f), block_size_limit=99999)\n assert len(steps) == 3\n assert steps[-1] == (c, f)\n for i in range(len(steps) - 1):\n prev = steps[i]\n succ = steps[i + 1]\n # Merging on the first dim, splitting on the second dim\n assert len(succ[0]) <= len(prev[0]) / 2.0\n assert len(succ[1]) >= len(prev[1]) * 2.0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_plan_rechunk_5d_test_plan_rechunk_5d.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_plan_rechunk_5d_test_plan_rechunk_5d.None_2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 389, "end_line": 400, "span_ids": ["test_plan_rechunk_5d"], "tokens": 216}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_plan_rechunk_5d():\n # 5d problem\n c = (10,) * 1 # coarse\n f = (1,) * 10 # fine\n\n steps = _plan((c, c, c, c, c), (f, f, f, f, f))\n _assert_steps(steps, [(f, f, f, f, f)])\n steps = _plan((f, f, f, f, c), (c, c, c, f, f))\n _assert_steps(steps, [(c, c, c, f, c), (c, c, c, f, f)])\n # Only 1 dim can be merged at first\n steps = _plan((c, c, f, f, c), (c, c, c, f, f), block_size_limit=2e4)\n _assert_steps(steps, [(c, c, c, f, c), (c, c, c, f, f)])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_plan_rechunk_asymmetric_test_rechunk_warning.assert_not_w": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_plan_rechunk_asymmetric_test_rechunk_warning.assert_not_w", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 433, "end_line": 450, "span_ids": ["test_rechunk_warning", "test_plan_rechunk_asymmetric"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_plan_rechunk_asymmetric():\n a = ((1,) * 1000, (80000000,))\n b = ((1000,), (80000,) * 1000)\n steps = plan_rechunk(a, b, itemsize=8)\n assert len(steps) > 1\n\n x = da.ones((1000, 80000000), 
chunks=(1, 80000000))\n y = x.rechunk((1000, x.shape[1] // 1000))\n assert len(y.dask) < 100000\n\n\ndef test_rechunk_warning():\n N = 20\n x = da.random.normal(size=(N, N, 100), chunks=(1, N, 100))\n with warnings.catch_warnings(record=True) as w:\n x = x.rechunk((N, 1, 100))\n\n assert not w", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_dont_concatenate_single_chunks_test_dont_concatenate_single_chunks.assert_not_any_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_dont_concatenate_single_chunks_test_dont_concatenate_single_chunks.assert_not_any_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 453, "end_line": 464, "span_ids": ["test_dont_concatenate_single_chunks"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"shape,chunks\", [[(4,), (2,)], [(4, 4), (2, 2)], [(4, 4), (4, 2)]]\n)\ndef test_dont_concatenate_single_chunks(shape, chunks):\n x = da.ones(shape, chunks=shape)\n y = x.rechunk(chunks)\n dsk = dict(y.dask)\n assert not any(\n funcname(task[0]).startswith(\"concat\")\n for task in dsk.values()\n if dask.istask(task)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_nan_test_intersect_nan.assert_result_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_nan_test_intersect_nan.assert_result_expected", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 467, "end_line": 478, "span_ids": ["test_intersect_nan"], "tokens": 164}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_intersect_nan():\n old_chunks = ((float(\"nan\"), float(\"nan\")), (8,))\n new_chunks = ((float(\"nan\"), float(\"nan\")), (4, 4))\n\n result = list(intersect_chunks(old_chunks, new_chunks))\n expected = [\n (((0, slice(0, None, None)), (0, slice(0, 4, None))),),\n (((0, slice(0, None, None)), (0, slice(4, 8, None))),),\n (((1, slice(0, None, None)), (0, slice(0, 4, None))),),\n (((1, slice(0, None, None)), (0, slice(4, 8, None))),),\n ]\n assert result == expected", "start_char_idx": null, 
"end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_nan_single_test_intersect_nan_single.assert_result_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_nan_single_test_intersect_nan_single.assert_result_expected", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 481, "end_line": 490, "span_ids": ["test_intersect_nan_single"], "tokens": 109}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_intersect_nan_single():\n old_chunks = ((float(\"nan\"),), (10,))\n new_chunks = ((float(\"nan\"),), (5, 5))\n\n result = list(intersect_chunks(old_chunks, new_chunks))\n expected = [\n (((0, slice(0, None, None)), (0, slice(0, 5, None))),),\n (((0, slice(0, None, None)), (0, slice(5, 10, None))),),\n ]\n assert result == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_nan_long_test_intersect_nan_long.assert_result_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_nan_long_test_intersect_nan_long.assert_result_expected", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 493, "end_line": 508, "span_ids": ["test_intersect_nan_long"], "tokens": 269}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_intersect_nan_long():\n\n old_chunks = (tuple([float(\"nan\")] * 4), (10,))\n new_chunks = (tuple([float(\"nan\")] * 4), (5, 5))\n result = list(intersect_chunks(old_chunks, new_chunks))\n expected = [\n (((0, slice(0, None, None)), (0, slice(0, 5, None))),),\n (((0, slice(0, None, None)), (0, slice(5, 10, None))),),\n (((1, slice(0, None, None)), (0, slice(0, 5, None))),),\n (((1, slice(0, None, None)), (0, slice(5, 10, None))),),\n (((2, slice(0, None, None)), (0, slice(0, 5, None))),),\n (((2, slice(0, None, None)), (0, slice(5, 10, None))),),\n (((3, slice(0, None, None)), (0, slice(0, 5, None))),),\n (((3, slice(0, None, None)), (0, slice(5, 10, None))),),\n ]\n assert result == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", 
"metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_unknown_from_pandas_test_rechunk_unknown_from_pandas.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_unknown_from_pandas_test_rechunk_unknown_from_pandas.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 511, "end_line": 522, "span_ids": ["test_rechunk_unknown_from_pandas"], "tokens": 152}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rechunk_unknown_from_pandas():\n dd = pytest.importorskip(\"dask.dataframe\")\n pd = pytest.importorskip(\"pandas\")\n\n arr = np.random.randn(50, 10)\n x = dd.from_pandas(pd.DataFrame(arr), 2).values\n result = x.rechunk((None, (5, 5)))\n assert np.isnan(x.chunks[0]).all()\n assert np.isnan(result.chunks[0]).all()\n assert result.chunks[1] == (5, 5)\n expected = da.from_array(arr, chunks=((25, 25), (10,))).rechunk((None, (5, 5)))\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_unknown_from_array_test_rechunk_unknown_from_array.assert_eq_x_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_unknown_from_array_test_rechunk_unknown_from_array.assert_eq_x_result_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 525, "end_line": 534, "span_ids": ["test_rechunk_unknown_from_array"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rechunk_unknown_from_array():\n dd = pytest.importorskip(\"dask.dataframe\")\n # pd = pytest.importorskip('pandas')\n x = dd.from_array(da.ones(shape=(4, 4), chunks=(2, 2))).values\n # result = x.rechunk({1: 5})\n result = x.rechunk((None, 4))\n assert np.isnan(x.chunks[0]).all()\n assert np.isnan(result.chunks[0]).all()\n assert x.chunks[1] == (4,)\n assert_eq(x, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_unknown_test_rechunk_unknown.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_unknown_test_rechunk_unknown.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 537, "end_line": 561, "span_ids": ["test_rechunk_unknown"], "tokens": 387}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"x, chunks\",\n [\n (da.ones(shape=(50, 10), chunks=(25, 10)), (None, 5)),\n (da.ones(shape=(50, 10), chunks=(25, 10)), {1: 5}),\n (da.ones(shape=(50, 10), chunks=(25, 10)), (None, (5, 5))),\n (da.ones(shape=(1000, 10), chunks=(5, 10)), (None, 5)),\n (da.ones(shape=(1000, 10), chunks=(5, 10)), {1: 5}),\n (da.ones(shape=(1000, 10), chunks=(5, 10)), (None, (5, 5))),\n (da.ones(shape=(10, 10), chunks=(10, 10)), (None, 5)),\n (da.ones(shape=(10, 10), chunks=(10, 10)), {1: 5}),\n (da.ones(shape=(10, 10), chunks=(10, 10)), (None, (5, 5))),\n (da.ones(shape=(10, 10), chunks=(10, 2)), (None, 5)),\n (da.ones(shape=(10, 10), chunks=(10, 2)), {1: 5}),\n (da.ones(shape=(10, 10), chunks=(10, 2)), (None, (5, 5))),\n ],\n)\ndef test_rechunk_unknown(x, chunks):\n dd = pytest.importorskip(\"dask.dataframe\")\n y = dd.from_array(x).values\n result = y.rechunk(chunks)\n expected = x.rechunk(chunks)\n\n assert_chunks_match(result.chunks, expected.chunks)\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_unknown_explicit_test_rechunk_unknown_raises.with_pytest_raises_ValueE.x_rechunk_None_5_5_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_unknown_explicit_test_rechunk_unknown_raises.with_pytest_raises_ValueE.x_rechunk_None_5_5_5", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 564, "end_line": 587, "span_ids": ["test_rechunk_unknown_explicit", "assert_chunks_match", "test_rechunk_unknown_raises"], "tokens": 215}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rechunk_unknown_explicit():\n dd = pytest.importorskip(\"dask.dataframe\")\n x = da.ones(shape=(10, 10), chunks=(5, 2))\n y = dd.from_array(x).values\n result = y.rechunk(((float(\"nan\"), float(\"nan\")), (5, 5)))\n expected = x.rechunk((None, 
(5, 5)))\n assert_chunks_match(result.chunks, expected.chunks)\n assert_eq(result, expected)\n\n\ndef assert_chunks_match(left, right):\n for x, y in zip(left, right):\n if np.isnan(x).any():\n assert np.isnan(x).all()\n else:\n assert x == y\n\n\ndef test_rechunk_unknown_raises():\n dd = pytest.importorskip(\"dask.dataframe\")\n\n x = dd.from_array(da.ones(shape=(10, 10), chunks=(5, 5))).values\n with pytest.raises(ValueError):\n x.rechunk((None, (5, 5, 5)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_old_to_new_single_test_old_to_new.assert_result_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_old_to_new_single_test_old_to_new.assert_result_expected", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 590, "end_line": 612, "span_ids": ["test_old_to_new_single", "test_old_to_new"], "tokens": 214}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_old_to_new_single():\n old = ((float(\"nan\"), float(\"nan\")), (8,))\n new = ((float(\"nan\"), float(\"nan\")), (4, 4))\n result = _old_to_new(old, new)\n\n expected = [\n [[(0, slice(0, None, None))], [(1, slice(0, None, None))]],\n [[(0, slice(0, 4, None))], [(0, slice(4, 8, None))]],\n ]\n\n assert result == expected\n\n\ndef test_old_to_new():\n old = ((float(\"nan\"),), (10,))\n new = ((float(\"nan\"),), (5, 5))\n result = _old_to_new(old, new)\n expected = [\n [[(0, slice(0, None, None))]],\n [[(0, slice(0, 5, None))], [(0, slice(5, 10, None))]],\n ]\n\n assert result == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_old_to_new_large_test_old_to_new_large.assert_result_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_old_to_new_large_test_old_to_new_large.assert_result_expected", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 615, "end_line": 629, "span_ids": ["test_old_to_new_large"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_old_to_new_large():\n old = (tuple([float(\"nan\")] * 4), (10,))\n new = (tuple([float(\"nan\")] * 4), (5, 
5))\n\n result = _old_to_new(old, new)\n expected = [\n [\n [(0, slice(0, None, None))],\n [(1, slice(0, None, None))],\n [(2, slice(0, None, None))],\n [(3, slice(0, None, None))],\n ],\n [[(0, slice(0, 5, None))], [(0, slice(5, 10, None))]],\n ]\n assert result == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_changing_raises_test_old_to_new_known.assert_result_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_changing_raises_test_old_to_new_known.assert_result_expected", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 632, "end_line": 651, "span_ids": ["test_old_to_new_known", "test_changing_raises"], "tokens": 205}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_changing_raises():\n nan = float(\"nan\")\n with pytest.raises(ValueError) as record:\n _old_to_new(((nan, nan), (4, 4)), ((nan, nan, nan), (4, 4)))\n\n assert \"unchanging\" in str(record.value)\n\n\ndef test_old_to_new_known():\n old = ((10, 10, 10, 10, 10),)\n new = ((25, 5, 20),)\n result = _old_to_new(old, new)\n expected = [\n [\n [(0, slice(0, 10, None)), (1, slice(0, 10, None)), (2, slice(0, 5, None))],\n [(2, slice(5, 10, None))],\n [(3, slice(0, 10, None)), (4, slice(0, 10, None))],\n ]\n ]\n assert result == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_zero_dim_test_rechunk_avoid_needless_chunking.assert_len_dsk_8_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_zero_dim_test_rechunk_avoid_needless_chunking.assert_len_dsk_8_2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 654, "end_line": 671, "span_ids": ["test_rechunk_zero_dim", "test_rechunk_empty_chunks", "test_rechunk_avoid_needless_chunking"], "tokens": 177}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rechunk_zero_dim():\n da = pytest.importorskip(\"dask.array\")\n\n x = da.ones((0, 10, 100), chunks=(0, 10, 10)).rechunk((0, 10, 50))\n assert len(x.compute()) == 0\n\n\ndef test_rechunk_empty_chunks():\n x = da.zeros((7, 24), chunks=((7,), (10, 0, 0, 
9, 0, 5)))\n y = x.rechunk((2, 3))\n assert_eq(x, y)\n\n\ndef test_rechunk_avoid_needless_chunking():\n x = da.ones(16, chunks=2)\n y = x.rechunk(8)\n dsk = y.__dask_graph__()\n assert len(dsk) <= 8 + 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_auto_1d_test_rechunk_auto_1d.assert_y_chunks_expec": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_auto_1d_test_rechunk_auto_1d.assert_y_chunks_expec", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 674, "end_line": 687, "span_ids": ["test_rechunk_auto_1d"], "tokens": 191}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"shape,chunks,bs,expected\",\n [\n (100, 1, 10, (10,) * 10),\n (100, 50, 10, (10,) * 10),\n (100, 100, 10, (10,) * 10),\n (20, 7, 10, (7, 7, 6)),\n (20, (1, 1, 1, 1, 6, 2, 1, 7), 5, (5, 5, 5, 5)),\n ],\n)\ndef test_rechunk_auto_1d(shape, chunks, bs, expected):\n x = da.ones(shape, chunks=(chunks,))\n y = x.rechunk({0: \"auto\"}, block_size_limit=bs * x.dtype.itemsize)\n assert y.chunks == (expected,)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_auto_2d_test_rechunk_auto_2d._limited_by_largest": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_auto_2d_test_rechunk_auto_2d._limited_by_largest", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 690, "end_line": 707, "span_ids": ["test_rechunk_auto_2d"], "tokens": 307}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rechunk_auto_2d():\n x = da.ones((20, 20), chunks=(2, 2))\n y = x.rechunk({0: -1, 1: \"auto\"}, block_size_limit=20 * x.dtype.itemsize)\n assert y.chunks == ((20,), (1,) * 20)\n\n x = da.ones((20, 20), chunks=(2, 2))\n y = x.rechunk((-1, \"auto\"), block_size_limit=80 * x.dtype.itemsize)\n assert y.chunks == ((20,), (4,) * 5)\n\n x = da.ones((20, 20), chunks=((2, 2)))\n y = x.rechunk({0: \"auto\"}, block_size_limit=20 * x.dtype.itemsize)\n assert y.chunks[1] == x.chunks[1]\n assert y.chunks[0] == (10, 10)\n\n x = da.ones((20, 20), chunks=((2,) * 10, (2, 2, 2, 
2, 2, 5, 5)))\n y = x.rechunk({0: \"auto\"}, block_size_limit=20 * x.dtype.itemsize)\n assert y.chunks[1] == x.chunks[1]\n assert y.chunks[0] == (4, 4, 4, 4, 4) # limited by largest", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_auto_3d_test_rechunk_auto_3d._even_split": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_auto_3d_test_rechunk_auto_3d._even_split", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 710, "end_line": 715, "span_ids": ["test_rechunk_auto_3d"], "tokens": 111}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rechunk_auto_3d():\n x = da.ones((20, 20, 20), chunks=((2, 2, 2)))\n y = x.rechunk({0: \"auto\", 1: \"auto\"}, block_size_limit=200 * x.dtype.itemsize)\n assert y.chunks[2] == x.chunks[2]\n assert y.chunks[0] == (10, 10)\n assert y.chunks[1] == (10, 10) # even split", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_auto_image_stack_test_rechunk_auto_image_stack.None_2.assert_z_chunks_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_auto_image_stack_test_rechunk_auto_image_stack.None_2.assert_z_chunks_1_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 718, "end_line": 733, "span_ids": ["test_rechunk_auto_image_stack"], "tokens": 260}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"n\", [100, 1000])\ndef test_rechunk_auto_image_stack(n):\n with dask.config.set({\"array.chunk-size\": \"10MiB\"}):\n x = da.ones((n, 1000, 1000), chunks=(1, 1000, 1000), dtype=\"uint8\")\n y = x.rechunk(\"auto\")\n assert y.chunks == ((10,) * (n // 10), (1000,), (1000,))\n assert y.rechunk(\"auto\").chunks == y.chunks # idempotent\n\n with dask.config.set({\"array.chunk-size\": \"7MiB\"}):\n z = x.rechunk(\"auto\")\n assert z.chunks == ((5,) * (n // 5), (1000,), (1000,))\n\n with dask.config.set({\"array.chunk-size\": \"1MiB\"}):\n x = da.ones((n, 1000, 1000), chunks=(1, 1000, 1000), dtype=\"float64\")\n z = x.rechunk(\"auto\")\n assert z.chunks == ((1,) * n, (250,) * 4, (250,) 
* 4)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_down_test_rechunk_down.None_2.assert_z_chunks_10_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_down_test_rechunk_down.None_2.assert_z_chunks_10_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 736, "end_line": 751, "span_ids": ["test_rechunk_down"], "tokens": 228}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rechunk_down():\n with dask.config.set({\"array.chunk-size\": \"10MiB\"}):\n x = da.ones((100, 1000, 1000), chunks=(1, 1000, 1000), dtype=\"uint8\")\n y = x.rechunk(\"auto\")\n assert y.chunks == ((10,) * 10, (1000,), (1000,))\n\n with dask.config.set({\"array.chunk-size\": \"1MiB\"}):\n z = y.rechunk(\"auto\")\n assert z.chunks == ((5,) * 20, (250,) * 4, (250,) * 4)\n\n with dask.config.set({\"array.chunk-size\": \"1MiB\"}):\n z = y.rechunk({0: \"auto\"})\n assert z.chunks == ((1,) * 100, (1000,), (1000,))\n\n z = y.rechunk({1: \"auto\"})\n assert z.chunks == ((10,) * 10, (100,) * 10, (1000,))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_from_itertools_import_zip_test_numel.None_1.for_sub_in_itertools_comb.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_from_itertools_import_zip_test_numel.None_1.for_sub_in_itertools_comb.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 48, "span_ids": ["test_numel", "imports", "assert_eq"], "tokens": 386}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from itertools import zip_longest\nimport os\nimport warnings\n\nimport pytest\n\nnp = pytest.importorskip(\"numpy\")\n\nimport itertools\n\nimport dask.array as da\nfrom dask.array.utils import assert_eq as _assert_eq, same_keys\nfrom dask.core import get_deps\nimport dask.config as config\n\n\ndef assert_eq(a, b):\n _assert_eq(a, b, equal_nan=True)\n\n\n@pytest.mark.parametrize(\"dtype\", [\"f4\", \"i4\"])\n@pytest.mark.parametrize(\"keepdims\", [True, False])\ndef test_numel(dtype, keepdims):\n x = np.ones((2, 3, 4))\n\n 
assert_eq(\n da.reductions.numel(x, axis=(), keepdims=keepdims, dtype=dtype),\n np.sum(x, axis=(), keepdims=keepdims, dtype=dtype),\n )\n assert_eq(\n da.reductions.numel(x, axis=0, keepdims=keepdims, dtype=dtype),\n np.sum(x, axis=0, keepdims=keepdims, dtype=dtype),\n )\n\n for length in range(x.ndim):\n for sub in itertools.combinations([d for d in range(x.ndim)], length):\n assert_eq(\n da.reductions.numel(x, axis=sub, keepdims=keepdims, dtype=dtype),\n np.sum(x, axis=sub, keepdims=keepdims, dtype=dtype),\n )\n\n for length in range(x.ndim):\n for sub in itertools.combinations([d for d in range(x.ndim)], length):\n ssub = list(sub)\n np.random.shuffle(ssub) # shuffle in place; np.random.shuffle returns None\n assert_eq(\n da.reductions.numel(x, axis=ssub, keepdims=keepdims, dtype=dtype),\n np.sum(x, axis=ssub, keepdims=keepdims, dtype=dtype),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_reduction_1d_test_reduction_1d_test.if_split_every_.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_reduction_1d_test_reduction_1d_test.if_split_every_.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 51, "end_line": 72, "span_ids": ["reduction_1d_test"], "tokens": 316}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def reduction_1d_test(da_func, darr, np_func, narr, use_dtype=True, split_every=True):\n assert_eq(da_func(darr), np_func(narr))\n assert_eq(\n da_func(narr), np_func(narr)\n ) # Ensure Dask reductions work with NumPy arrays\n assert_eq(da_func(darr, keepdims=True), np_func(narr, keepdims=True))\n assert_eq(da_func(darr, axis=()), np_func(narr, axis=()))\n assert same_keys(da_func(darr), da_func(darr))\n assert same_keys(da_func(darr, keepdims=True), da_func(darr, keepdims=True))\n if use_dtype:\n assert_eq(da_func(darr, dtype=\"f8\"), np_func(narr, dtype=\"f8\"))\n assert_eq(da_func(darr, dtype=\"i8\"), np_func(narr, dtype=\"i8\"))\n assert same_keys(da_func(darr, dtype=\"i8\"), da_func(darr, dtype=\"i8\"))\n if split_every:\n a1 = da_func(darr, split_every=2)\n a2 = da_func(darr, split_every={0: 2})\n assert same_keys(a1, a2)\n assert_eq(a1, np_func(narr))\n assert_eq(a2, np_func(narr))\n assert_eq(\n da_func(darr, keepdims=True, split_every=2), np_func(narr, keepdims=True)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_reductions_1D_test_reductions_1D.None_15": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_reductions_1D_test_reductions_1D.None_15", "embedding": 
null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 75, "end_line": 96, "span_ids": ["test_reductions_1D"], "tokens": 329}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"dtype\", [\"f4\", \"i4\"])\ndef test_reductions_1D(dtype):\n x = np.arange(5).astype(dtype)\n a = da.from_array(x, chunks=(2,))\n\n reduction_1d_test(da.sum, a, np.sum, x)\n reduction_1d_test(da.prod, a, np.prod, x)\n reduction_1d_test(da.mean, a, np.mean, x)\n reduction_1d_test(da.var, a, np.var, x)\n reduction_1d_test(da.std, a, np.std, x)\n reduction_1d_test(da.min, a, np.min, x, False)\n reduction_1d_test(da.max, a, np.max, x, False)\n reduction_1d_test(da.any, a, np.any, x, False)\n reduction_1d_test(da.all, a, np.all, x, False)\n\n reduction_1d_test(da.nansum, a, np.nansum, x)\n reduction_1d_test(da.nanprod, a, np.nanprod, x)\n reduction_1d_test(da.nanmean, a, np.mean, x)\n reduction_1d_test(da.nanvar, a, np.var, x)\n reduction_1d_test(da.nanstd, a, np.std, x)\n reduction_1d_test(da.nanmin, a, np.nanmin, x, False)\n reduction_1d_test(da.nanmax, a, np.nanmax, x, False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_reduction_2d_test_reduction_2d_test.with_warnings_catch_warni.if_split_every_.None_8": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_reduction_2d_test_reduction_2d_test.with_warnings_catch_warni.if_split_every_.None_8", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 99, "end_line": 149, "span_ids": ["reduction_2d_test"], "tokens": 687}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def reduction_2d_test(da_func, darr, np_func, narr, use_dtype=True, split_every=True):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\") # overflow\n assert_eq(da_func(darr), np_func(narr))\n assert_eq(da_func(darr, keepdims=True), np_func(narr, keepdims=True))\n assert_eq(da_func(darr, axis=()), np_func(narr, axis=()))\n assert_eq(da_func(darr, axis=0), np_func(narr, axis=0))\n assert_eq(da_func(darr, axis=1), np_func(narr, axis=1))\n assert_eq(da_func(darr, axis=-1), np_func(narr, axis=-1))\n assert_eq(da_func(darr, axis=-2), np_func(narr, axis=-2))\n assert_eq(\n da_func(darr, axis=1, keepdims=True), np_func(narr, axis=1, keepdims=True)\n )\n assert_eq(\n da_func(darr, axis=(), keepdims=True), np_func(narr, axis=(), keepdims=True)\n )\n assert_eq(da_func(darr, axis=(1, 0)), np_func(narr, axis=(1, 0)))\n\n 
assert same_keys(da_func(darr, axis=()), da_func(darr, axis=()))\n assert same_keys(da_func(darr, axis=1), da_func(darr, axis=1))\n assert same_keys(da_func(darr, axis=(1, 0)), da_func(darr, axis=(1, 0)))\n\n if use_dtype:\n assert_eq(da_func(darr, dtype=\"f8\"), np_func(narr, dtype=\"f8\"))\n assert_eq(da_func(darr, dtype=\"i8\"), np_func(narr, dtype=\"i8\"))\n\n if split_every:\n a1 = da_func(darr, split_every=4)\n a2 = da_func(darr, split_every={0: 2, 1: 2})\n assert same_keys(a1, a2)\n assert_eq(a1, np_func(narr))\n assert_eq(a2, np_func(narr))\n assert_eq(\n da_func(darr, keepdims=True, split_every=4),\n np_func(narr, keepdims=True),\n )\n assert_eq(da_func(darr, axis=(), split_every=2), np_func(narr, axis=()))\n assert_eq(da_func(darr, axis=0, split_every=2), np_func(narr, axis=0))\n assert_eq(\n da_func(darr, axis=(), keepdims=True, split_every=2),\n np_func(narr, axis=(), keepdims=True),\n )\n assert_eq(\n da_func(darr, axis=0, keepdims=True, split_every=2),\n np_func(narr, axis=0, keepdims=True),\n )\n assert_eq(da_func(darr, axis=1, split_every=2), np_func(narr, axis=1))\n assert_eq(\n da_func(darr, axis=1, keepdims=True, split_every=2),\n np_func(narr, axis=1, keepdims=True),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_reduction_errors_test_reductions_2D.None_15": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_reduction_errors_test_reductions_2D.None_15", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 152, "end_line": 185, "span_ids": ["test_reductions_2D", "test_reduction_errors"], "tokens": 464}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_reduction_errors():\n x = da.ones((5, 5), chunks=(3, 3))\n with pytest.raises(ValueError):\n x.sum(axis=2)\n with pytest.raises(ValueError):\n x.sum(axis=-3)\n\n\n@pytest.mark.slow\n@pytest.mark.parametrize(\"dtype\", [\"f4\", \"i4\"])\ndef test_reductions_2D(dtype):\n x = np.arange(1, 122).reshape((11, 11)).astype(dtype)\n a = da.from_array(x, chunks=(4, 4))\n\n b = a.sum(keepdims=True)\n assert b.__dask_keys__() == [[(b.name, 0, 0)]]\n\n reduction_2d_test(da.sum, a, np.sum, x)\n reduction_2d_test(da.prod, a, np.prod, x)\n reduction_2d_test(da.mean, a, np.mean, x)\n reduction_2d_test(da.var, a, np.var, x, False) # Difference in dtype algo\n reduction_2d_test(da.std, a, np.std, x, False) # Difference in dtype algo\n reduction_2d_test(da.min, a, np.min, x, False)\n reduction_2d_test(da.max, a, np.max, x, False)\n reduction_2d_test(da.any, a, np.any, x, False)\n reduction_2d_test(da.all, a, np.all, x, False)\n\n reduction_2d_test(da.nansum, a, np.nansum, x)\n reduction_2d_test(da.nanprod, a, np.nanprod, x)\n reduction_2d_test(da.nanmean, a, np.mean, x)\n reduction_2d_test(da.nanvar, a, np.nanvar, x, False) # Difference in dtype algo\n 
reduction_2d_test(da.nanstd, a, np.nanstd, x, False) # Difference in dtype algo\n reduction_2d_test(da.nanmin, a, np.nanmin, x, False)\n reduction_2d_test(da.nanmax, a, np.nanmax, x, False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_arg_reductions_test_arg_reductions.assert_eq_dfunc_a2_0_sp": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_arg_reductions_test_arg_reductions.assert_eq_dfunc_a2_0_sp", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 188, "end_line": 218, "span_ids": ["test_arg_reductions"], "tokens": 335}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n [\"dfunc\", \"func\"],\n [\n (da.argmin, np.argmin),\n (da.argmax, np.argmax),\n (da.nanargmin, np.nanargmin),\n (da.nanargmax, np.nanargmax),\n ],\n)\ndef test_arg_reductions(dfunc, func):\n x = np.random.random((10, 10, 10))\n a = da.from_array(x, chunks=(3, 4, 5))\n\n assert_eq(dfunc(a), func(x))\n assert_eq(dfunc(a, 0), func(x, 0))\n assert_eq(dfunc(a, 1), func(x, 1))\n assert_eq(dfunc(a, 2), func(x, 2))\n with config.set(split_every=2):\n assert_eq(dfunc(a), func(x))\n assert_eq(dfunc(a, 0), func(x, 0))\n assert_eq(dfunc(a, 1), func(x, 1))\n assert_eq(dfunc(a, 2), func(x, 2))\n\n pytest.raises(ValueError, lambda: dfunc(a, 3))\n pytest.raises(TypeError, lambda: dfunc(a, (0, 1)))\n\n x2 = np.arange(10)\n a2 = da.from_array(x2, chunks=3)\n assert_eq(dfunc(a2), func(x2))\n assert_eq(dfunc(a2, 0), func(x2, 0))\n assert_eq(dfunc(a2, 0, split_every=2), func(x2, 0))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_nanarg_reductions_test_nanarg_reductions.None_2.with_pytest_warns_None_.dfunc_a_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_nanarg_reductions_test_nanarg_reductions.None_2.with_pytest_warns_None_.dfunc_a_compute_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 221, "end_line": 243, "span_ids": ["test_nanarg_reductions"], "tokens": 224}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n 
[\"dfunc\", \"func\"], [(da.nanargmin, np.nanargmin), (da.nanargmax, np.nanargmax)]\n)\ndef test_nanarg_reductions(dfunc, func):\n\n x = np.random.random((10, 10, 10))\n x[5] = np.nan\n a = da.from_array(x, chunks=(3, 4, 5))\n assert_eq(dfunc(a), func(x))\n assert_eq(dfunc(a, 0), func(x, 0))\n with pytest.raises(ValueError):\n with pytest.warns(None): # All NaN axis\n dfunc(a, 1).compute()\n\n with pytest.raises(ValueError):\n with pytest.warns(None): # All NaN axis\n dfunc(a, 2).compute()\n\n x[:] = np.nan\n a = da.from_array(x, chunks=(3, 4, 5))\n with pytest.raises(ValueError):\n with pytest.warns(None): # All NaN axis\n dfunc(a).compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_arg_reductions_unknown_chunksize_test_arg_reductions_unknown_single_chunksize.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_arg_reductions_unknown_chunksize_test_arg_reductions_unknown_single_chunksize.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 246, "end_line": 274, "span_ids": ["test_arg_reductions_unknown_chunksize_2d", "test_arg_reductions_unknown_chunksize", "test_arg_reductions_unknown_single_chunksize"], "tokens": 274}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"func\", [\"argmax\", \"nanargmax\"])\ndef test_arg_reductions_unknown_chunksize(func):\n x = da.arange(10, chunks=5)\n x = x[x > 1]\n\n with pytest.raises(ValueError) as info:\n getattr(da, func)(x)\n\n assert \"unknown chunksize\" in str(info.value)\n\n\n@pytest.mark.parametrize(\"func\", [\"argmax\", \"nanargmax\"])\ndef test_arg_reductions_unknown_chunksize_2d(func):\n x = da.ones((10, 10), chunks=(5, 5))\n x = x[x[0, :] > 0, :] # unknown chunks in first dimension only\n\n with pytest.raises(ValueError):\n getattr(da, func)(x, axis=0)\n\n getattr(da, func)(x, axis=1).compute()\n\n\n@pytest.mark.parametrize(\"func\", [\"argmax\", \"nanargmax\"])\ndef test_arg_reductions_unknown_single_chunksize(func):\n x = da.ones((10, 10), chunks=(10, 10))\n x = x[x[0, :] > 0, :] # unknown chunks in first dimension only\n\n getattr(da, func)(x, axis=0).compute()\n getattr(da, func)(x, axis=1).compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_reductions_2D_nans_test_reductions_2D_nans.None_9.assert_eq_da_nanargmin_a_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_reductions_2D_nans_test_reductions_2D_nans.None_9.assert_eq_da_nanargmin_a_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 277, "end_line": 327, "span_ids": ["test_reductions_2D_nans"], "tokens": 749}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_reductions_2D_nans():\n # chunks are a mix of some/all/no NaNs\n x = np.full((4, 4), np.nan)\n x[:2, :2] = np.array([[1, 2], [3, 4]])\n x[2, 2] = 5\n x[3, 3] = 6\n a = da.from_array(x, chunks=(2, 2))\n\n reduction_2d_test(da.sum, a, np.sum, x, False, False)\n reduction_2d_test(da.prod, a, np.prod, x, False, False)\n reduction_2d_test(da.mean, a, np.mean, x, False, False)\n reduction_2d_test(da.var, a, np.var, x, False, False)\n reduction_2d_test(da.std, a, np.std, x, False, False)\n reduction_2d_test(da.min, a, np.min, x, False, False)\n reduction_2d_test(da.max, a, np.max, x, False, False)\n reduction_2d_test(da.any, a, np.any, x, False, False)\n reduction_2d_test(da.all, a, np.all, x, False, False)\n\n reduction_2d_test(da.nansum, a, np.nansum, x, False, False)\n reduction_2d_test(da.nanprod, a, np.nanprod, x, False, False)\n reduction_2d_test(da.nanmean, a, np.nanmean, x, False, False)\n with pytest.warns(None): # division by 0 warning\n reduction_2d_test(da.nanvar, a, np.nanvar, x, False, False)\n with pytest.warns(None): # division by 0 warning\n reduction_2d_test(da.nanstd, a, np.nanstd, x, False, False)\n with pytest.warns(None): # all NaN axis warning\n reduction_2d_test(da.nanmin, a, np.nanmin, x, False, False)\n with pytest.warns(None): # all NaN axis warning\n reduction_2d_test(da.nanmax, a, np.nanmax, x, False, False)\n\n with warnings.catch_warnings():\n # RuntimeWarning: invalid value encountered in reduce\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n assert_eq(da.argmax(a), np.argmax(x))\n assert_eq(da.argmin(a), np.argmin(x))\n\n with pytest.warns(None): # all NaN axis warning\n assert_eq(da.nanargmax(a), np.nanargmax(x))\n with pytest.warns(None): # all NaN axis warning\n assert_eq(da.nanargmin(a), np.nanargmin(x))\n\n with warnings.catch_warnings():\n # RuntimeWarning: invalid value encountered in reduce\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n assert_eq(da.argmax(a, axis=0), np.argmax(x, axis=0))\n assert_eq(da.argmin(a, axis=0), np.argmin(x, axis=0))\n\n with pytest.warns(None): # all NaN axis warning\n assert_eq(da.nanargmax(a, axis=0), np.nanargmax(x, axis=0))\n with pytest.warns(None): # all NaN axis warning\n assert_eq(da.nanargmin(a, axis=0), np.nanargmin(x, axis=0))\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_reductions_2D_nans.None_10_test_reductions_2D_nans.None_12.assert_eq_da_nanargmin_a_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_reductions_2D_nans.None_10_test_reductions_2D_nans.None_12.assert_eq_da_nanargmin_a_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 329, "end_line": 338, "span_ids": ["test_reductions_2D_nans"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_reductions_2D_nans():\n # ... other code\n\n with warnings.catch_warnings():\n # RuntimeWarning: invalid value encountered in reduce\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n assert_eq(da.argmax(a, axis=1), np.argmax(x, axis=1))\n assert_eq(da.argmin(a, axis=1), np.argmin(x, axis=1))\n\n with pytest.warns(None): # all NaN axis warning\n assert_eq(da.nanargmax(a, axis=1), np.nanargmax(x, axis=1))\n with pytest.warns(None): # all NaN axis warning\n assert_eq(da.nanargmin(a, axis=1), np.nanargmin(x, axis=1))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_moment_test_moment.None_7": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_moment_test_moment.None_7", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 341, "end_line": 362, "span_ids": ["test_moment"], "tokens": 327}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_moment():\n def moment(x, n, axis=None):\n return ((x - x.mean(axis=axis, keepdims=True)) ** n).sum(\n axis=axis\n ) / np.ones_like(x).sum(axis=axis)\n\n # Poorly conditioned\n x = np.array([1.0, 2.0, 3.0] * 10).reshape((3, 10)) + 1e8\n a = da.from_array(x, chunks=5)\n assert_eq(a.moment(2), moment(x, 2))\n assert_eq(a.moment(3), moment(x, 3))\n assert_eq(a.moment(4), moment(x, 4))\n\n x = np.arange(1, 122).reshape((11, 11)).astype(\"f8\")\n a = da.from_array(x, chunks=(4, 4))\n assert_eq(a.moment(4, axis=1), moment(x, 4, axis=1))\n assert_eq(a.moment(4, axis=(1, 0)), moment(x, 4, axis=(1, 0)))\n\n # Tree reduction\n 
assert_eq(a.moment(order=4, split_every=4), moment(x, 4))\n assert_eq(a.moment(order=4, axis=0, split_every=4), moment(x, 4, axis=0))\n assert_eq(a.moment(order=4, axis=1, split_every=4), moment(x, 4, axis=1))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_reductions_with_negative_axes_test_reductions_with_negative_axes.assert_eq_a_sum_axis_0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_reductions_with_negative_axes_test_reductions_with_negative_axes.assert_eq_a_sum_axis_0_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 365, "end_line": 373, "span_ids": ["test_reductions_with_negative_axes"], "tokens": 111}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_reductions_with_negative_axes():\n x = np.random.random((4, 4, 4))\n a = da.from_array(x, chunks=2)\n\n assert_eq(a.argmin(axis=-1), x.argmin(axis=-1))\n assert_eq(a.argmin(axis=-1, split_every=2), x.argmin(axis=-1))\n\n assert_eq(a.sum(axis=-1), x.sum(axis=-1))\n assert_eq(a.sum(axis=(0, -1)), x.sum(axis=(0, -1)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_nan_test_nan.assert_eq_np_nanprod_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_nan_test_nan.assert_eq_np_nanprod_x_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 376, "end_line": 389, "span_ids": ["test_nan"], "tokens": 260}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_nan():\n x = np.array([[1, np.nan, 3, 4], [5, 6, 7, np.nan], [9, 10, 11, 12]])\n d = da.from_array(x, chunks=(2, 2))\n\n assert_eq(np.nansum(x), da.nansum(d))\n assert_eq(np.nansum(x, axis=0), da.nansum(d, axis=0))\n assert_eq(np.nanmean(x, axis=1), da.nanmean(d, axis=1))\n assert_eq(np.nanmin(x, axis=1), da.nanmin(d, axis=1))\n assert_eq(np.nanmax(x, axis=(0, 1)), da.nanmax(d, axis=(0, 1)))\n assert_eq(np.nanvar(x), da.nanvar(d))\n assert_eq(np.nanstd(x, axis=0), da.nanstd(d, axis=0))\n assert_eq(np.nanargmin(x, axis=0), da.nanargmin(d, axis=0))\n assert_eq(np.nanargmax(x, axis=0), da.nanargmax(d, axis=0))\n 
assert_eq(np.nanprod(x), da.nanprod(d))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_nan_object_test_nan_object.with_warnings_catch_warni.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_nan_object_test_nan_object.with_warnings_catch_warni.None_3", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 392, "end_line": 420, "span_ids": ["test_nan_object"], "tokens": 342}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"func\", [\"nansum\", \"sum\", \"nanmin\", \"min\", \"nanmax\", \"max\"])\ndef test_nan_object(func):\n with warnings.catch_warnings():\n if os.name == \"nt\" and func in {\"min\", \"max\"}:\n # RuntimeWarning: invalid value encountered in reduce in wrapreduction\n # from NumPy.\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n\n x = np.array([[1, np.nan, 3, 4], [5, 6, 7, np.nan], [9, 10, 11, 12]]).astype(\n object\n )\n d = da.from_array(x, chunks=(2, 2))\n\n if func in {\"nanmin\", \"nanmax\"}:\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n\n assert_eq(getattr(np, func)(x, axis=()), getattr(da, func)(d, axis=()))\n\n if func in {\"nanmin\", \"nanmax\"}:\n warnings.simplefilter(\"default\", RuntimeWarning)\n\n if func in {\"min\", \"max\"}:\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n assert_eq(getattr(np, func)(x, axis=0), getattr(da, func)(d, axis=0))\n if os.name != \"nt\" and func in {\"min\", \"max\"}:\n warnings.simplefilter(\"default\", RuntimeWarning)\n\n assert_eq(getattr(np, func)(x, axis=1), getattr(da, func)(d, axis=1))\n assert_eq(getattr(np, func)(x), getattr(da, func)(d))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_0d_array_test_reduction_on_scalar.assert_x_x_all_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_0d_array_test_reduction_on_scalar.assert_x_x_all_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 423, "end_line": 436, "span_ids": ["test_0d_array", "test_reduction_on_scalar"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, 
"text": "def test_0d_array():\n x = da.mean(da.ones(4, chunks=4), axis=()).compute()\n x = da.mean(da.ones(4, chunks=4), axis=0).compute()\n y = np.mean(np.ones(4))\n assert type(x) == type(y)\n\n x = da.sum(da.zeros(4, chunks=1)).compute()\n y = np.sum(np.zeros(4))\n assert type(x) == type(y)\n\n\ndef test_reduction_on_scalar():\n x = da.from_array(np.array(1.0), chunks=())\n assert (x == x).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_reductions_with_empty_array_assert_max_deps.if_eq_.else_.assert_max_map_len_depen": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_reductions_with_empty_array_assert_max_deps.if_eq_.else_.assert_max_map_len_depen", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 439, "end_line": 459, "span_ids": ["assert_max_deps", "test_reductions_with_empty_array"], "tokens": 217}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_reductions_with_empty_array():\n dx1 = da.ones((10, 0, 5), chunks=4)\n x1 = dx1.compute()\n dx2 = da.ones((0, 0, 0), chunks=4)\n x2 = dx2.compute()\n\n for dx, x in [(dx1, x1), (dx2, x2)]:\n with pytest.warns(None): # empty slice warning\n assert_eq(dx.mean(), x.mean())\n assert_eq(dx.mean(axis=()), x.mean(axis=()))\n assert_eq(dx.mean(axis=0), x.mean(axis=0))\n assert_eq(dx.mean(axis=1), x.mean(axis=1))\n assert_eq(dx.mean(axis=2), x.mean(axis=2))\n\n\ndef assert_max_deps(x, n, eq=True):\n dependencies, dependents = get_deps(x.dask)\n if eq:\n assert max(map(len, dependencies.values())) == n\n else:\n assert max(map(len, dependencies.values())) <= n", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_tree_reduce_depth_test_tree_reduce_depth.None_26": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_tree_reduce_depth_test_tree_reduce_depth.None_26", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 462, "end_line": 496, "span_ids": ["test_tree_reduce_depth"], "tokens": 646}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tree_reduce_depth():\n # 2D\n x = 
da.from_array(np.arange(242).reshape((11, 22)), chunks=(3, 4))\n thresh = {0: 2, 1: 3}\n assert_max_deps(x.sum(split_every=thresh), 2 * 3)\n assert_max_deps(x.sum(axis=(), split_every=thresh), 1)\n assert_max_deps(x.sum(axis=0, split_every=thresh), 2)\n assert_max_deps(x.sum(axis=1, split_every=thresh), 3)\n assert_max_deps(x.sum(split_every=20), 20, False)\n assert_max_deps(x.sum(axis=(), split_every=20), 1)\n assert_max_deps(x.sum(axis=0, split_every=20), 4)\n assert_max_deps(x.sum(axis=1, split_every=20), 6)\n\n # 3D\n x = da.from_array(np.arange(11 * 22 * 29).reshape((11, 22, 29)), chunks=(3, 4, 5))\n thresh = {0: 2, 1: 3, 2: 4}\n assert_max_deps(x.sum(split_every=thresh), 2 * 3 * 4)\n assert_max_deps(x.sum(axis=(), split_every=thresh), 1)\n assert_max_deps(x.sum(axis=0, split_every=thresh), 2)\n assert_max_deps(x.sum(axis=1, split_every=thresh), 3)\n assert_max_deps(x.sum(axis=2, split_every=thresh), 4)\n assert_max_deps(x.sum(axis=(0, 1), split_every=thresh), 2 * 3)\n assert_max_deps(x.sum(axis=(0, 2), split_every=thresh), 2 * 4)\n assert_max_deps(x.sum(axis=(1, 2), split_every=thresh), 3 * 4)\n assert_max_deps(x.sum(split_every=20), 20, False)\n assert_max_deps(x.sum(axis=(), split_every=20), 1)\n assert_max_deps(x.sum(axis=0, split_every=20), 4)\n assert_max_deps(x.sum(axis=1, split_every=20), 6)\n assert_max_deps(x.sum(axis=2, split_every=20), 6)\n assert_max_deps(x.sum(axis=(0, 1), split_every=20), 20, False)\n assert_max_deps(x.sum(axis=(0, 2), split_every=20), 20, False)\n assert_max_deps(x.sum(axis=(1, 2), split_every=20), 20, False)\n assert_max_deps(x.sum(axis=(0, 1), split_every=40), 4 * 6)\n assert_max_deps(x.sum(axis=(0, 2), split_every=40), 4 * 6)\n assert_max_deps(x.sum(axis=(1, 2), split_every=40), 6 * 6)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_tree_reduce_set_options_test_array_reduction_out.assert_eq_x_func_np_ones": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_tree_reduce_set_options_test_array_reduction_out.assert_eq_x_func_np_ones", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 499, "end_line": 533, "span_ids": ["test_array_reduction_out", "test_tree_reduce_set_options", "test_general_reduction_names", "test_reduction_names"], "tokens": 383}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tree_reduce_set_options():\n x = da.from_array(np.arange(242).reshape((11, 22)), chunks=(3, 4))\n with config.set(split_every={0: 2, 1: 3}):\n assert_max_deps(x.sum(), 2 * 3)\n assert_max_deps(x.sum(axis=()), 1)\n assert_max_deps(x.sum(axis=0), 2)\n\n\ndef test_reduction_names():\n x = da.ones(5, chunks=(2,))\n assert x.sum().name.startswith(\"sum\")\n assert \"max\" in x.max().name.split(\"-\")[0]\n assert x.var().name.startswith(\"var\")\n assert 
x.all().name.startswith(\"all\")\n assert any(k[0].startswith(\"nansum\") for k in da.nansum(x).dask)\n assert x.mean().name.startswith(\"mean\")\n\n\ndef test_general_reduction_names():\n dtype = int\n a = da.reduction(\n da.ones(10, dtype, chunks=2), np.sum, np.sum, dtype=dtype, name=\"foo\"\n )\n names, tokens = list(zip_longest(*[key[0].rsplit(\"-\", 1) for key in a.dask]))\n assert set(names) == {\"ones\", \"foo\", \"foo-partial\", \"foo-aggregate\"}\n assert all(tokens)\n\n\n@pytest.mark.filterwarnings(\"ignore:`argmax` is not implemented by dask\")\n@pytest.mark.parametrize(\"func\", [np.sum, np.argmax])\ndef test_array_reduction_out(func):\n x = da.arange(10, chunks=(5,))\n y = da.ones((10, 10), chunks=(4, 4))\n func(y, axis=0, out=x)\n assert_eq(x, func(np.ones((10, 10)), axis=0))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_array_cumreduction_axis_test_array_cumreduction_out.assert_eq_x_func_np_ones": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_array_cumreduction_axis_test_array_cumreduction_out.assert_eq_x_func_np_ones", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 536, "end_line": 560, "span_ids": ["test_array_cumreduction_axis", "test_array_cumreduction_out"], "tokens": 269}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"func\", [\"cumsum\", \"cumprod\", \"nancumsum\", \"nancumprod\"])\n@pytest.mark.parametrize(\"use_nan\", [False, True])\n@pytest.mark.parametrize(\"axis\", [None, 0, 1, -1])\n@pytest.mark.parametrize(\"method\", [\"sequential\", \"blelloch\"])\ndef test_array_cumreduction_axis(func, use_nan, axis, method):\n np_func = getattr(np, func)\n da_func = getattr(da, func)\n\n s = (10, 11, 12)\n a = np.arange(np.prod(s)).reshape(s)\n if use_nan:\n a[1] = np.nan\n d = da.from_array(a, chunks=(4, 5, 6))\n\n a_r = np_func(a, axis=axis)\n d_r = da_func(d, axis=axis, method=method)\n\n assert_eq(a_r, d_r)\n\n\n@pytest.mark.parametrize(\"func\", [np.cumsum, np.cumprod])\ndef test_array_cumreduction_out(func):\n x = da.ones((10, 10), chunks=(4, 4))\n func(x, axis=0, out=x)\n assert_eq(x, func(np.ones((10, 10)), axis=0))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_topk_argtopk1_test_topk_argtopk1.None_1.daskfunc_b_k_axis_3_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_topk_argtopk1_test_topk_argtopk1.None_1.daskfunc_b_k_axis_3_s", "embedding": null, "metadata": {"file_path": 
"dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 562, "end_line": 613, "span_ids": ["test_topk_argtopk1"], "tokens": 595}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"npfunc,daskfunc\", [(np.sort, da.topk), (np.argsort, da.argtopk)]\n)\n@pytest.mark.parametrize(\"split_every\", [None, 2, 4, 8])\ndef test_topk_argtopk1(npfunc, daskfunc, split_every):\n # Test data\n k = 5\n # Test at least 3 levels of aggregation when split_every=2\n # to stress the different chunk, combine, aggregate kernels\n npa = np.random.random(800)\n npb = np.random.random((10, 20, 30))\n\n a = da.from_array(npa, chunks=((120, 80, 100, 200, 300),))\n b = da.from_array(npb, chunks=(4, 8, 8))\n\n # 1-dimensional arrays\n # top 5 elements, sorted descending\n assert_eq(npfunc(npa)[-k:][::-1], daskfunc(a, k, split_every=split_every))\n # bottom 5 elements, sorted ascending\n assert_eq(npfunc(npa)[:k], daskfunc(a, -k, split_every=split_every))\n\n # n-dimensional arrays\n # also testing when k > chunk\n # top 5 elements, sorted descending\n assert_eq(\n npfunc(npb, axis=0)[-k:, :, :][::-1, :, :],\n daskfunc(b, k, axis=0, split_every=split_every),\n )\n assert_eq(\n npfunc(npb, axis=1)[:, -k:, :][:, ::-1, :],\n daskfunc(b, k, axis=1, split_every=split_every),\n )\n assert_eq(\n npfunc(npb, axis=-1)[:, :, -k:][:, :, ::-1],\n daskfunc(b, k, axis=-1, split_every=split_every),\n )\n with pytest.raises(ValueError):\n daskfunc(b, k, axis=3, split_every=split_every)\n\n # bottom 5 elements, sorted ascending\n assert_eq(\n npfunc(npb, axis=0)[:k, :, :], daskfunc(b, -k, axis=0, split_every=split_every)\n )\n assert_eq(\n npfunc(npb, axis=1)[:, :k, :], daskfunc(b, -k, axis=1, split_every=split_every)\n )\n assert_eq(\n npfunc(npb, axis=-1)[:, :, :k],\n daskfunc(b, -k, axis=-1, split_every=split_every),\n )\n with pytest.raises(ValueError):\n daskfunc(b, -k, axis=3, split_every=split_every)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_topk_argtopk2_test_topk_argtopk2.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_topk_argtopk2_test_topk_argtopk2.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 616, "end_line": 630, "span_ids": ["test_topk_argtopk2"], "tokens": 210}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"npfunc,daskfunc\", [(np.sort, da.topk), (np.argsort, 
da.argtopk)]\n)\n@pytest.mark.parametrize(\"split_every\", [None, 2, 3, 4])\n@pytest.mark.parametrize(\"chunksize\", [1, 2, 3, 4, 5, 10])\ndef test_topk_argtopk2(npfunc, daskfunc, split_every, chunksize):\n \"\"\"Fine test use cases when k is larger than chunk size\"\"\"\n npa = np.random.random((10,))\n a = da.from_array(npa, chunks=chunksize)\n k = 5\n\n # top 5 elements, sorted descending\n assert_eq(npfunc(npa)[-k:][::-1], daskfunc(a, k, split_every=split_every))\n # bottom 5 elements, sorted ascending\n assert_eq(npfunc(npa)[:k], daskfunc(a, -k, split_every=split_every))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_topk_argtopk3_test_topk_argtopk3.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_topk_argtopk3_test_topk_argtopk3.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 633, "end_line": 640, "span_ids": ["test_topk_argtopk3"], "tokens": 116}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_topk_argtopk3():\n a = da.random.random((10, 20, 30), chunks=(4, 8, 8))\n\n # As Array methods\n assert_eq(a.topk(5, axis=1, split_every=2), da.topk(a, 5, axis=1, split_every=2))\n assert_eq(\n a.argtopk(5, axis=1, split_every=2), da.argtopk(a, 5, axis=1, split_every=2)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_regres_3940_test_regres_3940.if_func_not_in_da_cumsum.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_regres_3940_test_regres_3940.if_func_not_in_da_cumsum.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 644, "end_line": 660, "span_ids": ["test_regres_3940"], "tokens": 238}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"func\",\n [da.cumsum, da.cumprod, da.argmin, da.argmax, da.min, da.max, da.nansum, da.nanmax],\n)\n@pytest.mark.parametrize(\"method\", [\"sequential\", \"blelloch\"])\ndef test_regres_3940(func, method):\n if func in {da.cumsum, da.cumprod}:\n kwargs = {\"method\": method}\n else:\n kwargs = {}\n a = da.ones((5, 
2), chunks=(2, 2))\n assert func(a, **kwargs).name != func(a + 1, **kwargs).name\n assert func(a, axis=0, **kwargs).name != func(a, **kwargs).name\n assert func(a, axis=0, **kwargs).name != func(a, axis=1, **kwargs).name\n if func not in {da.cumsum, da.cumprod, da.argmin, da.argmax}:\n assert func(a, axis=()).name != func(a).name\n assert func(a, axis=()).name != func(a, axis=0).name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_trace_test_trace.None_13": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_trace_test_trace.None_13", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 657, "end_line": 679, "span_ids": ["test_trace"], "tokens": 283}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_trace():\n def _assert(a, b, *args, **kwargs):\n return assert_eq(a.trace(*args, **kwargs), b.trace(*args, **kwargs))\n\n b = np.arange(12).reshape((3, 4))\n a = da.from_array(b, 1)\n _assert(a, b)\n _assert(a, b, 0)\n _assert(a, b, 1)\n _assert(a, b, -1)\n\n b = np.arange(8).reshape((2, 2, 2))\n a = da.from_array(b, 2)\n _assert(a, b)\n _assert(a, b, 0)\n _assert(a, b, 1)\n _assert(a, b, -1)\n _assert(a, b, 0, 0, 1)\n _assert(a, b, 0, 0, 2)\n _assert(a, b, 0, 1, 2, int)\n _assert(a, b, 0, 1, 2, float)\n _assert(a, b, offset=1, axis1=0, axis2=2, dtype=int)\n _assert(a, b, offset=1, axis1=0, axis2=2, dtype=float)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_pytest_test_reshape_rechunk.assert_np_prod_list_map_l": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_pytest_test_reshape_rechunk.assert_np_prod_list_map_l", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reshape.py", "file_name": "test_reshape.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 51, "span_ids": ["imports", "test_reshape_rechunk"], "tokens": 843}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\nimport numpy as np\nimport dask.array as da\nfrom dask.array.reshape import reshape_rechunk, expand_tuple, contract_tuple\nfrom dask.array.utils import assert_eq\n\n\n@pytest.mark.parametrize(\n \"inshape,outshape,prechunks,inchunks,outchunks\",\n [\n 
((4,), (4,), ((2, 2),), ((2, 2),), ((2, 2),)),\n ((4,), (2, 2), ((2, 2),), ((2, 2),), ((1, 1), (2,))),\n ((4,), (4, 1), ((2, 2),), ((2, 2),), ((2, 2), (1,))),\n ((4,), (1, 4), ((2, 2),), ((2, 2),), ((1,), (2, 2))),\n ((1, 4), (4,), ((1,), (2, 2)), ((1,), (2, 2)), ((2, 2),)),\n ((4, 1), (4,), ((2, 2), (1,)), ((2, 2), (1,)), ((2, 2),)),\n (\n (4, 1, 4),\n (4, 4),\n ((2, 2), (1,), (2, 2)),\n ((2, 2), (1,), (2, 2)),\n ((2, 2), (2, 2)),\n ),\n ((4, 4), (4, 1, 4), ((2, 2), (2, 2)), ((2, 2), (2, 2)), ((2, 2), (1,), (2, 2))),\n ((2, 2), (4,), ((2,), (2,)), ((2,), (2,)), ((4,),)),\n ((2, 2), (4,), ((1, 1), (2,)), ((1, 1), (2,)), ((2, 2),)),\n ((2, 2), (4,), ((2,), (1, 1)), ((1, 1), (2,)), ((2, 2),)),\n (\n (64,),\n (4, 4, 4),\n ((8, 8, 8, 8, 8, 8, 8, 8),),\n ((16, 16, 16, 16),),\n ((1, 1, 1, 1), (4,), (4,)),\n ),\n ((64,), (4, 4, 4), ((32, 32),), ((32, 32),), ((2, 2), (4,), (4,))),\n ((64,), (4, 4, 4), ((16, 48),), ((16, 48),), ((1, 3), (4,), (4,))),\n ((64,), (4, 4, 4), ((20, 44),), ((16, 48),), ((1, 3), (4,), (4,))),\n (\n (64, 4),\n (8, 8, 4),\n ((16, 16, 16, 16), (2, 2)),\n ((16, 16, 16, 16), (2, 2)),\n ((2, 2, 2, 2), (8,), (2, 2)),\n ),\n ],\n)\ndef test_reshape_rechunk(inshape, outshape, prechunks, inchunks, outchunks):\n result_in, result_out = reshape_rechunk(inshape, outshape, prechunks)\n assert result_in == inchunks\n assert result_out == outchunks\n assert np.prod(list(map(len, result_in))) == np.prod(list(map(len, result_out)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_test_expand_tuple_test_expand_tuple.assert_expand_tuple_7_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_test_expand_tuple_test_expand_tuple.assert_expand_tuple_7_4", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reshape.py", "file_name": "test_reshape.py", "file_type": "text/x-python", "category": "test", "start_line": 54, "end_line": 58, "span_ids": ["test_expand_tuple"], "tokens": 118}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_expand_tuple():\n assert expand_tuple((2, 4), 2) == (1, 1, 2, 2)\n assert expand_tuple((2, 4), 3) == (1, 1, 1, 1, 2)\n assert expand_tuple((3, 4), 2) == (1, 2, 2, 2)\n assert expand_tuple((7, 4), 3) == (2, 2, 3, 1, 1, 2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_test_contract_tuple_test_contract_tuple.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_test_contract_tuple_test_contract_tuple.None_3", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reshape.py", "file_name": "test_reshape.py", "file_type": "text/x-python", 
"category": "test", "start_line": 61, "end_line": 65, "span_ids": ["test_contract_tuple"], "tokens": 112}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_contract_tuple():\n assert contract_tuple((1, 1, 2, 3, 1), 2) == (2, 2, 2, 2)\n assert contract_tuple((1, 1, 2, 5, 1), 2) == (2, 2, 4, 2)\n assert contract_tuple((2, 4), 2) == (2, 4)\n assert contract_tuple((2, 4), 3) == (6,)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_itertools_test_array.assert_isinstance_y_da_A": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_itertools_test_array.assert_isinstance_y_da_A", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 23, "span_ids": ["imports", "test_array"], "tokens": 183}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import itertools\nfrom numbers import Number\n\nimport pytest\nfrom distutils.version import LooseVersion\n\nnp = pytest.importorskip(\"numpy\")\n\nimport dask.array as da\nfrom dask.utils import ignoring\nfrom dask.array.utils import assert_eq, same_keys, AxisError, IS_NEP18_ACTIVE\nfrom dask.array.numpy_compat import _numpy_115\n\n\ndef test_array():\n x = np.ones(5, dtype=\"i4\")\n d = da.ones(5, chunks=3, dtype=\"i4\")\n assert_eq(da.array(d, ndmin=3, dtype=\"i8\"), np.array(x, ndmin=3, dtype=\"i8\"))\n\n # regression #1847 this shall not raise an exception.\n x = da.ones((100, 3), chunks=10)\n y = da.array(x)\n assert isinstance(y, da.Array)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_array_return_type_test_atleast_nd_no_args.assert_np_r_n_da_r_n": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_array_return_type_test_atleast_nd_no_args.assert_np_r_n_da_r_n", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 26, "end_line": 47, "span_ids": ["test_array_return_type", "test_atleast_nd_no_args", "test_derived_docstrings"], "tokens": 187}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_array_return_type():\n # Regression test for https://github.com/dask/dask/issues/5426\n x = [0, 1, 2, 3]\n dx = da.array(x)\n assert isinstance(dx, da.Array)\n assert_eq(x, dx)\n\n\ndef test_derived_docstrings():\n assert \"This docstring was copied from numpy.array\" in da.routines.array.__doc__\n assert \"Create an array.\" in da.routines.array.__doc__\n\n\n@pytest.mark.parametrize(\"funcname\", [\"atleast_1d\", \"atleast_2d\", \"atleast_3d\"])\ndef test_atleast_nd_no_args(funcname):\n np_func = getattr(np, funcname)\n da_func = getattr(da, funcname)\n\n np_r_n = np_func()\n da_r_n = da_func()\n\n assert np_r_n == da_r_n", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_atleast_nd_one_arg_test_atleast_nd_one_arg.assert_eq_np_r_da_r_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_atleast_nd_one_arg_test_atleast_nd_one_arg.assert_eq_np_r_da_r_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 50, "end_line": 71, "span_ids": ["test_atleast_nd_one_arg"], "tokens": 198}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"funcname\", [\"atleast_1d\", \"atleast_2d\", \"atleast_3d\"])\n@pytest.mark.parametrize(\n \"shape, chunks\",\n [\n (tuple(), tuple()),\n ((4,), (2,)),\n ((4, 6), (2, 3)),\n ((4, 6, 8), (2, 3, 4)),\n ((4, 6, 8, 10), (2, 3, 4, 5)),\n ],\n)\ndef test_atleast_nd_one_arg(funcname, shape, chunks):\n np_a = np.random.random(shape)\n da_a = da.from_array(np_a, chunks=chunks)\n\n np_func = getattr(np, funcname)\n da_func = getattr(da, funcname)\n\n np_r = np_func(np_a)\n da_r = da_func(da_a)\n\n assert_eq(np_r, da_r)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_atleast_nd_two_args_test_atleast_nd_two_args.for_np_r_da_r_in_zip_np_.assert_eq_np_r_da_r_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_atleast_nd_two_args_test_atleast_nd_two_args.for_np_r_da_r_in_zip_np_.assert_eq_np_r_da_r_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 74, "end_line": 104, "span_ids": ["test_atleast_nd_two_args"], "tokens": 315}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"funcname\", [\"atleast_1d\", \"atleast_2d\", \"atleast_3d\"])\n@pytest.mark.parametrize(\n \"shape1, shape2\",\n list(\n itertools.combinations_with_replacement(\n [tuple(), (4,), (4, 6), (4, 6, 8), (4, 6, 8, 10)], 2\n )\n ),\n)\ndef test_atleast_nd_two_args(funcname, shape1, shape2):\n np_a_1 = np.random.random(shape1)\n da_a_1 = da.from_array(np_a_1, chunks=tuple(c // 2 for c in shape1))\n\n np_a_2 = np.random.random(shape2)\n da_a_2 = da.from_array(np_a_2, chunks=tuple(c // 2 for c in shape2))\n\n np_a_n = [np_a_1, np_a_2]\n da_a_n = [da_a_1, da_a_2]\n\n np_func = getattr(np, funcname)\n da_func = getattr(da, funcname)\n\n np_r_n = np_func(*np_a_n)\n da_r_n = da_func(*da_a_n)\n\n assert type(np_r_n) is type(da_r_n)\n\n assert len(np_r_n) == len(da_r_n)\n\n for np_r, da_r in zip(np_r_n, da_r_n):\n assert_eq(np_r, da_r)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_transpose_test_transpose.None_1.d_transpose_1_2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_transpose_test_transpose.None_1.d_transpose_1_2_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 107, "end_line": 121, "span_ids": ["test_transpose"], "tokens": 172}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_transpose():\n x = np.arange(240).reshape((4, 6, 10))\n d = da.from_array(x, (2, 3, 4))\n\n assert_eq(d.transpose((2, 0, 1)), x.transpose((2, 0, 1)))\n assert same_keys(d.transpose((2, 0, 1)), d.transpose((2, 0, 1)))\n\n assert_eq(d.transpose(2, 0, 1), x.transpose(2, 0, 1))\n assert same_keys(d.transpose(2, 0, 1), d.transpose(2, 0, 1))\n\n with pytest.raises(ValueError):\n d.transpose(1, 2)\n\n with pytest.raises(ValueError):\n d.transpose((1, 2))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_transpose_negative_axes_test_transpose_skip_when_possible.assert_x_transpose_3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_transpose_negative_axes_test_transpose_skip_when_possible.assert_x_transpose_3_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 124, "end_line": 134, 
"span_ids": ["test_transpose_negative_axes", "test_transpose_skip_when_possible"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_transpose_negative_axes():\n x = np.ones((2, 3, 4, 5))\n y = da.ones((2, 3, 4, 5), chunks=3)\n\n assert_eq(x.transpose([-1, -2, 0, 1]), y.transpose([-1, -2, 0, 1]))\n\n\ndef test_transpose_skip_when_possible():\n x = da.ones((2, 3, 4), chunks=3)\n assert x.transpose((0, 1, 2)) is x\n assert x.transpose((-3, -2, -1)) is x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_swapaxes_test_swapaxes.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_swapaxes_test_swapaxes.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 137, "end_line": 150, "span_ids": ["test_swapaxes"], "tokens": 247}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_swapaxes():\n x = np.random.normal(0, 10, size=(10, 12, 7))\n d = da.from_array(x, chunks=(4, 5, 2))\n\n assert_eq(np.swapaxes(x, 0, 1), da.swapaxes(d, 0, 1))\n assert_eq(np.swapaxes(x, 2, 1), da.swapaxes(d, 2, 1))\n assert_eq(x.swapaxes(2, 1), d.swapaxes(2, 1))\n assert_eq(x.swapaxes(0, 0), d.swapaxes(0, 0))\n assert_eq(x.swapaxes(1, 2), d.swapaxes(1, 2))\n assert_eq(x.swapaxes(0, -1), d.swapaxes(0, -1))\n assert_eq(x.swapaxes(-1, 1), d.swapaxes(-1, 1))\n\n assert d.swapaxes(0, 1).name == d.swapaxes(0, 1).name\n assert d.swapaxes(0, 1).name != d.swapaxes(1, 0).name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_moveaxis_rollaxis_test_moveaxis_rollaxis.for_axis1_in_range_x_ndi.for_axis2_in_range_x_ndi.assert_eq_np_func_x_axis": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_moveaxis_rollaxis_test_moveaxis_rollaxis.for_axis1_in_range_x_ndi.for_axis2_in_range_x_ndi.assert_eq_np_func_x_axis", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 153, "end_line": 163, "span_ids": ["test_moveaxis_rollaxis"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", 
"start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"funcname\", [\"moveaxis\", \"rollaxis\"])\n@pytest.mark.parametrize(\"shape\", [(), (5,), (3, 5, 7, 3)])\ndef test_moveaxis_rollaxis(funcname, shape):\n x = np.random.random(shape)\n d = da.from_array(x, chunks=(len(shape) * (2,)))\n np_func = getattr(np, funcname)\n da_func = getattr(da, funcname)\n for axis1 in range(-x.ndim, x.ndim):\n assert isinstance(da_func(d, 0, axis1), da.Array)\n for axis2 in range(-x.ndim, x.ndim):\n assert_eq(np_func(x, axis1, axis2), da_func(d, axis1, axis2))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_moveaxis_rollaxis_keyword_test_moveaxis_rollaxis_numpy_api.assert_eq_result_np_roll": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_moveaxis_rollaxis_keyword_test_moveaxis_rollaxis_numpy_api.assert_eq_result_np_roll", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 166, "end_line": 185, "span_ids": ["test_moveaxis_rollaxis_keyword", "test_moveaxis_rollaxis_numpy_api"], "tokens": 242}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_moveaxis_rollaxis_keyword():\n x = np.random.random((10, 12, 7))\n d = da.from_array(x, chunks=(4, 5, 2))\n assert_eq(\n np.moveaxis(x, destination=1, source=0), da.moveaxis(d, destination=1, source=0)\n )\n assert_eq(np.rollaxis(x, 2), da.rollaxis(d, 2))\n assert isinstance(da.rollaxis(d, 1), da.Array)\n assert_eq(np.rollaxis(x, start=1, axis=2), da.rollaxis(d, start=1, axis=2))\n\n\ndef test_moveaxis_rollaxis_numpy_api():\n a = da.random.random((4, 4, 4), chunks=2)\n result = np.moveaxis(a, 2, 0)\n assert isinstance(result, da.Array)\n assert_eq(result, np.moveaxis(a.compute(), 2, 0))\n\n result = np.rollaxis(a, 2, 0)\n assert isinstance(result, da.Array)\n assert_eq(result, np.rollaxis(a.compute(), 2, 0))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_flip_test_flip.try_.else_.assert_eq_np_r_da_r_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_flip_test_flip.try_.else_.assert_eq_np_r_da_r_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 188, "end_line": 223, "span_ids": ["test_flip"], "tokens": 272}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"funcname, kwargs\",\n [\n (\"flipud\", {}),\n (\"fliplr\", {}),\n (\"flip\", {\"axis\": 0}),\n (\"flip\", {\"axis\": 1}),\n (\"flip\", {\"axis\": 2}),\n (\"flip\", {\"axis\": -1}),\n ],\n)\n@pytest.mark.parametrize(\"shape\", [tuple(), (4,), (4, 6), (4, 6, 8), (4, 6, 8, 10)])\ndef test_flip(funcname, kwargs, shape):\n axis = kwargs.get(\"axis\")\n if axis is None:\n if funcname == \"flipud\":\n axis = 0\n elif funcname == \"fliplr\":\n axis = 1\n\n np_a = np.random.random(shape)\n da_a = da.from_array(np_a, chunks=1)\n\n np_func = getattr(np, funcname)\n da_func = getattr(da, funcname)\n\n try:\n range(np_a.ndim)[axis]\n except IndexError:\n with pytest.raises(ValueError):\n da_func(da_a, **kwargs)\n else:\n np_r = np_func(np_a, **kwargs)\n da_r = da_func(da_a, **kwargs)\n\n assert_eq(np_r, da_r)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_matmul_test_matmul.for_d1_d2_in_itertools_p.if_x_ndim_0_or_y_ndim_.else_.assert_eq_expected_da_ma": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_matmul_test_matmul.for_d1_d2_in_itertools_p.if_x_ndim_0_or_y_ndim_.else_.assert_eq_expected_da_ma", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 226, "end_line": 276, "span_ids": ["test_matmul"], "tokens": 540}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"x_shape, y_shape\",\n [\n [(), ()],\n [(), (7,)],\n [(), (7, 11)],\n [(), (7, 11, 15)],\n [(), (7, 11, 15, 19)],\n [(7,), ()],\n [(7,), (7,)],\n [(11,), (11, 7)],\n [(15,), (7, 15, 11)],\n [(19,), (7, 11, 19, 15)],\n [(7, 11), ()],\n [(7, 11), (11,)],\n [(7, 11), (11, 7)],\n [(11, 15), (7, 15, 11)],\n [(15, 19), (7, 11, 19, 15)],\n [(7, 11, 15), ()],\n [(7, 11, 15), (15,)],\n [(7, 11, 15), (15, 7)],\n [(7, 11, 15), (7, 15, 11)],\n [(11, 15, 19), (7, 11, 19, 15)],\n [(7, 11, 15, 19), ()],\n [(7, 11, 15, 19), (19,)],\n [(7, 11, 15, 19), (19, 7)],\n [(7, 11, 15, 19), (11, 19, 13)],\n [(7, 11, 15, 19), (7, 11, 19, 15)],\n ],\n)\ndef test_matmul(x_shape, y_shape):\n np.random.seed(3732)\n\n x = np.random.random(x_shape)[()]\n y = np.random.random(y_shape)[()]\n\n a = da.from_array(x, chunks=tuple((i // 2) for i in x.shape))\n b = da.from_array(y, chunks=tuple((i // 2) for i in y.shape))\n\n expected = None\n try:\n expected = np.matmul(x, y)\n except ValueError:\n pass\n\n for d1, d2 in itertools.product([a, x], [b, y]):\n if x.ndim == 0 or y.ndim == 0:\n with pytest.raises(ValueError):\n 
da.matmul(d1, d2)\n else:\n assert_eq(expected, da.matmul(d1, d2))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tensordot_test_tensordot.with_pytest_warns_da_Perf.assert_not_same_keys_da_t": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tensordot_test_tensordot.with_pytest_warns_da_Perf.assert_not_same_keys_da_t", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 279, "end_line": 294, "span_ids": ["test_tensordot"], "tokens": 253}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tensordot():\n x = np.arange(400).reshape((20, 20))\n a = da.from_array(x, chunks=(5, 4))\n y = np.arange(200).reshape((20, 10))\n b = da.from_array(y, chunks=(4, 5))\n\n for axes in [1, (1, 0)]:\n assert_eq(da.tensordot(a, b, axes=axes), np.tensordot(x, y, axes=axes))\n assert_eq(da.tensordot(x, b, axes=axes), np.tensordot(x, y, axes=axes))\n assert_eq(da.tensordot(a, y, axes=axes), np.tensordot(x, y, axes=axes))\n\n assert same_keys(da.tensordot(a, b, axes=(1, 0)), da.tensordot(a, b, axes=(1, 0)))\n\n # Increasing number of chunks warning\n with pytest.warns(da.PerformanceWarning):\n assert not same_keys(da.tensordot(a, b, axes=0), da.tensordot(a, b, axes=1))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tensordot_2_test_tensordot_2.assert_eq_da_tensordot_y_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tensordot_2_test_tensordot_2.assert_eq_da_tensordot_y_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 297, "end_line": 304, "span_ids": ["test_tensordot_2"], "tokens": 139}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"axes\", [0, 1, (0, 1), (1, 0), ((1, 0), (2, 1)), ((1, 2), (2, 0)), ((2, 0), (1, 2))]\n)\ndef test_tensordot_2(axes):\n x = np.arange(4 * 4 * 4).reshape((4, 4, 4))\n y = da.from_array(x, chunks=2)\n\n assert_eq(da.tensordot(y, y, axes=axes), np.tensordot(x, x, axes=axes))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: 
{value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tensordot_double_contraction_neq2_test_tensordot_double_contraction_neq2.assert_eq_da_tensordot_y_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tensordot_double_contraction_neq2_test_tensordot_double_contraction_neq2.assert_eq_da_tensordot_y_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 307, "end_line": 312, "span_ids": ["test_tensordot_double_contraction_neq2"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"chunks\", [\"auto\", (4, 6), (2, 3), (4, 3), (2, 6)])\ndef test_tensordot_double_contraction_neq2(chunks):\n # Regression test for https://github.com/dask/dask/issues/5472\n x = np.arange(24).reshape(4, 6)\n y = da.from_array(x, chunks=chunks)\n assert_eq(da.tensordot(y, y, axes=2), np.tensordot(x, x, axes=2))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tensordot_double_contraction_ngt2_test_tensordot_double_contraction_ngt2.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tensordot_double_contraction_ngt2_test_tensordot_double_contraction_ngt2.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 315, "end_line": 329, "span_ids": ["test_tensordot_double_contraction_ngt2"], "tokens": 205}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tensordot_double_contraction_ngt2():\n # Regression test for https://github.com/dask/dask/issues/5472\n x = np.arange(60.0).reshape(3, 4, 5)\n y = np.arange(60.0).reshape(4, 5, 3)\n u = da.from_array(x)\n v = da.from_array(y)\n\n assert_eq(da.tensordot(u, v, axes=2), np.tensordot(x, y, axes=2))\n\n x = np.arange(60.0).reshape(3, 4, 5)\n y = np.arange(60.0).reshape(4, 5, 3)\n u = da.from_array(x, chunks=3)\n v = da.from_array(y)\n\n assert_eq(da.tensordot(u, v, axes=2), np.tensordot(x, y, axes=2))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tensordot_more_than_26_dims_test_dot_method.assert_eq_a_dot_b_x_dot": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tensordot_more_than_26_dims_test_dot_method.assert_eq_a_dot_b_x_dot", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 332, "end_line": 345, "span_ids": ["test_tensordot_more_than_26_dims", "test_dot_method"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tensordot_more_than_26_dims():\n ndim = 27\n x = np.broadcast_to(1, [2] * ndim)\n dx = da.from_array(x, chunks=-1)\n assert_eq(da.tensordot(dx, dx, ndim), np.array(2 ** ndim))\n\n\ndef test_dot_method():\n x = np.arange(400).reshape((20, 20))\n a = da.from_array(x, chunks=(5, 5))\n y = np.arange(200).reshape((20, 10))\n b = da.from_array(y, chunks=(5, 5))\n\n assert_eq(a.dot(b), x.dot(y))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_vdot_test_vdot.assert_eq_da_vdot_a_b_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_vdot_test_vdot.assert_eq_da_vdot_a_b_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 348, "end_line": 363, "span_ids": ["test_vdot"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"shape, chunks\", [((20,), (6,)), ((4, 5), (2, 3))])\ndef test_vdot(shape, chunks):\n np.random.seed(1337)\n\n x = 2 * np.random.random((2,) + shape) - 1\n x = x[0] + 1j * x[1]\n\n y = 2 * np.random.random((2,) + shape) - 1\n y = y[0] + 1j * y[1]\n\n a = da.from_array(x, chunks=chunks)\n b = da.from_array(y, chunks=chunks)\n\n assert_eq(np.vdot(x, y), da.vdot(a, b))\n assert_eq(np.vdot(y, x), da.vdot(b, a))\n assert_eq(da.vdot(a, b), da.vdot(b, a).conj())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_outer_test_outer.assert_eq_np_outer_y_x_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_outer_test_outer.assert_eq_np_outer_y_x_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 366, "end_line": 377, "span_ids": ["test_outer"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"shape1, shape2\", [((20,), (6,)), ((4, 5), (2, 3))])\ndef test_outer(shape1, shape2):\n np.random.seed(1337)\n\n x = 2 * np.random.random(shape1) - 1\n y = 2 * np.random.random(shape2) - 1\n\n a = da.from_array(x, chunks=3)\n b = da.from_array(y, chunks=3)\n\n assert_eq(np.outer(x, y), da.outer(a, b))\n assert_eq(np.outer(y, x), da.outer(b, a))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_apply_along_axis_test_apply_along_axis.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_apply_along_axis_test_apply_along_axis.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 380, "end_line": 412, "span_ids": ["test_apply_along_axis"], "tokens": 336}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"func1d_name, func1d, specify_output_props\",\n [\n [\"ndim\", lambda x: x.ndim, False],\n [\"sum\", lambda x: x.sum(), False],\n [\"range\", lambda x: [x.min(), x.max()], False],\n [\"range2\", lambda x: [[x.min(), x.max()], [x.max(), x.min()]], False],\n [\"cumsum\", lambda x: np.cumsum(x), True],\n ],\n)\n@pytest.mark.parametrize(\n \"input_shape, axis\",\n [[(10, 15, 20), 0], [(10, 15, 20), 1], [(10, 15, 20), 2], [(10, 15, 20), -1]],\n)\ndef test_apply_along_axis(func1d_name, func1d, specify_output_props, input_shape, axis):\n a = np.random.randint(0, 10, input_shape)\n d = da.from_array(a, chunks=(len(input_shape) * (5,)))\n\n output_shape = None\n output_dtype = None\n\n if specify_output_props:\n slices = [0] * a.ndim\n slices[axis] = slice(None)\n slices = tuple(slices)\n sample = np.array(func1d(a[slices]))\n output_shape = sample.shape\n output_dtype = sample.dtype\n\n assert_eq(\n da.apply_along_axis(func1d, axis, d, dtype=output_dtype, shape=output_shape),\n np.apply_along_axis(func1d, axis, a),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_apply_over_axes_test_apply_over_axes.assert_eq_da_apply_over_a": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_apply_over_axes_test_apply_over_axes.assert_eq_da_apply_over_a", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 415, "end_line": 443, "span_ids": ["test_apply_over_axes"], "tokens": 256}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"func_name, func\",\n [\n [\"sum0\", lambda x, axis: x.sum(axis=axis)],\n [\"sum1\", lambda x, axis: x.sum(axis=axis, keepdims=True)],\n [\n \"range\",\n lambda x, axis: np.concatenate(\n [x.min(axis=axis, keepdims=True), x.max(axis=axis, keepdims=True)],\n axis=axis,\n ),\n ],\n ],\n)\n@pytest.mark.parametrize(\n \"shape, axes\",\n [\n [(10, 15, 20), tuple()],\n [(10, 15, 20), 0],\n [(10, 15, 20), (1,)],\n [(10, 15, 20), (-1, 1)],\n [(10, 15, 20), (2, 0, 1)],\n ],\n)\ndef test_apply_over_axes(func_name, func, shape, axes):\n a = np.random.randint(0, 10, shape)\n d = da.from_array(a, chunks=(len(shape) * (5,)))\n\n assert_eq(da.apply_over_axes(func, d, axes), np.apply_over_axes(func, a, axes))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_ptp_test_ptp.assert_eq_da_ptp_d_axis_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_ptp_test_ptp.assert_eq_da_ptp_d_axis_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 446, "end_line": 460, "span_ids": ["test_ptp"], "tokens": 136}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"shape, axis\",\n [\n [(10, 15, 20), None],\n [(10, 15, 20), 0],\n [(10, 15, 20), 1],\n [(10, 15, 20), 2],\n [(10, 15, 20), -1],\n ],\n)\ndef test_ptp(shape, axis):\n a = np.random.randint(0, 10, shape)\n d = da.from_array(a, chunks=(len(shape) * (5,)))\n\n assert_eq(da.ptp(d, axis), np.ptp(a, axis))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_diff_test_diff.assert_eq_da_diff_a_n_a": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_diff_test_diff.assert_eq_da_diff_a_n_a", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 463, "end_line": 472, "span_ids": ["test_diff"], "tokens": 136}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"shape, axis\",\n [[(10, 15, 20), 0], [(10, 15, 20), 1], [(10, 15, 20), 2], [(10, 15, 20), -1]],\n)\n@pytest.mark.parametrize(\"n\", [0, 1, 2])\ndef test_diff(shape, n, axis):\n x = np.random.randint(0, 10, shape)\n a = da.from_array(x, chunks=(len(shape) * (5,)))\n\n assert_eq(da.diff(a, n, axis), np.diff(x, n, axis))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_ediff1d_test_ediff1d.assert_eq_da_ediff1d_a_t": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_ediff1d_test_ediff1d.assert_eq_da_ediff1d_a_t", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 475, "end_line": 481, "span_ids": ["test_ediff1d"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"shape\", [(10,), (10, 15)])\n@pytest.mark.parametrize(\"to_end, to_begin\", [[None, None], [0, 0], [[1, 2], [3, 4]]])\ndef test_ediff1d(shape, to_end, to_begin):\n x = np.random.randint(0, 10, shape)\n a = da.from_array(x, chunks=(len(shape) * (5,)))\n\n assert_eq(da.ediff1d(a, to_end, to_begin), np.ediff1d(x, to_end, to_begin))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_gradient_test_gradient.if_isinstance_axis_Numbe.else_.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_gradient_test_gradient.if_isinstance_axis_Numbe.else_.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 484, "end_line": 518, "span_ids": ["test_gradient"], "tokens": 421}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"shape, varargs, axis\",\n [\n [(10, 15, 20), (), None],\n [(10, 15, 20), (2,), None],\n [(10, 15, 20), (1.0, 1.5, 2.0), None],\n [(10, 15, 20), (), 0],\n [(10, 15, 20), (), 1],\n [(10, 15, 20), (), 2],\n [(10, 15, 20), (), -1],\n [(10, 15, 20), (), (0, 2)],\n [(10, 15, 20), (np.exp(np.arange(10)), np.exp(np.arange(20))), (0, 2)],\n [(10, 15, 20), (0.5, np.exp(np.arange(20))), (0, 2)],\n [(10, 15, 20), (np.exp(np.arange(20)),), -1],\n ],\n)\n@pytest.mark.parametrize(\"edge_order\", [1, 2])\ndef test_gradient(shape, varargs, axis, edge_order):\n a = np.random.randint(0, 10, shape)\n d_a = da.from_array(a, chunks=(len(shape) * (5,)))\n\n r_a = np.gradient(a, *varargs, axis=axis, edge_order=edge_order)\n r_d_a = da.gradient(d_a, *varargs, axis=axis, edge_order=edge_order)\n\n if isinstance(axis, Number):\n assert_eq(r_d_a, r_a)\n else:\n assert len(r_d_a) == len(r_a)\n\n for e_r_d_a, e_r_a in zip(r_d_a, r_a):\n assert_eq(e_r_d_a, e_r_a)\n\n assert_eq(\n da.sqrt(sum(map(da.square, r_d_a))), np.sqrt(sum(map(np.square, r_a)))\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_bincount_test_bincount.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_bincount_test_bincount.None_2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 521, "end_line": 529, "span_ids": ["test_bincount"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_bincount():\n x = np.array([2, 1, 5, 2, 1])\n d = da.from_array(x, chunks=2)\n e = da.bincount(d, minlength=6)\n assert_eq(e, np.bincount(x, minlength=6))\n assert same_keys(da.bincount(d, minlength=6), e)\n\n assert da.bincount(d, minlength=6).name != da.bincount(d, minlength=7).name\n assert da.bincount(d, minlength=6).name == da.bincount(d, minlength=6).name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_bincount_with_weights_test_bincount_unspecified_minlength._shape_is_nan_so_must": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_bincount_with_weights_test_bincount_unspecified_minlength._shape_is_nan_so_must", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", 
"category": "test", "start_line": 532, "end_line": 549, "span_ids": ["test_bincount_with_weights", "test_bincount_unspecified_minlength"], "tokens": 220}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_bincount_with_weights():\n x = np.array([2, 1, 5, 2, 1])\n d = da.from_array(x, chunks=2)\n weights = np.array([1, 2, 1, 0.5, 1])\n\n dweights = da.from_array(weights, chunks=2)\n e = da.bincount(d, weights=dweights, minlength=6)\n assert_eq(e, np.bincount(x, weights=dweights.compute(), minlength=6))\n assert same_keys(da.bincount(d, weights=dweights, minlength=6), e)\n\n\ndef test_bincount_unspecified_minlength():\n x = np.array([1, 1, 3, 7, 0])\n d = da.from_array(x, chunks=2)\n e = da.bincount(d)\n assert_eq(e, np.bincount(x))\n assert same_keys(da.bincount(d), e)\n assert len(e.compute()) == 8 # shape is (nan,) so must compute for len()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_digitize_test_digitize.for_chunks_in_10_10_.for_right_in_False_True.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_digitize_test_digitize.for_chunks_in_10_10_.for_right_in_False_True.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 552, "end_line": 570, "span_ids": ["test_digitize"], "tokens": 215}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_digitize():\n x = np.array([2, 4, 5, 6, 1])\n bins = np.array([1, 2, 3, 4, 5])\n for chunks in [2, 4]:\n for right in [False, True]:\n d = da.from_array(x, chunks=chunks)\n assert_eq(\n da.digitize(d, bins, right=right), np.digitize(x, bins, right=right)\n )\n\n x = np.random.random(size=(100, 100))\n bins = np.random.random(size=13)\n bins.sort()\n for chunks in [(10, 10), (10, 20), (13, 17), (87, 54)]:\n for right in [False, True]:\n d = da.from_array(x, chunks=chunks)\n assert_eq(\n da.digitize(d, bins, right=right), np.digitize(x, bins, right=right)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram_test_histogram.assert_same_keys_da_histo": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram_test_histogram.assert_same_keys_da_histo", "embedding": null, "metadata": {"file_path": 
"dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 573, "end_line": 585, "span_ids": ["test_histogram"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_histogram():\n # Test for normal, flattened input\n n = 100\n v = da.random.random(n, chunks=10)\n bins = np.arange(0, 1.01, 0.01)\n (a1, b1) = da.histogram(v, bins=bins)\n (a2, b2) = np.histogram(v, bins=bins)\n\n # Check if the sum of the bins equals the number of samples\n assert a2.sum(axis=0) == n\n assert a1.sum(axis=0) == n\n assert_eq(a1, a2)\n assert same_keys(da.histogram(v, bins=bins)[0], a1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram_alternative_bins_range_test_histogram_return_type.assert_eq_da_histogram_v_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram_alternative_bins_range_test_histogram_return_type.assert_eq_da_histogram_v_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 588, "end_line": 611, "span_ids": ["test_histogram_return_type", "test_histogram_bins_range_with_nan_array", "test_histogram_alternative_bins_range"], "tokens": 302}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_histogram_alternative_bins_range():\n v = da.random.random(100, chunks=10)\n (a1, b1) = da.histogram(v, bins=10, range=(0, 1))\n (a2, b2) = np.histogram(v, bins=10, range=(0, 1))\n assert_eq(a1, a2)\n assert_eq(b1, b2)\n\n\n@pytest.mark.filterwarnings(\"ignore:invalid value:RuntimeWarning\")\ndef test_histogram_bins_range_with_nan_array():\n # Regression test for issue #3977\n v = da.from_array(np.array([-2, np.nan, 2]), chunks=1)\n (a1, b1) = da.histogram(v, bins=10, range=(-3, 3))\n (a2, b2) = np.histogram(v, bins=10, range=(-3, 3))\n assert_eq(a1, a2)\n assert_eq(b1, b2)\n\n\ndef test_histogram_return_type():\n v = da.random.random(100, chunks=10)\n bins = np.arange(0, 1.01, 0.01)\n # Check if return type is same as hist\n bins = np.arange(0, 11, 1, dtype=\"i4\")\n assert_eq(da.histogram(v * 10, bins=bins)[0], np.histogram(v * 10, bins=bins)[0])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram_extra_args_and_shapes_test_histogram_extra_args_and_shapes.for_v_bins_w_in_data_.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram_extra_args_and_shapes_test_histogram_extra_args_and_shapes.for_v_bins_w_in_data_.None_2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 614, "end_line": 639, "span_ids": ["test_histogram_extra_args_and_shapes"], "tokens": 233}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_histogram_extra_args_and_shapes():\n # Check for extra args and shapes\n bins = np.arange(0, 1.01, 0.01)\n v = da.random.random(100, chunks=10)\n data = [\n (v, bins, da.ones(100, chunks=v.chunks) * 5),\n (da.random.random((50, 50), chunks=10), bins, da.ones((50, 50), chunks=10) * 5),\n ]\n\n for v, bins, w in data:\n # density\n assert_eq(\n da.histogram(v, bins=bins, density=True)[0],\n np.histogram(v, bins=bins, density=True)[0],\n )\n\n # weights\n assert_eq(\n da.histogram(v, bins=bins, weights=w)[0],\n np.histogram(v, bins=bins, weights=w)[0],\n )\n\n assert_eq(\n da.histogram(v, bins=bins, weights=w, density=True)[0],\n da.histogram(v, bins=bins, weights=w, density=True)[0],\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram_normed_deprecation_test_histogram_bin_range_raises.assert_bins_in_err_msg_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram_normed_deprecation_test_histogram_bin_range_raises.assert_bins_in_err_msg_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 642, "end_line": 672, "span_ids": ["test_histogram_normed_deprecation", "test_histogram_bin_range_raises"], "tokens": 276}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_histogram_normed_deprecation():\n x = da.arange(10)\n with pytest.raises(ValueError) as info:\n da.histogram(x, bins=[1, 2, 3], normed=True)\n\n assert \"density\" in str(info.value)\n assert \"deprecated\" in str(info.value).lower()\n\n\n@pytest.mark.parametrize(\n \"bins, hist_range\",\n [\n (None, None),\n (10, None),\n (10, 1),\n (None, (1, 10)),\n (10, [0, 1, 2]),\n (10, [0]),\n (10, np.array([[0, 1]])),\n (10, da.array([[0, 1]])),\n ([[0, 1, 2]], None),\n (np.array([[0, 1, 2]]), None),\n 
(da.array([[0, 1, 2]]), None),\n ],\n)\ndef test_histogram_bin_range_raises(bins, hist_range):\n data = da.random.random(10, chunks=2)\n with pytest.raises((ValueError, TypeError)) as info:\n da.histogram(data, bins=bins, range=hist_range)\n err_msg = str(info.value)\n assert \"bins\" in err_msg or \"range\" in err_msg", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram_delayed_range_test_histogram_delayed_range.assert_eq_bins_d_bins_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram_delayed_range_test_histogram_delayed_range.assert_eq_bins_d_bins_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 675, "end_line": 708, "span_ids": ["test_histogram_delayed_range"], "tokens": 276}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"density\", [True, False])\n@pytest.mark.parametrize(\"weighted\", [True, False])\n@pytest.mark.parametrize(\"non_delayed_i\", [None, 0, 1])\n@pytest.mark.parametrize(\"delay_n_bins\", [False, True])\ndef test_histogram_delayed_range(density, weighted, non_delayed_i, delay_n_bins):\n n = 100\n v = np.random.random(n)\n vd = da.from_array(v, chunks=10)\n\n if weighted:\n weights = np.random.random(n)\n weights_d = da.from_array(weights, chunks=vd.chunks)\n\n d_range = [vd.min(), vd.max()]\n if non_delayed_i is not None:\n d_range[non_delayed_i] = d_range[non_delayed_i].compute()\n hist_d, bins_d = da.histogram(\n vd,\n bins=da.array(n) if delay_n_bins and not density else n,\n range=d_range,\n density=density,\n weights=weights_d if weighted else None,\n )\n\n hist, bins = np.histogram(\n v,\n bins=n,\n range=[v.min(), v.max()],\n density=density,\n weights=weights if weighted else None,\n )\n\n assert_eq(hist_d, hist)\n assert_eq(bins_d, bins)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram_delayed_bins_test_histogram_delayed_bins.assert_eq_bins_d2_bins_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram_delayed_bins_test_histogram_delayed_bins.assert_eq_bins_d2_bins_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 711, "end_line": 743, "span_ids": ["test_histogram_delayed_bins"], "tokens": 246}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", 
"start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"density\", [True, False])\n@pytest.mark.parametrize(\"weighted\", [True, False])\ndef test_histogram_delayed_bins(density, weighted):\n n = 100\n v = np.random.random(n)\n bins = np.array([0, 0.2, 0.5, 0.8, 1])\n\n vd = da.from_array(v, chunks=10)\n bins_d = da.from_array(bins, chunks=2)\n\n if weighted:\n weights = np.random.random(n)\n weights_d = da.from_array(weights, chunks=vd.chunks)\n\n hist_d, bins_d2 = da.histogram(\n vd,\n bins=bins_d,\n range=[bins_d[0], bins_d[-1]],\n density=density,\n weights=weights_d if weighted else None,\n )\n\n hist, bins = np.histogram(\n v,\n bins=bins,\n range=[bins[0], bins[-1]],\n density=density,\n weights=weights if weighted else None,\n )\n\n assert bins_d is bins_d2\n assert_eq(hist_d, hist)\n assert_eq(bins_d2, bins)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram_delayed_n_bins_raises_with_density_test_cov.with_pytest_raises_ValueE.da_cov_d_ddof_1_5_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram_delayed_n_bins_raises_with_density_test_cov.with_pytest_raises_ValueE.da_cov_d_ddof_1_5_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 746, "end_line": 772, "span_ids": ["test_cov", "test_histogram_delayed_n_bins_raises_with_density"], "tokens": 291}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_histogram_delayed_n_bins_raises_with_density():\n data = da.random.random(10, chunks=2)\n with pytest.raises(\n NotImplementedError, match=\"`bins` cannot be a scalar Dask object\"\n ):\n da.histogram(data, bins=da.array(10), range=[0, 1], density=True)\n\n\ndef test_cov():\n x = np.arange(56).reshape((7, 8))\n d = da.from_array(x, chunks=(4, 4))\n\n assert_eq(da.cov(d), np.cov(x))\n assert_eq(da.cov(d, rowvar=0), np.cov(x, rowvar=0))\n with pytest.warns(None): # warning dof <= 0 for slice\n assert_eq(da.cov(d, ddof=10), np.cov(x, ddof=10))\n assert_eq(da.cov(d, bias=1), np.cov(x, bias=1))\n assert_eq(da.cov(d, d), np.cov(x, x))\n\n y = np.arange(8)\n e = da.from_array(y, chunks=(4,))\n\n assert_eq(da.cov(d, e), np.cov(x, y))\n assert_eq(da.cov(e, d), np.cov(y, x))\n\n with pytest.raises(ValueError):\n da.cov(d, ddof=1.5)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_corrcoef_test_round.assert_eq_d_round_2_da_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_corrcoef_test_round.assert_eq_d_round_2_da_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 775, "end_line": 797, "span_ids": ["test_round", "test_corrcoef"], "tokens": 220}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_corrcoef():\n x = np.arange(56).reshape((7, 8))\n d = da.from_array(x, chunks=(4, 4))\n\n assert_eq(da.corrcoef(d), np.corrcoef(x))\n assert_eq(da.corrcoef(d, rowvar=0), np.corrcoef(x, rowvar=0))\n assert_eq(da.corrcoef(d, d), np.corrcoef(x, x))\n\n y = np.arange(8)\n e = da.from_array(y, chunks=(4,))\n\n assert_eq(da.corrcoef(d, e), np.corrcoef(x, y))\n assert_eq(da.corrcoef(e, d), np.corrcoef(y, x))\n\n\ndef test_round():\n x = np.random.random(10)\n d = da.from_array(x, chunks=4)\n\n for i in (0, 1, 4, 5):\n assert_eq(x.round(i), d.round(i))\n\n assert_eq(d.round(2), da.round(d, 2))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_unique_kwargs_test_unique_kwargs.for_e_r_a_e_r_d_in_zip_r.assert_eq_e_r_d_e_r_a_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_unique_kwargs_test_unique_kwargs.for_e_r_a_e_r_d_in_zip_r.assert_eq_e_r_d_e_r_a_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 800, "end_line": 830, "span_ids": ["test_unique_kwargs"], "tokens": 251}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"return_index\", [False, True])\n@pytest.mark.parametrize(\"return_inverse\", [False, True])\n@pytest.mark.parametrize(\"return_counts\", [False, True])\ndef test_unique_kwargs(return_index, return_inverse, return_counts):\n kwargs = dict(\n return_index=return_index,\n return_inverse=return_inverse,\n return_counts=return_counts,\n )\n\n a = np.array([1, 2, 4, 4, 5, 2])\n d = da.from_array(a, chunks=(3,))\n\n r_a = np.unique(a, **kwargs)\n r_d = da.unique(d, **kwargs)\n\n if not any([return_index, return_inverse, return_counts]):\n assert isinstance(r_a, np.ndarray)\n assert isinstance(r_d, da.Array)\n\n r_a = (r_a,)\n r_d = (r_d,)\n\n assert len(r_a) == len(r_d)\n\n if return_inverse:\n i = 1 + int(return_index)\n assert (d.size,) == r_d[i].shape\n\n for e_r_a, e_r_d in zip(r_a, r_d):\n assert_eq(e_r_d, e_r_a)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": 
"{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_unique_rand_test_unique_rand.for_e_r_a_e_r_d_in_zip_r.assert_eq_e_r_d_e_r_a_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_unique_rand_test_unique_rand.for_e_r_a_e_r_d_in_zip_r.assert_eq_e_r_d_e_r_a_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 833, "end_line": 855, "span_ids": ["test_unique_rand"], "tokens": 215}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"seed\", [23, 796])\n@pytest.mark.parametrize(\"low, high\", [[0, 10]])\n@pytest.mark.parametrize(\n \"shape, chunks\",\n [[(10,), (5,)], [(10,), (3,)], [(4, 5), (3, 2)], [(20, 20), (4, 5)]],\n)\ndef test_unique_rand(seed, low, high, shape, chunks):\n np.random.seed(seed)\n\n a = np.random.randint(low, high, size=shape)\n d = da.from_array(a, chunks=chunks)\n\n kwargs = dict(return_index=True, return_inverse=True, return_counts=True)\n\n r_a = np.unique(a, **kwargs)\n r_d = da.unique(d, **kwargs)\n\n assert len(r_a) == len(r_d)\n\n assert (d.size,) == r_d[2].shape\n\n for e_r_a, e_r_d in zip(r_a, r_d):\n assert_eq(e_r_d, e_r_a)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_isin_rand_test_isin_rand.assert_eq_r_a_r_d_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_isin_rand_test_isin_rand.assert_eq_r_a_r_d_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 858, "end_line": 883, "span_ids": ["test_isin_rand"], "tokens": 294}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"seed\", [23, 796])\n@pytest.mark.parametrize(\"low, high\", [[0, 10]])\n@pytest.mark.parametrize(\n \"elements_shape, elements_chunks\",\n [[(10,), (5,)], [(10,), (3,)], [(4, 5), (3, 2)], [(20, 20), (4, 5)]],\n)\n@pytest.mark.parametrize(\n \"test_shape, test_chunks\",\n [[(10,), (5,)], [(10,), (3,)], [(4, 5), (3, 2)], [(20, 20), (4, 5)]],\n)\n@pytest.mark.parametrize(\"invert\", [True, False])\ndef test_isin_rand(\n seed, low, high, elements_shape, elements_chunks, test_shape, test_chunks, invert\n):\n rng = np.random.RandomState(seed)\n\n a1 = rng.randint(low, high, size=elements_shape)\n d1 = da.from_array(a1, 
chunks=elements_chunks)\n\n a2 = rng.randint(low, high, size=test_shape) - 5\n d2 = da.from_array(a2, chunks=test_chunks)\n\n with pytest.warns(None):\n r_a = np.isin(a1, a2, invert=invert)\n r_d = da.isin(d1, d2, invert=invert)\n assert_eq(r_a, r_d)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_isin_assume_unique__maybe_len.try_.except_TypeError_.return.0": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_isin_assume_unique__maybe_len.try_.except_TypeError_.return.0", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 886, "end_line": 901, "span_ids": ["_maybe_len", "test_isin_assume_unique"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"assume_unique\", [True, False])\ndef test_isin_assume_unique(assume_unique):\n a1 = np.arange(10)\n d1 = da.from_array(a1, chunks=(5,))\n\n test_elements = np.arange(0, 10, 2)\n r_a = np.isin(a1, test_elements, assume_unique=assume_unique)\n r_d = da.isin(d1, test_elements, assume_unique=assume_unique)\n assert_eq(r_a, r_d)\n\n\ndef _maybe_len(l):\n try:\n return len(l)\n except TypeError:\n return 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_roll_test_shape.assert_np_shape_x_sha": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_roll_test_shape.assert_np_shape_x_sha", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 904, "end_line": 921, "span_ids": ["test_shape", "test_roll"], "tokens": 214}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"chunks\", [(4, 6), (2, 6)])\n@pytest.mark.parametrize(\"shift\", [3, 7, 9, (3, 9), (7, 2)])\n@pytest.mark.parametrize(\"axis\", [None, 0, 1, -1, (0, 1), (1, 0)])\ndef test_roll(chunks, shift, axis):\n x = np.random.randint(10, size=(4, 6))\n a = da.from_array(x, chunks=chunks)\n\n if _maybe_len(shift) != _maybe_len(axis):\n with pytest.raises(TypeError if axis is None else ValueError):\n da.roll(a, shift, axis)\n else:\n assert_eq(np.roll(x, shift, axis), da.roll(a, shift, 
axis))\n\n\n@pytest.mark.parametrize(\"shape\", [(10,), (5, 10), (5, 10, 10)])\ndef test_shape(shape):\n x = da.random.random(shape)\n assert np.shape(x) == shape", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_union1d_test_union1d.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_union1d_test_union1d.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 924, "end_line": 948, "span_ids": ["test_union1d"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"shape\", [((12,), (12,)), ((4, 3), (3, 4)), ((12,), (1, 6, 2))]\n)\n@pytest.mark.parametrize(\"reverse\", [True, False])\ndef test_union1d(shape, reverse):\n if any(len(x) > 1 for x in shape) and not _numpy_115:\n pytest.skip(\"NumPy-10563.\")\n\n s1, s2 = shape\n x1 = np.arange(12).reshape(s1)\n x2 = np.arange(6, 18).reshape(s2)\n\n if reverse:\n x1 = x1[::-1]\n\n dx1 = da.from_array(x1)\n dx2 = da.from_array(x2)\n\n result = np.union1d(dx1, dx2)\n expected = np.union1d(x1, x2)\n\n if IS_NEP18_ACTIVE:\n assert isinstance(result, da.Array)\n\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_ravel_test_ravel_1D_no_op.assert_eq_dx_dx_2_rave": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_ravel_test_ravel_1D_no_op.assert_eq_dx_dx_2_rave", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 951, "end_line": 983, "span_ids": ["test_ravel_1D_no_op", "test_ravel"], "tokens": 299}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_ravel():\n x = np.random.randint(10, size=(4, 6))\n\n # 2d\n for chunks in [(4, 6), (2, 6)]:\n a = da.from_array(x, chunks=chunks)\n assert_eq(x.ravel(), a.ravel())\n assert len(a.ravel().dask) == len(a.dask) + len(a.chunks[0])\n\n # 0d\n assert_eq(x[0, 0].ravel(), a[0, 0].ravel())\n\n # 1d\n a_flat = a.ravel()\n assert_eq(a_flat.ravel(), a_flat)\n\n # 3d\n x = np.random.randint(10, size=(2, 3, 4))\n for chunks in [4, (1, 3, 4)]:\n a = da.from_array(x, 
chunks=chunks)\n assert_eq(x.ravel(), a.ravel())\n\n assert_eq(x.flatten(), a.flatten())\n assert_eq(np.ravel(x), da.ravel(a))\n\n\ndef test_ravel_1D_no_op():\n x = np.random.randint(10, size=100)\n dx = da.from_array(x, chunks=10)\n # known dims\n assert_eq(dx.ravel(), x.ravel())\n # Unknown dims\n assert_eq(dx[dx > 2].ravel(), x[x > 2].ravel())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_squeeze_test_squeeze.assert_d_s_chunks_exp_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_squeeze_test_squeeze.assert_d_s_chunks_exp_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 986, "end_line": 1010, "span_ids": ["test_squeeze"], "tokens": 260}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"is_func\", [True, False])\n@pytest.mark.parametrize(\"axis\", [None, 0, -1, (0, -1)])\ndef test_squeeze(is_func, axis):\n a = np.arange(10)[None, :, None, None]\n d = da.from_array(a, chunks=(1, 3, 1, 1))\n\n if is_func:\n a_s = np.squeeze(a, axis=axis)\n d_s = da.squeeze(d, axis=axis)\n else:\n a_s = a.squeeze(axis=axis)\n d_s = d.squeeze(axis=axis)\n\n assert_eq(d_s, a_s)\n assert same_keys(d_s, da.squeeze(d, axis=axis))\n\n if axis is None:\n axis = tuple(range(a.ndim))\n else:\n axis = axis if isinstance(axis, tuple) else (axis,)\n axis = tuple(i % a.ndim for i in axis)\n axis = tuple(i for i, c in enumerate(d.chunks) if i in axis and len(c) == 1)\n\n exp_d_s_chunks = tuple(c for i, c in enumerate(d.chunks) if i not in axis)\n assert d_s.chunks == exp_d_s_chunks", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_vstack_test_hstack.assert_eq_np_hstack_x_y": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_vstack_test_hstack.assert_eq_np_hstack_x_y", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1013, "end_line": 1030, "span_ids": ["test_vstack", "test_hstack"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_vstack():\n x = np.arange(5)\n y = np.ones(5)\n a = da.arange(5, chunks=2)\n b = da.ones(5, 
chunks=2)\n\n assert_eq(np.vstack((x, y)), da.vstack((a, b)))\n assert_eq(np.vstack((x, y[None, :])), da.vstack((a, b[None, :])))\n\n\ndef test_hstack():\n x = np.arange(5)\n y = np.ones(5)\n a = da.arange(5, chunks=2)\n b = da.ones(5, chunks=2)\n\n assert_eq(np.hstack((x[None, :], y[None, :])), da.hstack((a[None, :], b[None, :])))\n assert_eq(np.hstack((x, y)), da.hstack((a, b)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_dstack_test_dstack.assert_eq_np_dstack_x_y": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_dstack_test_dstack.assert_eq_np_dstack_x_y", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1033, "end_line": 1044, "span_ids": ["test_dstack"], "tokens": 143}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dstack():\n x = np.arange(5)\n y = np.ones(5)\n a = da.arange(5, chunks=2)\n b = da.ones(5, chunks=2)\n\n assert_eq(\n np.dstack((x[None, None, :], y[None, None, :])),\n da.dstack((a[None, None, :], b[None, None, :])),\n )\n assert_eq(np.dstack((x[None, :], y[None, :])), da.dstack((a[None, :], b[None, :])))\n assert_eq(np.dstack((x, y)), da.dstack((a, b)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_stack_unknown_chunk_sizes_test_stack_unknown_chunk_sizes.assert_eq_np_stacked_dsk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_stack_unknown_chunk_sizes_test_stack_unknown_chunk_sizes.assert_eq_np_stacked_dsk", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1047, "end_line": 1065, "span_ids": ["test_stack_unknown_chunk_sizes"], "tokens": 191}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"np_func,dsk_func,nan_chunk\",\n [(np.hstack, da.hstack, 0), (np.dstack, da.dstack, 1), (np.vstack, da.vstack, 2)],\n)\ndef test_stack_unknown_chunk_sizes(np_func, dsk_func, nan_chunk):\n shape = (100, 100, 100)\n x = da.ones(shape, chunks=(50, 50, 50))\n y = np.ones(shape)\n\n tmp = list(x._chunks)\n tmp[nan_chunk] = (np.nan,) * 2\n x._chunks = tuple(tmp)\n\n with pytest.raises(ValueError):\n 
dsk_func((x, x))\n\n np_stacked = np_func((y, y))\n dsk_stacked = dsk_func((x, x), allow_unknown_chunksizes=True)\n assert_eq(np_stacked, dsk_stacked)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_take_test_take.assert_same_keys_da_take_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_take_test_take.assert_same_keys_da_take_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1068, "end_line": 1078, "span_ids": ["test_take"], "tokens": 152}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_take():\n x = np.arange(400).reshape((20, 20))\n a = da.from_array(x, chunks=(5, 5))\n\n assert_eq(np.take(x, 3, axis=0), da.take(a, 3, axis=0))\n assert_eq(np.take(x, [3, 4, 5], axis=-1), da.take(a, [3, 4, 5], axis=-1))\n\n with pytest.raises(ValueError):\n da.take(a, 3, axis=2)\n\n assert same_keys(da.take(a, [3, 4, 5], axis=-1), da.take(a, [3, 4, 5], axis=-1))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_take_dask_from_numpy_test_take_dask_from_numpy.assert_eq_z_np_array_2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_take_dask_from_numpy_test_take_dask_from_numpy.assert_eq_z_np_array_2_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1081, "end_line": 1088, "span_ids": ["test_take_dask_from_numpy"], "tokens": 110}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_take_dask_from_numpy():\n x = np.arange(5).astype(\"f8\")\n y = da.from_array(np.array([1, 2, 3, 3, 2, 1]), chunks=3)\n\n z = da.take(x * 2, y)\n\n assert z.chunks == y.chunks\n assert_eq(z, np.array([2.0, 4.0, 6.0, 6.0, 4.0, 2.0]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_compress_test_compress.None_2.da_compress_True_Fal": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_compress_test_compress.None_2.da_compress_True_Fal", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1091, "end_line": 1123, "span_ids": ["test_compress"], "tokens": 376}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_compress():\n x = np.arange(25).reshape((5, 5))\n a = da.from_array(x, chunks=(2, 2))\n\n c1 = np.array([True, False, True, False, True])\n c2 = np.array([True, False])\n c3 = [True, False]\n dc1 = da.from_array(c1, chunks=3)\n dc2 = da.from_array(c2, chunks=2)\n\n for c, dc in [(c1, c1), (c2, c2), (c3, c3), (c1, dc1), (c2, dc2), (c3, dc2)]:\n for axis in [None, 0, 1]:\n res = da.compress(dc, a, axis=axis)\n assert_eq(np.compress(c, x, axis=axis), res)\n if isinstance(dc, da.Array):\n # If condition is a dask array then we expect the shape of the\n # compressed array to be nan, because we won't know that until\n # the result is computed.\n axis = axis or 0\n assert np.isnan(res.shape[axis]).all()\n assert np.isnan(res.chunks[axis]).all()\n else:\n # If condition is a not a dask array then we expect the shape of the\n # compressed axis to be known, i.e., not nan.\n axis = axis or 0\n assert np.count_nonzero(dc) == res.shape[axis]\n assert not np.isnan(res.chunks[axis]).any()\n\n with pytest.raises(ValueError):\n da.compress([True, False], a, axis=100)\n\n with pytest.raises(ValueError):\n da.compress([[True], [False]], a, axis=100)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_extract_test_extract.for_c_dc_in_c1_c1_.if_isinstance_dc_da_Arra.assert_np_isnan_res_chunk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_extract_test_extract.for_c_dc_in_c1_c1_.if_isinstance_dc_da_Arra.assert_np_isnan_res_chunk", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1126, "end_line": 1141, "span_ids": ["test_extract"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_extract():\n x = np.arange(25).reshape((5, 5))\n a = da.from_array(x, chunks=(2, 2))\n\n c1 = np.array([True, False, True, False, True])\n c2 = np.array([[True, False], [True, False]])\n c3 = np.array([True, False])\n dc1 = da.from_array(c1, chunks=3)\n dc2 = da.from_array(c2, chunks=(2, 1))\n dc3 = da.from_array(c3, chunks=2)\n\n for c, dc in [(c1, c1), (c2, c2), (c3, c3), (c1, dc1), (c2, 
dc2), (c3, dc3)]:\n res = da.extract(dc, a)\n assert_eq(np.extract(c, x), res)\n if isinstance(dc, da.Array):\n assert np.isnan(res.chunks[0]).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_isnull_test_isclose.assert_eq_da_isclose_a_b": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_isnull_test_isclose.assert_eq_da_isclose_a_b", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1144, "end_line": 1165, "span_ids": ["test_isnull_result_is_an_array", "test_isnull", "test_isclose"], "tokens": 229}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_isnull():\n x = np.array([1, np.nan])\n a = da.from_array(x, chunks=(2,))\n with ignoring(ImportError):\n assert_eq(da.isnull(a), np.isnan(x))\n assert_eq(da.notnull(a), ~(np.isnan(x)))\n\n\ndef test_isnull_result_is_an_array():\n # regression test for https://github.com/dask/dask/issues/3822\n arr = da.from_array(np.arange(3, dtype=np.int64), chunks=-1)\n with ignoring(ImportError):\n result = da.isnull(arr[0]).compute()\n assert type(result) is np.ndarray\n\n\ndef test_isclose():\n x = np.array([0, np.nan, 1, 1.5])\n y = np.array([1e-9, np.nan, 1, 2])\n a = da.from_array(x, chunks=(2,))\n b = da.from_array(y, chunks=(2,))\n assert_eq(da.isclose(a, b, equal_nan=True), np.isclose(x, y, equal_nan=True))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_allclose_test_allclose.assert_eq_np_array_n_r_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_allclose_test_allclose.assert_eq_np_array_n_r_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1168, "end_line": 1178, "span_ids": ["test_allclose"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_allclose():\n n_a = np.array([0, np.nan, 1, 1.5])\n n_b = np.array([1e-9, np.nan, 1, 2])\n\n d_a = da.from_array(n_a, chunks=(2,))\n d_b = da.from_array(n_b, chunks=(2,))\n\n n_r = np.allclose(n_a, n_b, equal_nan=True)\n d_r = da.allclose(d_a, d_b, equal_nan=True)\n\n assert_eq(np.array(n_r)[()], d_r)", "start_char_idx": null, 
"end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_choose_test_choose.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_choose_test_choose.None_3", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1181, "end_line": 1193, "span_ids": ["test_choose"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_choose():\n # test choose function\n x = np.random.randint(10, size=(15, 16))\n d = da.from_array(x, chunks=(4, 5))\n\n assert_eq(da.choose(d > 5, [0, d]), np.choose(x > 5, [0, x]))\n assert_eq(da.choose(d > 5, [-d, d]), np.choose(x > 5, [-x, x]))\n\n # test choose method\n index_dask = d > 5\n index_numpy = x > 5\n assert_eq(index_dask.choose([0, d]), index_numpy.choose([0, x]))\n assert_eq(index_dask.choose([-d, d]), index_numpy.choose([-x, x]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_piecewise_test_piecewise.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_piecewise_test_piecewise.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1196, "end_line": 1205, "span_ids": ["test_piecewise"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_piecewise():\n np.random.seed(1337)\n\n x = np.random.randint(10, size=(15, 16))\n d = da.from_array(x, chunks=(4, 5))\n\n assert_eq(\n np.piecewise(x, [x < 5, x >= 5], [lambda e, v, k: e + 1, 5], 1, k=2),\n da.piecewise(d, [d < 5, d >= 5], [lambda e, v, k: e + 1, 5], 1, k=2),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_piecewise_otherwise_test_piecewise_otherwise.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_piecewise_otherwise_test_piecewise_otherwise.assert_eq_", 
"embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1208, "end_line": 1229, "span_ids": ["test_piecewise_otherwise"], "tokens": 184}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_piecewise_otherwise():\n np.random.seed(1337)\n\n x = np.random.randint(10, size=(15, 16))\n d = da.from_array(x, chunks=(4, 5))\n\n assert_eq(\n np.piecewise(\n x,\n [x > 5, x <= 2],\n [lambda e, v, k: e + 1, lambda e, v, k: v * e, lambda e, v, k: 0],\n 1,\n k=2,\n ),\n da.piecewise(\n d,\n [d > 5, d <= 2],\n [lambda e, v, k: e + 1, lambda e, v, k: v * e, lambda e, v, k: 0],\n 1,\n k=2,\n ),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_argwhere_test_argwhere_str.assert_eq_d_nz_x_nz_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_argwhere_test_argwhere_str.assert_eq_d_nz_x_nz_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1232, "end_line": 1263, "span_ids": ["test_argwhere", "test_argwhere_str", "test_argwhere_obj"], "tokens": 270}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_argwhere():\n for shape, chunks in [(0, ()), ((0, 0), (0, 0)), ((15, 16), (4, 5))]:\n x = np.random.randint(10, size=shape)\n d = da.from_array(x, chunks=chunks)\n\n x_nz = np.argwhere(x)\n d_nz = da.argwhere(d)\n\n assert_eq(d_nz, x_nz)\n\n\ndef test_argwhere_obj():\n x = np.random.randint(10, size=(15, 16)).astype(object)\n d = da.from_array(x, chunks=(4, 5))\n\n x_nz = np.argwhere(x)\n d_nz = da.argwhere(d)\n\n assert_eq(d_nz, x_nz)\n\n\ndef test_argwhere_str():\n # We may have behavior differences with NumPy for strings\n # with just spaces, depending on the version of NumPy.\n # https://github.com/numpy/numpy/issues/9875\n x = np.array(list(\"Hello world\"))\n d = da.from_array(x, chunks=(4,))\n\n x_nz = np.argwhere(x)\n d_nz = da.argwhere(d)\n\n assert_eq(d_nz, x_nz)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_where_test_where.for_c1_c2_in_.for_b1_b2_in_0_0_.assert_eq_w1_w2_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_where_test_where.for_c1_c2_in_.for_b1_b2_in_0_0_.assert_eq_w1_w2_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1266, "end_line": 1287, "span_ids": ["test_where"], "tokens": 244}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_where():\n x = np.random.randint(10, size=(15, 14))\n x[5, 5] = x[4, 4] = 0 # Ensure some false elements\n d = da.from_array(x, chunks=(4, 5))\n y = np.random.randint(10, size=15).astype(np.uint8)\n e = da.from_array(y, chunks=(4,))\n\n for c1, c2 in [\n (d > 5, x > 5),\n (d, x),\n (1, 1),\n (0, 0),\n (5, 5),\n (True, True),\n (np.True_, np.True_),\n (False, False),\n (np.False_, np.False_),\n ]:\n for b1, b2 in [(0, 0), (-e[:, None], -y[:, None]), (e[:14], y[:14])]:\n w1 = da.where(c1, d, b1)\n w2 = np.where(c2, x, b2)\n assert_eq(w1, w2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_where_scalar_dtype_test_where_scalar_dtype.assert_eq_w3_w4_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_where_scalar_dtype_test_where_scalar_dtype.assert_eq_w3_w4_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1290, "end_line": 1302, "span_ids": ["test_where_scalar_dtype"], "tokens": 159}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_where_scalar_dtype():\n x = np.int32(3)\n y1 = np.array([4, 5, 6], dtype=np.int16)\n c1 = np.array([1, 0, 1])\n y2 = da.from_array(y1, chunks=2)\n c2 = da.from_array(c1, chunks=2)\n w1 = np.where(c1, x, y1)\n w2 = da.where(c2, x, y2)\n assert_eq(w1, w2)\n # Test again for the bool optimization\n w3 = np.where(True, x, y1)\n w4 = da.where(True, x, y1)\n assert_eq(w3, w4)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_where_bool_optimization_test_where_bool_optimization.for_c_in_True_False_np.assert_w1_is_ex_w1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_where_bool_optimization_test_where_bool_optimization.for_c_in_True_False_np.assert_w1_is_ex_w1", 
"embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1305, "end_line": 1319, "span_ids": ["test_where_bool_optimization"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_where_bool_optimization():\n x = np.random.randint(10, size=(15, 16))\n d = da.from_array(x, chunks=(4, 5))\n y = np.random.randint(10, size=(15, 16))\n e = da.from_array(y, chunks=(4, 5))\n\n for c in [True, False, np.True_, np.False_, 1, 0]:\n w1 = da.where(c, d, e)\n w2 = np.where(c, x, y)\n\n assert_eq(w1, w2)\n\n ex_w1 = d if c else e\n\n assert w1 is ex_w1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_where_nonzero_test_where_nonzero.for_shape_chunks_in_0_.for_i_in_range_len_x_w_.assert_eq_d_w_i_x_w_i_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_where_nonzero_test_where_nonzero.for_shape_chunks_in_0_.for_i_in_range_len_x_w_.assert_eq_d_w_i_x_w_i_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1322, "end_line": 1334, "span_ids": ["test_where_nonzero"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_where_nonzero():\n for shape, chunks in [(0, ()), ((0, 0), (0, 0)), ((15, 16), (4, 5))]:\n x = np.random.randint(10, size=shape)\n d = da.from_array(x, chunks=chunks)\n\n x_w = np.where(x)\n d_w = da.where(d)\n\n assert isinstance(d_w, type(x_w))\n assert len(d_w) == len(x_w)\n\n for i in range(len(x_w)):\n assert_eq(d_w[i], x_w[i])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_where_incorrect_args_test_count_nonzero.for_shape_chunks_in_0_.if_d_c_shape_tuple_.else_.assert_eq_x_c_d_c_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_where_incorrect_args_test_count_nonzero.for_shape_chunks_in_0_.if_d_c_shape_tuple_.else_.assert_eq_x_c_d_c_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1337, "end_line": 1359, "span_ids": ["test_count_nonzero", "test_where_incorrect_args"], "tokens": 194}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_where_incorrect_args():\n a = da.ones(5, chunks=3)\n\n for kwd in [\"x\", \"y\"]:\n kwargs = {kwd: a}\n try:\n da.where(a > 0, **kwargs)\n except ValueError as e:\n assert \"either both or neither of x and y should be given\" in str(e)\n\n\ndef test_count_nonzero():\n for shape, chunks in [(0, ()), ((0, 0), (0, 0)), ((15, 16), (4, 5))]:\n x = np.random.randint(10, size=shape)\n d = da.from_array(x, chunks=chunks)\n\n x_c = np.count_nonzero(x)\n d_c = da.count_nonzero(d)\n\n if d_c.shape == tuple():\n assert x_c == d_c.compute()\n else:\n assert_eq(x_c, d_c)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_count_nonzero_axis_test_count_nonzero_obj.if_d_c_shape_tuple_.else_.assert_eq_x_c_d_c_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_count_nonzero_axis_test_count_nonzero_obj.if_d_c_shape_tuple_.else_.assert_eq_x_c_d_c_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1362, "end_line": 1387, "span_ids": ["test_count_nonzero_axis", "test_count_nonzero_obj"], "tokens": 229}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"axis\", [None, 0, (1,), (0, 1)])\ndef test_count_nonzero_axis(axis):\n for shape, chunks in [((0, 0), (0, 0)), ((15, 16), (4, 5))]:\n x = np.random.randint(10, size=shape)\n d = da.from_array(x, chunks=chunks)\n\n x_c = np.count_nonzero(x, axis)\n d_c = da.count_nonzero(d, axis)\n\n if d_c.shape == tuple():\n assert x_c == d_c.compute()\n else:\n assert_eq(x_c, d_c)\n\n\ndef test_count_nonzero_obj():\n x = np.random.randint(10, size=(15, 16)).astype(object)\n d = da.from_array(x, chunks=(4, 5))\n\n x_c = np.count_nonzero(x)\n d_c = da.count_nonzero(d)\n\n if d_c.shape == tuple():\n assert x_c == d_c.compute()\n else:\n assert_eq(x_c, d_c)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_count_nonzero_obj_axis_test_count_nonzero_obj_axis.if_d_c_shape_tuple_.else_.assert_eq_x_c_astype_np_i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_count_nonzero_obj_axis_test_count_nonzero_obj_axis.if_d_c_shape_tuple_.else_.assert_eq_x_c_astype_np_i", "embedding": null, "metadata": 
{"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1390, "end_line": 1406, "span_ids": ["test_count_nonzero_obj_axis"], "tokens": 164}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"axis\", [None, 0, (1,), (0, 1)])\ndef test_count_nonzero_obj_axis(axis):\n x = np.random.randint(10, size=(15, 16)).astype(object)\n d = da.from_array(x, chunks=(4, 5))\n\n x_c = np.count_nonzero(x, axis)\n d_c = da.count_nonzero(d, axis)\n\n if d_c.shape == tuple():\n assert x_c == d_c.compute()\n else:\n #######################################################\n # Workaround oddness with Windows and object arrays. #\n # #\n # xref: https://github.com/numpy/numpy/issues/9468 #\n #######################################################\n assert_eq(x_c.astype(np.intp), d_c)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_count_nonzero_str_test_flatnonzero.for_shape_chunks_in_0_.assert_eq_d_fnz_x_fnz_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_count_nonzero_str_test_flatnonzero.for_shape_chunks_in_0_.assert_eq_d_fnz_x_fnz_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1409, "end_line": 1430, "span_ids": ["test_count_nonzero_str", "test_flatnonzero"], "tokens": 203}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_count_nonzero_str():\n # We may have behavior differences with NumPy for strings\n # with just spaces, depending on the version of NumPy.\n # https://github.com/numpy/numpy/issues/9875\n x = np.array(list(\"Hellow orld\"))\n d = da.from_array(x, chunks=(4,))\n\n x_c = np.count_nonzero(x)\n d_c = da.count_nonzero(d)\n\n assert x_c == d_c.compute()\n\n\ndef test_flatnonzero():\n for shape, chunks in [(0, ()), ((0, 0), (0, 0)), ((15, 16), (4, 5))]:\n x = np.random.randint(10, size=shape)\n d = da.from_array(x, chunks=chunks)\n\n x_fnz = np.flatnonzero(x)\n d_fnz = da.flatnonzero(d)\n\n assert_eq(d_fnz, x_fnz)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_nonzero_test_nonzero.for_shape_chunks_in_0_.for_i_in_range_len_x_nz_.assert_eq_d_nz_i_x_nz_i": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_nonzero_test_nonzero.for_shape_chunks_in_0_.for_i_in_range_len_x_nz_.assert_eq_d_nz_i_x_nz_i", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1433, "end_line": 1445, "span_ids": ["test_nonzero"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_nonzero():\n for shape, chunks in [(0, ()), ((0, 0), (0, 0)), ((15, 16), (4, 5))]:\n x = np.random.randint(10, size=shape)\n d = da.from_array(x, chunks=chunks)\n\n x_nz = np.nonzero(x)\n d_nz = da.nonzero(d)\n\n assert isinstance(d_nz, type(x_nz))\n assert len(d_nz) == len(x_nz)\n\n for i in range(len(x_nz)):\n assert_eq(d_nz[i], x_nz[i])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_nonzero_method_test_nonzero_method.for_shape_chunks_in_0_.for_i_in_range_len_x_nz_.assert_eq_d_nz_i_x_nz_i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_nonzero_method_test_nonzero_method.for_shape_chunks_in_0_.for_i_in_range_len_x_nz_.assert_eq_d_nz_i_x_nz_i", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1448, "end_line": 1460, "span_ids": ["test_nonzero_method"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_nonzero_method():\n for shape, chunks in [(0, ()), ((0, 0), (0, 0)), ((15, 16), (4, 5))]:\n x = np.random.randint(10, size=shape)\n d = da.from_array(x, chunks=chunks)\n\n x_nz = x.nonzero()\n d_nz = d.nonzero()\n\n assert isinstance(d_nz, type(x_nz))\n assert len(d_nz) == len(x_nz)\n\n for i in range(len(x_nz)):\n assert_eq(d_nz[i], x_nz[i])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_unravel_index_empty_test_unravel_index_empty.assert_len_d_indices_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_unravel_index_empty_test_unravel_index_empty.assert_len_d_indices_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", 
"start_line": 1463, "end_line": 1476, "span_ids": ["test_unravel_index_empty"], "tokens": 139}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n LooseVersion(np.__version__) < LooseVersion(\"1.14.0\"),\n reason=\"NumPy 1.14.0+ needed for `unravel_index` to take an empty shape.\",\n)\ndef test_unravel_index_empty():\n shape = tuple()\n findices = np.array(0, dtype=int)\n d_findices = da.from_array(findices, chunks=1)\n\n indices = np.unravel_index(findices, shape)\n d_indices = da.unravel_index(d_findices, shape)\n\n assert isinstance(d_indices, type(indices))\n assert len(d_indices) == len(indices) == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_unravel_index_test_unravel_index.for_nindices_shape_orde.assert_eq_darr_vindex_d_i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_unravel_index_test_unravel_index.for_nindices_shape_orde.assert_eq_darr_vindex_d_i", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1479, "end_line": 1503, "span_ids": ["test_unravel_index"], "tokens": 223}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_unravel_index():\n for nindices, shape, order in [\n (0, (15,), \"C\"),\n (1, (15,), \"C\"),\n (3, (15,), \"C\"),\n (3, (15,), \"F\"),\n (2, (15, 16), \"C\"),\n (2, (15, 16), \"F\"),\n ]:\n arr = np.random.random(shape)\n darr = da.from_array(arr, chunks=1)\n\n findices = np.random.randint(np.prod(shape, dtype=int), size=nindices)\n d_findices = da.from_array(findices, chunks=1)\n\n indices = np.unravel_index(findices, shape, order)\n d_indices = da.unravel_index(d_findices, shape, order)\n\n assert isinstance(d_indices, type(indices))\n assert len(d_indices) == len(indices)\n\n for i in range(len(indices)):\n assert_eq(d_indices[i], indices[i])\n\n assert_eq(darr.vindex[d_indices], arr[indices])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_coarsen_test_coarsen.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_coarsen_test_coarsen.None_2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", 
"start_line": 1506, "end_line": 1519, "span_ids": ["test_coarsen"], "tokens": 198}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_coarsen():\n x = np.random.randint(10, size=(24, 24))\n d = da.from_array(x, chunks=(4, 8))\n\n assert_eq(\n da.chunk.coarsen(np.sum, x, {0: 2, 1: 4}), da.coarsen(np.sum, d, {0: 2, 1: 4})\n )\n assert_eq(\n da.chunk.coarsen(np.sum, x, {0: 2, 1: 4}), da.coarsen(da.sum, d, {0: 2, 1: 4})\n )\n assert_eq(\n da.chunk.coarsen(np.mean, x, {0: 2, 1: 4}, dtype=\"float32\"),\n da.coarsen(da.mean, d, {0: 2, 1: 4}, dtype=\"float32\"),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_coarsen_with_excess_test_coarsen_bad_chunks.assert_eq_da_coarsen_np_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_coarsen_with_excess_test_coarsen_bad_chunks.assert_eq_da_coarsen_np_s", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1522, "end_line": 1535, "span_ids": ["test_coarsen_bad_chunks", "test_coarsen_with_excess"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_coarsen_with_excess():\n x = da.arange(10, chunks=5)\n assert_eq(da.coarsen(np.min, x, {0: 5}, trim_excess=True), np.array([0, 5]))\n assert_eq(\n da.coarsen(np.sum, x, {0: 3}, trim_excess=True),\n np.array([0 + 1 + 2, 3 + 4 + 5, 6 + 7 + 8]),\n )\n\n\ndef test_coarsen_bad_chunks():\n\n x1 = da.arange(10, chunks=5)\n x2 = x1.rechunk((1, 2, 3, 4))\n assert_eq(da.coarsen(np.sum, x1, {0: 5}), da.coarsen(np.sum, x2, {0: 5}))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_aligned_coarsen_chunks_test_aligned_coarsen_chunks.assert_acc_10_20_30_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_aligned_coarsen_chunks_test_aligned_coarsen_chunks.assert_acc_10_20_30_4", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1538, "end_line": 1546, "span_ids": ["test_aligned_coarsen_chunks"], "tokens": 234}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", 
"end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_aligned_coarsen_chunks():\n\n from ..routines import aligned_coarsen_chunks as acc\n\n assert acc((20, 10, 15, 23, 24), 10) == (20, 10, 20, 20, 20, 2)\n assert acc((20, 10, 15, 42, 23, 24), 10) == (20, 10, 20, 40, 20, 20, 4)\n assert acc((20, 10, 15, 47, 23, 24), 10) == (20, 10, 20, 50, 20, 10, 9)\n assert acc((2, 10, 15, 47, 23, 24), 10) == (10, 20, 50, 20, 20, 1)\n assert acc((10, 20, 30, 40, 2), 10) == (10, 20, 30, 40, 2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_insert_test_insert.None_2.da_insert_a_3_1_axi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_insert_test_insert.None_2.da_insert_a_3_1_axi", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1549, "end_line": 1585, "span_ids": ["test_insert"], "tokens": 571}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_insert():\n x = np.random.randint(10, size=(10, 10))\n a = da.from_array(x, chunks=(5, 5))\n y = np.random.randint(10, size=(5, 10))\n b = da.from_array(y, chunks=(4, 4))\n\n assert_eq(np.insert(x, 0, -1, axis=0), da.insert(a, 0, -1, axis=0))\n assert_eq(np.insert(x, 3, -1, axis=-1), da.insert(a, 3, -1, axis=-1))\n assert_eq(np.insert(x, 5, -1, axis=1), da.insert(a, 5, -1, axis=1))\n assert_eq(np.insert(x, -1, -1, axis=-2), da.insert(a, -1, -1, axis=-2))\n assert_eq(np.insert(x, [2, 3, 3], -1, axis=1), da.insert(a, [2, 3, 3], -1, axis=1))\n assert_eq(\n np.insert(x, [2, 3, 8, 8, -2, -2], -1, axis=0),\n da.insert(a, [2, 3, 8, 8, -2, -2], -1, axis=0),\n )\n assert_eq(\n np.insert(x, slice(1, 4), -1, axis=1), da.insert(a, slice(1, 4), -1, axis=1)\n )\n assert_eq(\n np.insert(x, [2] * 3 + [5] * 2, y, axis=0),\n da.insert(a, [2] * 3 + [5] * 2, b, axis=0),\n )\n assert_eq(np.insert(x, 0, y[0], axis=1), da.insert(a, 0, b[0], axis=1))\n\n assert same_keys(\n da.insert(a, [2, 3, 8, 8, -2, -2], -1, axis=0),\n da.insert(a, [2, 3, 8, 8, -2, -2], -1, axis=0),\n )\n\n with pytest.raises(NotImplementedError):\n da.insert(a, [4, 2], -1, axis=0)\n\n with pytest.raises(AxisError):\n da.insert(a, [3], -1, axis=2)\n\n with pytest.raises(AxisError):\n da.insert(a, [3], -1, axis=-3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_multi_insert_test_result_type.None_10": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_multi_insert_test_result_type.None_10", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1588, "end_line": 1615, "span_ids": ["test_result_type", "test_multi_insert"], "tokens": 416}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_multi_insert():\n z = np.random.randint(10, size=(1, 2))\n c = da.from_array(z, chunks=(1, 2))\n assert_eq(\n np.insert(np.insert(z, [0, 1], -1, axis=0), [1], -1, axis=1),\n da.insert(da.insert(c, [0, 1], -1, axis=0), [1], -1, axis=1),\n )\n\n\ndef test_result_type():\n a = da.from_array(np.ones(5, np.float32), chunks=(3,))\n b = da.from_array(np.ones(5, np.int16), chunks=(3,))\n c = da.from_array(np.ones(5, np.int64), chunks=(3,))\n x = np.ones(5, np.float32)\n assert da.result_type(b, c) == np.int64\n assert da.result_type(a, b, c) == np.float64\n assert da.result_type(b, np.float32) == np.float32\n assert da.result_type(b, np.dtype(np.float32)) == np.float32\n assert da.result_type(b, x) == np.float32\n # Effect of scalars depends on their value\n assert da.result_type(1, b) == np.int16\n assert da.result_type(1.0, a) == np.float32\n assert da.result_type(np.int64(1), b) == np.int16\n assert da.result_type(np.ones((), np.int64), b) == np.int16 # 0d array\n assert da.result_type(1e200, a) == np.float64 # 1e200 is too big for float32\n # dask 0d-arrays are NOT treated like scalars\n c = da.from_array(np.ones((), np.float64), chunks=())\n assert da.result_type(a, c) == np.float64", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py__numpy_and_dask_inputs__numpy_and_dask_inputs.return.np_inputs_da_inputs": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py__numpy_and_dask_inputs__numpy_and_dask_inputs.return.np_inputs_da_inputs", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1618, "end_line": 1657, "span_ids": ["_numpy_and_dask_inputs"], "tokens": 313}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _numpy_and_dask_inputs(input_sigs):\n # einsum label dimensions\n _dimensions = {\n \"a\": 5,\n \"b\": 6,\n \"c\": 7,\n \"d\": 5,\n \"e\": 6,\n \"f\": 10,\n \"g\": 1,\n \"h\": 2,\n \"*\": 11,\n }\n\n # dimension chunks sizes\n _chunks = {\n \"a\": (2, 3),\n \"b\": (2, 3, 1),\n \"c\": (2, 3, 2),\n \"d\": (4, 1),\n \"e\": (2, 4),\n \"f\": (1, 2, 3, 4),\n \"g\": 1,\n \"h\": 
(1, 1),\n \"*\": 11,\n }\n\n def _shape_from_string(s):\n return tuple(_dimensions[c] for c in s)\n\n def _chunks_from_string(s):\n return tuple(_chunks[c] for c in s)\n\n shapes = [_shape_from_string(s) for s in input_sigs]\n chunks = [_chunks_from_string(s) for s in input_sigs]\n\n np_inputs = [np.random.random(s) for s in shapes]\n da_inputs = [da.from_array(i, chunks=c) for i, c in zip(np_inputs, chunks)]\n\n return np_inputs, da_inputs", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_test_einsum.with_pytest_warns_None_.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_test_einsum.with_pytest_warns_None_.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1660, "end_line": 1703, "span_ids": ["test_einsum"], "tokens": 355}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"einsum_signature\",\n [\n \"abc,bad->abcd\",\n \"abcdef,bcdfg->abcdeg\",\n \"ea,fb,abcd,gc,hd->efgh\",\n \"ab,b\",\n \"aa\",\n \"a,a->\",\n \"a,a->a\",\n \"a,a\",\n \"a,b\",\n \"a,b,c\",\n \"a\",\n \"ba,b\",\n \"ba,b->\",\n \"defab,fedbc->defac\",\n \"ab...,bc...->ac...\",\n \"a...a\",\n \"abc...->cba...\",\n \"...ab->...a\",\n \"a...a->a...\",\n # Following 2 from # https://stackoverflow.com/a/19203475/1611416\n \"...abc,...abcd->...d\",\n \"ab...,b->ab...\",\n # https://github.com/dask/dask/pull/3412#discussion_r182413444\n \"aa->a\",\n \"ab,ab,c->c\",\n \"aab,bc->ac\",\n \"aab,bcc->ac\",\n \"fdf,cdd,ccd,afe->ae\",\n \"fff,fae,bef,def->abd\",\n ],\n)\ndef test_einsum(einsum_signature):\n input_sigs = einsum_signature.split(\"->\")[0].replace(\"...\", \"*\").split(\",\")\n\n np_inputs, da_inputs = _numpy_and_dask_inputs(input_sigs)\n\n with pytest.warns(None):\n assert_eq(\n np.einsum(einsum_signature, *np_inputs),\n da.einsum(einsum_signature, *da_inputs),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_optimize_test_einsum_optimize.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_optimize_test_einsum_optimize.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1706, "end_line": 1724, "span_ids": ["test_einsum_optimize"], "tokens": 172}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"optimize_opts\", [(True, False), (\"greedy\", False), (\"optimal\", False)]\n)\ndef test_einsum_optimize(optimize_opts):\n sig = \"ea,fb,abcd,gc,hd->efgh\"\n input_sigs = sig.split(\"->\")[0].split(\",\")\n np_inputs, da_inputs = _numpy_and_dask_inputs(input_sigs)\n\n opt1, opt2 = optimize_opts\n\n assert_eq(\n np.einsum(sig, *np_inputs, optimize=opt1),\n da.einsum(sig, *np_inputs, optimize=opt2),\n )\n\n assert_eq(\n np.einsum(sig, *np_inputs, optimize=opt2),\n da.einsum(sig, *np_inputs, optimize=opt1),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_order_test_einsum_order.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_order_test_einsum_order.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1727, "end_line": 1735, "span_ids": ["test_einsum_order"], "tokens": 109}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"order\", [\"C\", \"F\", \"A\", \"K\"])\ndef test_einsum_order(order):\n sig = \"ea,fb,abcd,gc,hd->efgh\"\n input_sigs = sig.split(\"->\")[0].split(\",\")\n np_inputs, da_inputs = _numpy_and_dask_inputs(input_sigs)\n\n assert_eq(\n np.einsum(sig, *np_inputs, order=order), da.einsum(sig, *np_inputs, order=order)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_casting_test_einsum_casting.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_casting_test_einsum_casting.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1738, "end_line": 1747, "span_ids": ["test_einsum_casting"], "tokens": 116}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"casting\", [\"no\", \"equiv\", \"safe\", \"same_kind\", \"unsafe\"])\ndef test_einsum_casting(casting):\n sig = 
\"ea,fb,abcd,gc,hd->efgh\"\n input_sigs = sig.split(\"->\")[0].split(\",\")\n np_inputs, da_inputs = _numpy_and_dask_inputs(input_sigs)\n\n assert_eq(\n np.einsum(sig, *np_inputs, casting=casting),\n da.einsum(sig, *np_inputs, casting=casting),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_split_every_test_einsum_invalid_args.with_pytest_raises_TypeEr.da_einsum_a_da_inputs": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_split_every_test_einsum_invalid_args.with_pytest_raises_TypeEr.da_einsum_a_da_inputs", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1750, "end_line": 1761, "span_ids": ["test_einsum_invalid_args", "test_einsum_split_every"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"split_every\", [None, 2])\ndef test_einsum_split_every(split_every):\n np_inputs, da_inputs = _numpy_and_dask_inputs(\"a\")\n assert_eq(\n np.einsum(\"a\", *np_inputs), da.einsum(\"a\", *da_inputs, split_every=split_every)\n )\n\n\ndef test_einsum_invalid_args():\n _, da_inputs = _numpy_and_dask_inputs(\"a\")\n with pytest.raises(TypeError):\n da.einsum(\"a\", *da_inputs, foo=1, bar=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_broadcasting_contraction_test_einsum_broadcasting_contraction.assert_eq_np_res_mul_res": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_broadcasting_contraction_test_einsum_broadcasting_contraction.assert_eq_np_res_mul_res", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1764, "end_line": 1784, "span_ids": ["test_einsum_broadcasting_contraction"], "tokens": 280}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_einsum_broadcasting_contraction():\n a = np.random.rand(1, 5, 4)\n b = np.random.rand(4, 6)\n c = np.random.rand(5, 6)\n d = np.random.rand(10)\n\n d_a = da.from_array(a, chunks=(1, (2, 3), (2, 2)))\n d_b = da.from_array(b, chunks=((2, 2), (4, 2)))\n d_c = da.from_array(c, chunks=((2, 3), (4, 2)))\n d_d = da.from_array(d, chunks=((7, 
3)))\n\n np_res = np.einsum(\"ijk,kl,jl\", a, b, c)\n da_res = da.einsum(\"ijk,kl,jl\", d_a, d_b, d_c)\n assert_eq(np_res, da_res)\n\n mul_res = da_res * d\n\n np_res = np.einsum(\"ijk,kl,jl,i->i\", a, b, c, d)\n da_res = da.einsum(\"ijk,kl,jl,i->i\", d_a, d_b, d_c, d_d)\n assert_eq(np_res, da_res)\n assert_eq(np_res, mul_res)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_broadcasting_contraction2_test_einsum_broadcasting_contraction2.assert_eq_np_res_mul_res": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_broadcasting_contraction2_test_einsum_broadcasting_contraction2.assert_eq_np_res_mul_res", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1787, "end_line": 1807, "span_ids": ["test_einsum_broadcasting_contraction2"], "tokens": 296}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_einsum_broadcasting_contraction2():\n a = np.random.rand(1, 1, 5, 4)\n b = np.random.rand(4, 6)\n c = np.random.rand(5, 6)\n d = np.random.rand(7, 7)\n\n d_a = da.from_array(a, chunks=(1, 1, (2, 3), (2, 2)))\n d_b = da.from_array(b, chunks=((2, 2), (4, 2)))\n d_c = da.from_array(c, chunks=((2, 3), (4, 2)))\n d_d = da.from_array(d, chunks=((7, 3)))\n\n np_res = np.einsum(\"abjk,kl,jl\", a, b, c)\n da_res = da.einsum(\"abjk,kl,jl\", d_a, d_b, d_c)\n assert_eq(np_res, da_res)\n\n mul_res = da_res * d\n\n np_res = np.einsum(\"abjk,kl,jl,ab->ab\", a, b, c, d)\n da_res = da.einsum(\"abjk,kl,jl,ab->ab\", d_a, d_b, d_c, d_d)\n assert_eq(np_res, da_res)\n assert_eq(np_res, mul_res)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_broadcasting_contraction3_test_einsum_broadcasting_contraction3.assert_eq_np_res_da_res_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_broadcasting_contraction3_test_einsum_broadcasting_contraction3.assert_eq_np_res_da_res_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1810, "end_line": 1823, "span_ids": ["test_einsum_broadcasting_contraction3"], "tokens": 222}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, 
"text": "def test_einsum_broadcasting_contraction3():\n a = np.random.rand(1, 5, 4)\n b = np.random.rand(4, 1, 6)\n c = np.random.rand(5, 6)\n d = np.random.rand(7, 7)\n\n d_a = da.from_array(a, chunks=(1, (2, 3), (2, 2)))\n d_b = da.from_array(b, chunks=((2, 2), 1, (4, 2)))\n d_c = da.from_array(c, chunks=((2, 3), (4, 2)))\n d_d = da.from_array(d, chunks=((7, 3)))\n\n np_res = np.einsum(\"ajk,kbl,jl,ab->ab\", a, b, c, d)\n da_res = da.einsum(\"ajk,kbl,jl,ab->ab\", d_a, d_b, d_c, d_d)\n assert_eq(np_res, da_res)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_average_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_average_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1826, "end_line": 1866, "span_ids": ["test_average", "test_average_raises", "test_average_weights", "test_iscomplexobj"], "tokens": 322}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"a\", [np.arange(11), np.arange(6).reshape((3, 2))])\n@pytest.mark.parametrize(\"returned\", [True, False])\ndef test_average(a, returned):\n d_a = da.from_array(a, chunks=2)\n\n np_avg = np.average(a, returned=returned)\n da_avg = da.average(d_a, returned=returned)\n\n assert_eq(np_avg, da_avg)\n\n\ndef test_average_weights():\n a = np.arange(6).reshape((3, 2))\n d_a = da.from_array(a, chunks=2)\n\n weights = np.array([0.25, 0.75])\n d_weights = da.from_array(weights, chunks=2)\n\n np_avg = np.average(a, weights=weights, axis=1)\n da_avg = da.average(d_a, weights=d_weights, axis=1)\n\n assert_eq(np_avg, da_avg)\n\n\ndef test_average_raises():\n d_a = da.arange(11, chunks=2)\n\n with pytest.raises(TypeError):\n da.average(d_a, weights=[1, 2, 3])\n\n with pytest.warns(RuntimeWarning):\n da.average(d_a, weights=da.zeros_like(d_a)).compute()\n\n\ndef test_iscomplexobj():\n a = da.from_array(np.array([1, 2]), 2)\n assert np.iscomplexobj(a) is False\n\n a = da.from_array(np.array([1, 2 + 0j]), 2)\n assert np.iscomplexobj(a) is True", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_itertools_from_dask_array_utils_imp": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_itertools_from_dask_array_utils_imp", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 35, "span_ids": ["imports"], "tokens": 162}, "excluded_embed_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import itertools\nfrom operator import getitem\n\nimport pytest\nfrom tlz import merge\n\nnp = pytest.importorskip(\"numpy\")\n\nimport dask\nfrom dask import config\nimport dask.array as da\nfrom dask.array.slicing import (\n _sanitize_index_element,\n _slice_1d,\n new_blockdim,\n sanitize_index,\n slice_array,\n take,\n normalize_index,\n slicing_plan,\n make_block_sorted_slices,\n shuffle_slice,\n)\nfrom dask.array.slicing import (\n _sanitize_index_element,\n _slice_1d,\n new_blockdim,\n sanitize_index,\n slice_array,\n take,\n normalize_index,\n slicing_plan,\n cached_cumsum,\n)\nfrom dask.array.utils import assert_eq, same_keys", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_1d_test_slice_1d._x_1_8_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_1d_test_slice_1d._x_1_8_1_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 38, "end_line": 109, "span_ids": ["test_slice_1d"], "tokens": 784}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_slice_1d():\n expected = {0: slice(10, 25, 1), 1: slice(None, None, None), 2: slice(0, 1, 1)}\n result = _slice_1d(100, [25] * 4, slice(10, 51, None))\n assert expected == result\n\n # x[100:12:-3]\n expected = {\n 0: slice(-2, -8, -3),\n 1: slice(-1, -21, -3),\n 2: slice(-3, -21, -3),\n 3: slice(-2, -21, -3),\n 4: slice(-1, -21, -3),\n }\n result = _slice_1d(100, [20] * 5, slice(100, 12, -3))\n assert expected == result\n\n # x[102::-3]\n expected = {\n 0: slice(-2, -21, -3),\n 1: slice(-1, -21, -3),\n 2: slice(-3, -21, -3),\n 3: slice(-2, -21, -3),\n 4: slice(-1, -21, -3),\n }\n result = _slice_1d(100, [20] * 5, slice(102, None, -3))\n assert expected == result\n\n # x[::-4]\n expected = {\n 0: slice(-1, -21, -4),\n 1: slice(-1, -21, -4),\n 2: slice(-1, -21, -4),\n 3: slice(-1, -21, -4),\n 4: slice(-1, -21, -4),\n }\n result = _slice_1d(100, [20] * 5, slice(None, None, -4))\n assert expected == result\n\n # x[::-7]\n expected = {\n 0: slice(-5, -21, -7),\n 1: slice(-4, -21, -7),\n 2: slice(-3, -21, -7),\n 3: slice(-2, -21, -7),\n 4: slice(-1, -21, -7),\n }\n result = _slice_1d(100, [20] * 5, slice(None, None, -7))\n assert expected == result\n\n # x=range(115)\n # x[::-7]\n expected = {\n 0: slice(-7, -24, -7),\n 1: slice(-2, -24, -7),\n 2: slice(-4, -24, -7),\n 3: slice(-6, -24, -7),\n 4: slice(-1, -24, -7),\n }\n result = _slice_1d(115, [23] * 5, slice(None, None, -7))\n assert expected == result\n\n # x[79::-3]\n expected = {\n 0: slice(-1, -21, -3),\n 1: slice(-3, -21, 
-3),\n 2: slice(-2, -21, -3),\n 3: slice(-1, -21, -3),\n }\n result = _slice_1d(100, [20] * 5, slice(79, None, -3))\n assert expected == result\n\n # x[-1:-8:-1]\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_1d.expected_14_test_slice_1d.None_14": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_1d.expected_14_test_slice_1d.None_14", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 110, "end_line": 179, "span_ids": ["test_slice_1d"], "tokens": 850}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_slice_1d():\n # ... other code\n expected = {4: slice(-1, -8, -1)}\n result = _slice_1d(100, [20, 20, 20, 20, 20], slice(-1, 92, -1))\n assert expected == result\n\n # x[20:0:-1]\n expected = {0: slice(-1, -20, -1), 1: slice(-20, -21, -1)}\n result = _slice_1d(100, [20, 20, 20, 20, 20], slice(20, 0, -1))\n assert expected == result\n\n # x[:0]\n expected = {}\n result = _slice_1d(100, [20, 20, 20, 20, 20], slice(0))\n assert expected == result\n\n # x=range(99)\n expected = {\n 0: slice(-3, -21, -3),\n 1: slice(-2, -21, -3),\n 2: slice(-1, -21, -3),\n 3: slice(-2, -20, -3),\n 4: slice(-1, -21, -3),\n }\n # This array has non-uniformly sized blocks\n result = _slice_1d(99, [20, 20, 20, 19, 20], slice(100, None, -3))\n assert expected == result\n\n # x=range(104)\n # x[::-3]\n expected = {\n 0: slice(-1, -21, -3),\n 1: slice(-3, -24, -3),\n 2: slice(-3, -28, -3),\n 3: slice(-1, -14, -3),\n 4: slice(-1, -22, -3),\n }\n # This array has non-uniformly sized blocks\n result = _slice_1d(104, [20, 23, 27, 13, 21], slice(None, None, -3))\n assert expected == result\n\n # x=range(104)\n # x[:27:-3]\n expected = {\n 1: slice(-3, -16, -3),\n 2: slice(-3, -28, -3),\n 3: slice(-1, -14, -3),\n 4: slice(-1, -22, -3),\n }\n # This array has non-uniformly sized blocks\n result = _slice_1d(104, [20, 23, 27, 13, 21], slice(None, 27, -3))\n assert expected == result\n\n # x=range(104)\n # x[100:27:-3]\n expected = {\n 1: slice(-3, -16, -3),\n 2: slice(-3, -28, -3),\n 3: slice(-1, -14, -3),\n 4: slice(-4, -22, -3),\n }\n # This array has non-uniformly sized blocks\n result = _slice_1d(104, [20, 23, 27, 13, 21], slice(100, 27, -3))\n assert expected == result\n\n # x=range(1000000000000)\n # x[1000:]\n expected = {0: slice(1000, 1000000000, 1)}\n expected.update({ii: slice(None, None, None) for ii in range(1, 1000)})\n # This array is large\n result = _slice_1d(1000000000000, [1000000000] * 1000, slice(1000, None, None))\n assert expected == result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_singleton_value_on_boundary_test_slice_array_1d.None_7": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_singleton_value_on_boundary_test_slice_array_1d.None_7", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 182, "end_line": 228, "span_ids": ["test_slice_array_1d", "test_slice_singleton_value_on_boundary"], "tokens": 688}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_slice_singleton_value_on_boundary():\n assert _slice_1d(15, [5, 5, 5], 10) == {2: 0}\n assert _slice_1d(30, (5, 5, 5, 5, 5, 5), 10) == {2: 0}\n\n\ndef test_slice_array_1d():\n # x[24::2]\n expected = {\n (\"y\", 0): (getitem, (\"x\", 0), (slice(24, 25, 2),)),\n (\"y\", 1): (getitem, (\"x\", 1), (slice(1, 25, 2),)),\n (\"y\", 2): (getitem, (\"x\", 2), (slice(0, 25, 2),)),\n (\"y\", 3): (getitem, (\"x\", 3), (slice(1, 25, 2),)),\n }\n result, chunks = slice_array(\"y\", \"x\", [[25] * 4], [slice(24, None, 2)], 8)\n\n assert expected == result\n\n # x[26::2]\n expected = {\n (\"y\", 0): (getitem, (\"x\", 1), (slice(1, 25, 2),)),\n (\"y\", 1): (getitem, (\"x\", 2), (slice(0, 25, 2),)),\n (\"y\", 2): (getitem, (\"x\", 3), (slice(1, 25, 2),)),\n }\n\n result, chunks = slice_array(\"y\", \"x\", [[25] * 4], [slice(26, None, 2)], 8)\n assert expected == result\n\n # x[24::2]\n expected = {\n (\"y\", 0): (getitem, (\"x\", 0), (slice(24, 25, 2),)),\n (\"y\", 1): (getitem, (\"x\", 1), (slice(1, 25, 2),)),\n (\"y\", 2): (getitem, (\"x\", 2), (slice(0, 25, 2),)),\n (\"y\", 3): (getitem, (\"x\", 3), (slice(1, 25, 2),)),\n }\n result, chunks = slice_array(\"y\", \"x\", [(25,) * 4], (slice(24, None, 2),), 8)\n\n assert expected == result\n\n # x[26::2]\n expected = {\n (\"y\", 0): (getitem, (\"x\", 1), (slice(1, 25, 2),)),\n (\"y\", 1): (getitem, (\"x\", 2), (slice(0, 25, 2),)),\n (\"y\", 2): (getitem, (\"x\", 3), (slice(1, 25, 2),)),\n }\n\n result, chunks = slice_array(\"y\", \"x\", [(25,) * 4], (slice(26, None, 2),), 8)\n assert expected == result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_array_2d_test_slice_array_2d.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_array_2d_test_slice_array_2d.None_3", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 231, "end_line": 268, "span_ids": ["test_slice_array_2d"], "tokens": 392}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", 
"tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_slice_array_2d():\n # 2d slices: x[13::2,10::1]\n expected = {\n (\"y\", 0, 0): (getitem, (\"x\", 0, 0), (slice(13, 20, 2), slice(10, 20, 1))),\n (\"y\", 0, 1): (\n getitem,\n (\"x\", 0, 1),\n (slice(13, 20, 2), slice(None, None, None)),\n ),\n (\"y\", 0, 2): (\n getitem,\n (\"x\", 0, 2),\n (slice(13, 20, 2), slice(None, None, None)),\n ),\n }\n\n result, chunks = slice_array(\n \"y\",\n \"x\",\n [[20], [20, 20, 5]],\n [slice(13, None, 2), slice(10, None, 1)],\n itemsize=8,\n )\n\n assert expected == result\n\n # 2d slices with one dimension: x[5,10::1]\n expected = {\n (\"y\", 0): (getitem, (\"x\", 0, 0), (5, slice(10, 20, 1))),\n (\"y\", 1): (getitem, (\"x\", 0, 1), (5, slice(None, None, None))),\n (\"y\", 2): (getitem, (\"x\", 0, 2), (5, slice(None, None, None))),\n }\n\n result, chunks = slice_array(\n \"y\", \"x\", ([20], [20, 20, 5]), [5, slice(10, None, 1)], 8\n )\n\n assert expected == result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_optimizations_test_slicing_with_singleton_indices.assert_expected_result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_optimizations_test_slicing_with_singleton_indices.assert_expected_result", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 271, "end_line": 296, "span_ids": ["test_slice_optimizations", "test_slicing_with_singleton_indices"], "tokens": 264}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_slice_optimizations():\n # bar[:]\n expected = {(\"foo\", 0): (\"bar\", 0)}\n result, chunks = slice_array(\"foo\", \"bar\", [[100]], (slice(None, None, None),), 8)\n assert expected == result\n\n # bar[:,:,:]\n expected = {(\"foo\", 0): (\"bar\", 0), (\"foo\", 1): (\"bar\", 1), (\"foo\", 2): (\"bar\", 2)}\n result, chunks = slice_array(\n \"foo\",\n \"bar\",\n [(100, 1000, 10000)],\n (slice(None, None, None), slice(None, None, None), slice(None, None, None)),\n itemsize=8,\n )\n assert expected == result\n\n\ndef test_slicing_with_singleton_indices():\n result, chunks = slice_array(\n \"y\", \"x\", ([5, 5], [5, 5]), (slice(0, 5), 8), itemsize=8\n )\n\n expected = {(\"y\", 0): (getitem, (\"x\", 0, 1), (slice(None, None, None), 3))}\n\n assert expected == result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_with_newaxis_test_slicing_with_newaxis.assert_chunks_3_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_with_newaxis_test_slicing_with_newaxis.assert_chunks_3_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 299, "end_line": 322, "span_ids": ["test_slicing_with_newaxis"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_slicing_with_newaxis():\n result, chunks = slice_array(\n \"y\",\n \"x\",\n ([5, 5], [5, 5]),\n (slice(0, 3), None, slice(None, None, None)),\n itemsize=8,\n )\n\n expected = {\n (\"y\", 0, 0, 0): (\n getitem,\n (\"x\", 0, 0),\n (slice(0, 3, 1), None, slice(None, None, None)),\n ),\n (\"y\", 0, 0, 1): (\n getitem,\n (\"x\", 0, 1),\n (slice(0, 3, 1), None, slice(None, None, None)),\n ),\n }\n\n assert expected == result\n assert chunks == ((3,), (1,), (5, 5))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_take_test_take.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_take_test_take.None_3", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 325, "end_line": 355, "span_ids": ["test_take"], "tokens": 479}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_take():\n chunks, dsk = take(\"y\", \"x\", [(20, 20, 20, 20)], [5, 1, 47, 3], itemsize=8, axis=0)\n expected = {\n (\"y\", 0): (getitem, (\"x\", 0), (np.array([5, 1]),)),\n (\"y\", 1): (getitem, (\"x\", 2), (np.array([7]),)),\n (\"y\", 2): (getitem, (\"x\", 0), (np.array([3]),)),\n }\n np.testing.assert_equal(sorted(dsk.items()), sorted(expected.items()))\n assert chunks == ((2, 1, 1),)\n\n chunks, dsk = take(\n \"y\", \"x\", [(20, 20, 20, 20), (20, 20)], [5, 1, 47, 3], itemsize=8, axis=0\n )\n expected = {\n (\"y\", 0, 0): (\n getitem,\n (\"x\", 0, 0),\n (np.array([5, 1]), slice(None, None, None)),\n ),\n (\"y\", 0, 1): (\n getitem,\n (\"x\", 0, 1),\n (np.array([5, 1]), slice(None, None, None)),\n ),\n (\"y\", 1, 0): (getitem, (\"x\", 2, 0), (np.array([7]), slice(None, None, None))),\n (\"y\", 1, 1): (getitem, (\"x\", 2, 1), (np.array([7]), slice(None, None, None))),\n (\"y\", 2, 0): (getitem, (\"x\", 0, 0), (np.array([3]), slice(None, None, None))),\n (\"y\", 2, 1): (getitem, (\"x\", 0, 1), (np.array([3]), slice(None, None, None))),\n }\n np.testing.assert_equal(sorted(dsk.items()), sorted(expected.items()))\n assert chunks == ((2, 1, 1), (20, 20))", "start_char_idx": null, 
"end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_take_sorted_test_take_sorted.assert_chunks_20_20": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_take_sorted_test_take_sorted.assert_chunks_20_20", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 358, "end_line": 381, "span_ids": ["test_take_sorted"], "tokens": 313}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_take_sorted():\n chunks, dsk = take(\"y\", \"x\", [(20, 20, 20, 20)], [1, 3, 5, 47], itemsize=8, axis=0)\n expected = {\n (\"y\", 0): (getitem, (\"x\", 0), ([1, 3, 5],)),\n (\"y\", 1): (getitem, (\"x\", 2), ([7],)),\n }\n np.testing.assert_equal(dsk, expected)\n assert chunks == ((3, 1),)\n\n chunks, dsk = take(\n \"y\", \"x\", [(20, 20, 20, 20), (20, 20)], [1, 3, 5, 37], itemsize=8, axis=1\n )\n expected = merge(\n dict(\n ((\"y\", i, 0), (getitem, (\"x\", i, 0), (slice(None, None, None), [1, 3, 5])))\n for i in range(4)\n ),\n dict(\n ((\"y\", i, 1), (getitem, (\"x\", i, 1), (slice(None, None, None), [17])))\n for i in range(4)\n ),\n )\n np.testing.assert_equal(dsk, expected)\n assert chunks == ((20, 20, 20, 20), (3, 1))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_chunks_test_slicing_chunks.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_chunks_test_slicing_chunks.None_5", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 384, "end_line": 398, "span_ids": ["test_slicing_chunks"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_slicing_chunks():\n result, chunks = slice_array(\n \"y\", \"x\", ([5, 5], [5, 5]), (1, np.array([2, 0, 3])), itemsize=8\n )\n assert chunks == ((3,),)\n\n result, chunks = slice_array(\n \"y\", \"x\", ([5, 5], [5, 5]), (slice(0, 7), np.array([2, 0, 3])), itemsize=8\n )\n assert chunks == ((5, 2), (3,))\n\n result, chunks = slice_array(\n \"y\", \"x\", ([5, 5], [5, 5]), (slice(0, 7), 1), itemsize=8\n )\n assert chunks == ((5, 2),)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_with_numpy_arrays_test_slicing_with_numpy_arrays.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_with_numpy_arrays_test_slicing_with_numpy_arrays.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 401, "end_line": 425, "span_ids": ["test_slicing_with_numpy_arrays"], "tokens": 277}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_slicing_with_numpy_arrays():\n a, bd1 = slice_array(\n \"y\",\n \"x\",\n ((3, 3, 3, 1), (3, 3, 3, 1)),\n (np.array([1, 2, 9]), slice(None, None, None)),\n itemsize=8,\n )\n b, bd2 = slice_array(\n \"y\",\n \"x\",\n ((3, 3, 3, 1), (3, 3, 3, 1)),\n (np.array([1, 2, 9]), slice(None, None, None)),\n itemsize=8,\n )\n\n assert bd1 == bd2\n np.testing.assert_equal(a, b)\n\n i = [False, True, True, False, False, False, False, False, False, True]\n index = (i, slice(None, None, None))\n index = normalize_index(index, (10, 10))\n c, bd3 = slice_array(\"y\", \"x\", ((3, 3, 3, 1), (3, 3, 3, 1)), index, itemsize=8)\n assert bd1 == bd3\n np.testing.assert_equal(a, c)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_and_chunks_test_slicing_identities.None_9": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_and_chunks_test_slicing_identities.None_9", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 428, "end_line": 446, "span_ids": ["test_slicing_and_chunks", "test_slicing_identities"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_slicing_and_chunks():\n o = da.ones((24, 16), chunks=((4, 8, 8, 4), (2, 6, 6, 2)))\n t = o[4:-4, 2:-2]\n assert t.chunks == ((8, 8), (6, 6))\n\n\ndef test_slicing_identities():\n a = da.ones((24, 16), chunks=((4, 8, 8, 4), (2, 6, 6, 2)))\n\n assert a is a[slice(None)]\n assert a is a[:]\n assert a is a[::]\n assert a is a[...]\n assert a is a[0:]\n assert a is a[0::]\n assert a is a[::1]\n assert a is a[0 : len(a)]\n assert a is a[0::1]\n assert a is a[0 : len(a) : 1]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": 
"{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_stop_0_ReturnItem.__getitem__.return.key": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_stop_0_ReturnItem.__getitem__.return.key", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 449, "end_line": 465, "span_ids": ["test_slice_stop_0", "ReturnItem.__getitem__", "ReturnItem", "test_slice_list_then_None"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_slice_stop_0():\n # from gh-125\n a = da.ones(10, chunks=(10,))[:0].compute()\n b = np.ones(10)[:0]\n assert_eq(a, b)\n\n\ndef test_slice_list_then_None():\n x = da.zeros(shape=(5, 5), chunks=(3, 3))\n y = x[[2, 1]][None]\n\n assert_eq(y, np.zeros((1, 2, 5)))\n\n\nclass ReturnItem(object):\n def __getitem__(self, key):\n return key", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_exhaustively_test_slicing_exhaustively.for_i_in_first_indexers_.for_j_in_second_indexers_.assert_eq_x_i_j_a_i_j": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_exhaustively_test_slicing_exhaustively.for_i_in_first_indexers_.for_j_in_second_indexers_.assert_eq_x_i_j_a_i_j", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 468, "end_line": 489, "span_ids": ["test_slicing_exhaustively"], "tokens": 337}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skip(reason=\"really long test\")\ndef test_slicing_exhaustively():\n x = np.random.rand(6, 7, 8)\n a = da.from_array(x, chunks=(3, 3, 3))\n I = ReturnItem()\n\n # independent indexing along different axes\n indexers = [0, -2, I[:], I[:5], [0, 1], [0, 1, 2], [4, 2], I[::-1], None, I[:0], []]\n for i in indexers:\n assert_eq(x[i], a[i]), i\n for j in indexers:\n assert_eq(x[i][:, j], a[i][:, j]), (i, j)\n assert_eq(x[:, i][j], a[:, i][j]), (i, j)\n for k in indexers:\n assert_eq(x[..., i][:, j][k], a[..., i][:, j][k]), (i, j, k)\n\n # repeated indexing along the first axis\n first_indexers = [I[:], I[:5], np.arange(5), [3, 1, 4, 5, 0], np.arange(6) < 6]\n second_indexers = [0, -1, 3, I[:], I[:3], I[2:-1], [2, 4], [], I[:0]]\n for i in first_indexers:\n for j in second_indexers:\n 
assert_eq(x[i][j], a[i][j]), (i, j)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_with_negative_step_flops_keys_test_slicing_with_negative_step_flops_keys.assert_y_dask_y_name_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_with_negative_step_flops_keys_test_slicing_with_negative_step_flops_keys.assert_y_dask_y_name_1_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 492, "end_line": 503, "span_ids": ["test_slicing_with_negative_step_flops_keys"], "tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_slicing_with_negative_step_flops_keys():\n x = da.arange(10, chunks=5)\n y = x[:1:-1]\n assert (x.name, 1) in y.dask[(y.name, 0)]\n assert (x.name, 0) in y.dask[(y.name, 1)]\n\n assert_eq(y, np.arange(10)[:1:-1])\n\n assert y.chunks == ((5, 3),)\n\n assert y.dask[(y.name, 0)] == (getitem, (x.name, 1), (slice(-1, -6, -1),))\n assert y.dask[(y.name, 1)] == (getitem, (x.name, 0), (slice(-1, -4, -1),))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_empty_slice_test_multiple_list_slicing.assert_eq_x_0_1_2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_empty_slice_test_multiple_list_slicing.assert_eq_x_0_1_2_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 506, "end_line": 516, "span_ids": ["test_empty_slice", "test_multiple_list_slicing"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_empty_slice():\n x = da.ones((5, 5), chunks=(2, 2), dtype=\"i4\")\n y = x[:0]\n\n assert_eq(y, np.ones((5, 5), dtype=\"i4\")[:0])\n\n\ndef test_multiple_list_slicing():\n x = np.random.rand(6, 7, 8)\n a = da.from_array(x, chunks=(3, 3, 3))\n assert_eq(x[:, [0, 1, 2]][[0, 1]], a[:, [0, 1, 2]][[0, 1]])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_boolean_list_slicing_test_boolean_list_slicing.assert_eq_da_asarray_0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_boolean_list_slicing_test_boolean_list_slicing.assert_eq_da_asarray_0_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 519, "end_line": 529, "span_ids": ["test_boolean_list_slicing"], "tokens": 117}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_boolean_list_slicing():\n with pytest.raises(IndexError):\n da.asarray(range(2))[[True]]\n with pytest.raises(IndexError):\n da.asarray(range(2))[[False, False, False]]\n x = np.arange(5)\n ind = [True, False, False, False, True]\n assert_eq(da.asarray(x)[ind], x[ind])\n # https://github.com/dask/dask/issues/3706\n ind = [True]\n assert_eq(da.asarray([0])[ind], np.arange(1)[ind])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_boolean_numpy_array_slicing_test_boolean_numpy_array_slicing.assert_eq_da_asarray_0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_boolean_numpy_array_slicing_test_boolean_numpy_array_slicing.assert_eq_da_asarray_0_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 532, "end_line": 542, "span_ids": ["test_boolean_numpy_array_slicing"], "tokens": 126}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_boolean_numpy_array_slicing():\n with pytest.raises(IndexError):\n da.asarray(range(2))[np.array([True])]\n with pytest.raises(IndexError):\n da.asarray(range(2))[np.array([False, False, False])]\n x = np.arange(5)\n ind = np.array([True, False, False, False, True])\n assert_eq(da.asarray(x)[ind], x[ind])\n # https://github.com/dask/dask/issues/3706\n ind = np.array([True])\n assert_eq(da.asarray([0])[ind], np.arange(1)[ind])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_empty_list_test_new_blockdim.assert_new_blockdim_20_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_empty_list_test_new_blockdim.assert_new_blockdim_20_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 545, "end_line": 559, "span_ids": ["test_new_blockdim", "test_empty_list", "test_uneven_chunks"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_empty_list():\n x = np.ones((5, 5, 5), dtype=\"i4\")\n dx = da.from_array(x, chunks=2)\n\n assert_eq(dx[[], :3, :2], x[[], :3, :2])\n assert_eq(dx[:3, [], :2], x[:3, [], :2])\n assert_eq(dx[:3, :2, []], x[:3, :2, []])\n\n\ndef test_uneven_chunks():\n assert da.ones(20, chunks=5)[::2].chunks == ((3, 2, 3, 2),)\n\n\ndef test_new_blockdim():\n assert new_blockdim(20, [5, 5, 5, 5], slice(0, None, 2)) == [3, 2, 3, 2]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_consistent_names_test_slicing_consistent_names.assert_same_keys_a_0_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_consistent_names_test_slicing_consistent_names.assert_same_keys_a_0_1_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 562, "end_line": 574, "span_ids": ["test_slicing_consistent_names"], "tokens": 207}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_slicing_consistent_names():\n x = np.arange(100).reshape((10, 10))\n a = da.from_array(x, chunks=(5, 5))\n assert same_keys(a[0], a[0])\n assert same_keys(a[:, [1, 2, 3]], a[:, [1, 2, 3]])\n assert same_keys(a[:, 5:2:-1], a[:, 5:2:-1])\n assert same_keys(a[0, ...], a[0, ...])\n assert same_keys(a[...], a[...])\n assert same_keys(a[[1, 3, 5]], a[[1, 3, 5]])\n assert same_keys(a[-11:11], a[:])\n assert same_keys(a[-11:-9], a[:1])\n assert same_keys(a[-1], a[9])\n assert same_keys(a[0::-1], a[0:-11:-1])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_consistent_names_after_normalization_test_sanitize_index.None_1": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_consistent_names_after_normalization_test_sanitize_index.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 577, "end_line": 596, "span_ids": ["test_slicing_consistent_names_after_normalization", "test_sanitize_index_element", "test_sanitize_index"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_slicing_consistent_names_after_normalization():\n x = da.zeros(10, chunks=(5,))\n assert same_keys(x[0:], x[:10])\n assert same_keys(x[0:], x[0:10])\n assert same_keys(x[0:], x[0:10:1])\n assert same_keys(x[:], x[0:10:1])\n\n\ndef test_sanitize_index_element():\n with pytest.raises(TypeError):\n _sanitize_index_element(\"Hello!\")\n\n\ndef test_sanitize_index():\n pd = pytest.importorskip(\"pandas\")\n with pytest.raises(TypeError):\n sanitize_index(\"Hello!\")\n\n np.testing.assert_equal(sanitize_index(pd.Series([1, 2, 3])), [1, 2, 3])\n np.testing.assert_equal(sanitize_index((1, 2, 3)), [1, 2, 3])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_uneven_blockdims_test_uneven_blockdims.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_uneven_blockdims_test_uneven_blockdims.None_5", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 599, "end_line": 622, "span_ids": ["test_uneven_blockdims"], "tokens": 553}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_uneven_blockdims():\n blockdims = ((31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30), (100,))\n index = (slice(240, 270), slice(None))\n dsk_out, bd_out = slice_array(\"in\", \"out\", blockdims, index, itemsize=8)\n sol = {\n (\"in\", 0, 0): (getitem, (\"out\", 7, 0), (slice(28, 31, 1), slice(None))),\n (\"in\", 1, 0): (getitem, (\"out\", 8, 0), (slice(0, 27, 1), slice(None))),\n }\n assert dsk_out == sol\n assert bd_out == ((3, 27), (100,))\n\n blockdims = ((31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30),) * 2\n index = (slice(240, 270), slice(180, 230))\n dsk_out, bd_out = slice_array(\"in\", \"out\", blockdims, index, itemsize=8)\n sol = {\n (\"in\", 0, 0): (getitem, (\"out\", 7, 5), (slice(28, 31, 1), slice(29, 30, 1))),\n (\"in\", 0, 1): (getitem, (\"out\", 7, 6), (slice(28, 31, 1), slice(None))),\n (\"in\", 0, 2): (getitem, (\"out\", 7, 7), (slice(28, 31, 1), slice(0, 18, 1))),\n (\"in\", 1, 0): (getitem, 
(\"out\", 8, 5), (slice(0, 27, 1), slice(29, 30, 1))),\n (\"in\", 1, 1): (getitem, (\"out\", 8, 6), (slice(0, 27, 1), slice(None))),\n (\"in\", 1, 2): (getitem, (\"out\", 8, 7), (slice(0, 27, 1), slice(0, 18, 1))),\n }\n assert dsk_out == sol\n assert bd_out == ((3, 27), (1, 31, 18))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_oob_check_test_index_with_int_dask_array.assert_eq_x_T_idx_ex": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_oob_check_test_index_with_int_dask_array.assert_eq_x_T_idx_ex", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 625, "end_line": 658, "span_ids": ["test_oob_check", "test_index_with_int_dask_array"], "tokens": 354}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_oob_check():\n x = da.ones(5, chunks=(2,))\n with pytest.raises(IndexError):\n x[6]\n with pytest.raises(IndexError):\n x[[6]]\n with pytest.raises(IndexError):\n x[-10]\n with pytest.raises(IndexError):\n x[[-10]]\n with pytest.raises(IndexError):\n x[0, 0]\n\n\n@pytest.mark.parametrize(\"idx_chunks\", [None, 3, 2, 1])\n@pytest.mark.parametrize(\"x_chunks\", [None, (3, 5), (2, 3), (1, 2), (1, 1)])\ndef test_index_with_int_dask_array(x_chunks, idx_chunks):\n # test data is crafted to stress use cases:\n # - pick from different chunks of x out of order\n # - a chunk of x contains no matches\n # - only one chunk of x\n x = np.array(\n [[10, 20, 30, 40, 50], [60, 70, 80, 90, 100], [110, 120, 130, 140, 150]]\n )\n idx = np.array([3, 0, 1])\n expect = np.array([[40, 10, 20], [90, 60, 70], [140, 110, 120]])\n\n if x_chunks is not None:\n x = da.from_array(x, chunks=x_chunks)\n if idx_chunks is not None:\n idx = da.from_array(idx, chunks=idx_chunks)\n\n assert_eq(x[:, idx], expect)\n assert_eq(x.T[idx, :], expect.T)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_index_with_int_dask_array_0d_test_index_with_int_dask_array_0d.assert_eq_x_idx0_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_index_with_int_dask_array_0d_test_index_with_int_dask_array_0d.assert_eq_x_idx0_x_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 661, "end_line": 667, "span_ids": ["test_index_with_int_dask_array_0d"], "tokens": 107}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"chunks\", [1, 2, 3])\ndef test_index_with_int_dask_array_0d(chunks):\n # Slice by 0-dimensional array\n x = da.from_array([[10, 20, 30], [40, 50, 60]], chunks=chunks)\n idx0 = da.from_array(1, chunks=1)\n assert_eq(x[idx0, :], x[1, :])\n assert_eq(x[:, idx0], x[:, 1])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_index_with_int_dask_array_nanchunks_test_index_with_int_dask_array_nanchunks.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_index_with_int_dask_array_nanchunks_test_index_with_int_dask_array_nanchunks.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 670, "end_line": 677, "span_ids": ["test_index_with_int_dask_array_nanchunks"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"chunks\", [1, 2, 3, 4, 5])\ndef test_index_with_int_dask_array_nanchunks(chunks):\n # Slice by array with nan-sized chunks\n a = da.arange(-2, 3, chunks=chunks)\n assert_eq(a[a.nonzero()], np.array([-2, -1, 1, 2]))\n # Edge case: the nan-sized chunks resolve to size 0\n a = da.zeros(5, chunks=chunks)\n assert_eq(a[a.nonzero()], np.array([]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_index_with_int_dask_array_negindex_test_index_with_int_dask_array_indexerror.None_1.a_idx_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_index_with_int_dask_array_negindex_test_index_with_int_dask_array_indexerror.None_1.a_idx_compute_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 680, "end_line": 695, "span_ids": ["test_index_with_int_dask_array_negindex", "test_index_with_int_dask_array_indexerror"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"chunks\", [2, 4])\ndef test_index_with_int_dask_array_negindex(chunks):\n a = 
da.arange(4, chunks=chunks)\n idx = da.from_array([-1, -4], chunks=1)\n assert_eq(a[idx], np.array([3, 0]))\n\n\n@pytest.mark.parametrize(\"chunks\", [2, 4])\ndef test_index_with_int_dask_array_indexerror(chunks):\n a = da.arange(4, chunks=chunks)\n idx = da.from_array([4], chunks=1)\n with pytest.raises(IndexError):\n a[idx].compute()\n idx = da.from_array([-5], chunks=1)\n with pytest.raises(IndexError):\n a[idx].compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_index_with_int_dask_array_dtypes_test_index_with_int_dask_array_nocompute.with_pytest_raises_NotImp.result_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_index_with_int_dask_array_dtypes_test_index_with_int_dask_array_nocompute.with_pytest_raises_NotImp.result_compute_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 698, "end_line": 719, "span_ids": ["test_index_with_int_dask_array_dtypes", "test_index_with_int_dask_array_nocompute"], "tokens": 215}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"dtype\", [\"int8\", \"int16\", \"int32\", \"int64\", \"uint8\", \"uint16\", \"uint32\", \"uint64\"]\n)\ndef test_index_with_int_dask_array_dtypes(dtype):\n a = da.from_array([10, 20, 30, 40], chunks=-1)\n idx = da.from_array(np.array([1, 2]).astype(dtype), chunks=1)\n assert_eq(a[idx], np.array([20, 30]))\n\n\ndef test_index_with_int_dask_array_nocompute():\n \"\"\"Test that when the indices are a dask array\n they are not accidentally computed\n \"\"\"\n\n def crash():\n raise NotImplementedError()\n\n x = da.arange(5, chunks=-1)\n idx = da.Array({(\"x\", 0): (crash,)}, name=\"x\", chunks=((2,),), dtype=np.int64)\n result = x[idx]\n with pytest.raises(NotImplementedError):\n result.compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_index_with_bool_dask_array_test_index_with_bool_dask_array.for_index_in_ind_slice.assert_eq_x_x_index_d_i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_index_with_bool_dask_array_test_index_with_bool_dask_array.for_index_in_ind_slice.assert_eq_x_x_index_d_i", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 722, "end_line": 729, "span_ids": ["test_index_with_bool_dask_array"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_index_with_bool_dask_array():\n x = np.arange(36).reshape((6, 6))\n d = da.from_array(x, chunks=(3, 3))\n ind = np.asarray([True, True, False, True, False, False], dtype=bool)\n ind = da.from_array(ind, chunks=2)\n for index in [ind, (slice(1, 9, 2), ind), (ind, slice(2, 8, 1))]:\n x_index = dask.compute(index)[0]\n assert_eq(x[x_index], d[index])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_index_with_bool_dask_array_2_test_index_with_bool_dask_array_2.for_i_in_range_x_ndim_.assert_eq_x_tuple_index3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_index_with_bool_dask_array_2_test_index_with_bool_dask_array_2.for_i_in_range_x_ndim_.assert_eq_x_tuple_index3_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 732, "end_line": 748, "span_ids": ["test_index_with_bool_dask_array_2"], "tokens": 141}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_index_with_bool_dask_array_2():\n x = np.random.random((10, 10, 10))\n ind = np.random.random(10) > 0.5\n\n d = da.from_array(x, chunks=(3, 4, 5))\n dind = da.from_array(ind, chunks=4)\n\n index = [slice(1, 9, 1), slice(None)]\n\n for i in range(x.ndim):\n index2 = index[:]\n index2.insert(i, dind)\n\n index3 = index[:]\n index3.insert(i, ind)\n\n assert_eq(x[tuple(index3)], d[tuple(index2)])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_cull_test_negative_list_slicing.assert_eq_dx_4_1_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_cull_test_negative_list_slicing.assert_eq_dx_4_1_x_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 751, "end_line": 833, "span_ids": ["impl:3", "test_slicing_with_Nones", "test_slicing_integer_no_warnings", "test_slicing_none_int_ellipes", "test_cull", "test_None_overlap_int", "test_negative_n_slicing", "test_negative_list_slicing"], "tokens": 723}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail\ndef test_cull():\n x = da.ones(1000, chunks=(10,))\n\n for slc in [1, slice(0, 30), slice(0, None, 100)]:\n y = x[slc]\n assert len(y.dask) < len(x.dask)\n\n\n@pytest.mark.parametrize(\"shape\", [(2,), (2, 3), (2, 3, 5)])\n@pytest.mark.parametrize(\n \"index\", [(Ellipsis,), (None, Ellipsis), (Ellipsis, None), (None, Ellipsis, None)]\n)\ndef test_slicing_with_Nones(shape, index):\n x = np.random.random(shape)\n d = da.from_array(x, chunks=shape)\n\n assert_eq(x[index], d[index])\n\n\nindexers = [Ellipsis, slice(2), 0, 1, -2, -1, slice(-2, None), None]\n\n\n\"\"\"\n# We comment this out because it is 4096 tests\n@pytest.mark.parametrize('a', indexers)\n@pytest.mark.parametrize('b', indexers)\n@pytest.mark.parametrize('c', indexers)\n@pytest.mark.parametrize('d', indexers)\ndef test_slicing_none_int_ellipses(a, b, c, d):\n if (a, b, c, d).count(Ellipsis) > 1:\n return\n shape = (2,3,5,7,11)\n x = np.arange(np.prod(shape)).reshape(shape)\n y = da.core.asarray(x)\n\n xx = x[a, b, c, d]\n yy = y[a, b, c, d]\n assert_eq(xx, yy)\n\"\"\"\n\n\ndef test_slicing_integer_no_warnings():\n # https://github.com/dask/dask/pull/2457/\n X = da.random.random((100, 2), (2, 2))\n idx = np.array([0, 0, 1, 1])\n with pytest.warns(None) as rec:\n X[idx].compute()\n assert len(rec) == 0\n\n\n@pytest.mark.slow\ndef test_slicing_none_int_ellipes():\n shape = (2, 3, 5, 7, 11)\n x = np.arange(np.prod(shape)).reshape(shape)\n y = da.core.asarray(x)\n for ind in itertools.product(indexers, indexers, indexers, indexers):\n if ind.count(Ellipsis) > 1:\n continue\n\n assert_eq(x[ind], y[ind])\n\n\ndef test_None_overlap_int():\n a, b, c, d = (0, slice(None, 2, None), None, Ellipsis)\n shape = (2, 3, 5, 7, 11)\n x = np.arange(np.prod(shape)).reshape(shape)\n y = da.core.asarray(x)\n\n xx = x[a, b, c, d]\n yy = y[a, b, c, d]\n assert_eq(xx, yy)\n\n\ndef test_negative_n_slicing():\n assert_eq(da.ones(2, chunks=2)[-2], np.ones(2)[-2])\n\n\ndef test_negative_list_slicing():\n x = np.arange(5)\n dx = da.from_array(x, chunks=2)\n assert_eq(dx[[0, -5]], x[[0, -5]])\n assert_eq(dx[[4, -1]], x[[4, -1]])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_permit_oob_slices_test_take_semi_sorted.assert_y_chunks_5_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_permit_oob_slices_test_take_semi_sorted.assert_y_chunks_5_5", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 836, "end_line": 860, "span_ids": ["test_normalize_index", "test_take_semi_sorted", "test_permit_oob_slices"], "tokens": 244}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_permit_oob_slices():\n x = np.arange(5)\n dx = da.from_array(x, chunks=2)\n\n assert_eq(x[-102:], dx[-102:])\n 
assert_eq(x[102:], dx[102:])\n assert_eq(x[:102], dx[:102])\n assert_eq(x[:-102], dx[:-102])\n\n\ndef test_normalize_index():\n assert normalize_index((Ellipsis, None), (10,)) == (slice(None), None)\n assert normalize_index(5, (np.nan,)) == (5,)\n assert normalize_index(-5, (np.nan,)) == (-5,)\n (result,) = normalize_index([-5, -2, 1], (np.nan,))\n assert result.tolist() == [-5, -2, 1]\n assert normalize_index(slice(-5, -2), (np.nan,)) == (slice(-5, -2),)\n\n\ndef test_take_semi_sorted():\n x = da.ones(10, chunks=(5,))\n index = np.arange(15) % 10\n\n y = x[index]\n assert y.chunks == ((5, 5, 5),)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_plan_test_slicing_plan.for_i_x_j_y_in_zip.assert_x_y_all_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_plan_test_slicing_plan.for_i_x_j_y_in_zip.assert_x_y_all_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 863, "end_line": 881, "span_ids": ["test_slicing_plan"], "tokens": 222}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"chunks,index,expected\",\n [\n ((5, 5, 5), np.arange(5, 15) % 10, [(1, np.arange(5)), (0, np.arange(5))]),\n (\n (5, 5, 5, 5),\n np.arange(20) // 2,\n [(0, np.arange(10) // 2), (1, np.arange(10) // 2)],\n ),\n ((10, 10), [15, 2, 3, 15], [(1, [5]), (0, [2, 3]), (1, [5])]),\n ],\n)\ndef test_slicing_plan(chunks, index, expected):\n plan = slicing_plan(chunks, index=index)\n assert len(plan) == len(expected)\n for (i, x), (j, y) in zip(plan, expected):\n assert i == j\n assert len(x) == len(y)\n assert (x == y).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_pathological_unsorted_slicing_test_pathological_unsorted_slicing.assert_out_of_order_in_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_pathological_unsorted_slicing_test_pathological_unsorted_slicing.assert_out_of_order_in_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 954, "end_line": 964, "span_ids": ["test_pathological_unsorted_slicing"], "tokens": 115}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "def test_pathological_unsorted_slicing():\n x = da.ones(100, chunks=10)\n\n # [0, 10, 20, ... 90, 1, 11, 21, ... 91, ...]\n index = np.arange(100).reshape(10, 10).ravel(order=\"F\")\n\n with pytest.warns(da.PerformanceWarning) as info:\n x[index]\n\n assert \"10\" in str(info.list[0])\n assert \"out-of-order\" in str(info.list[0])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_cached_cumsum_test_cached_cumsum_non_tuple.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_cached_cumsum_test_cached_cumsum_non_tuple.None_2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 967, "end_line": 987, "span_ids": ["test_cached_cumsum_nan", "test_cached_cumsum_non_tuple", "test_cached_cumsum"], "tokens": 213}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_cached_cumsum():\n a = (1, 2, 3, 4)\n x = cached_cumsum(a)\n y = cached_cumsum(a, initial_zero=True)\n assert x == (1, 3, 6, 10)\n assert y == (0, 1, 3, 6, 10)\n\n\ndef test_cached_cumsum_nan():\n a = (1, np.nan, 3)\n x = cached_cumsum(a)\n y = cached_cumsum(a, initial_zero=True)\n np.testing.assert_equal(x, (1, np.nan, np.nan))\n np.testing.assert_equal(y, (0, 1, np.nan, np.nan))\n\n\ndef test_cached_cumsum_non_tuple():\n a = [1, 2, 3]\n assert cached_cumsum(a) == (1, 3, 6)\n a[1] = 4\n assert cached_cumsum(a) == (1, 5, 8)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_setitem_with_different_chunks_preserves_shape_test_setitem_with_different_chunks_preserves_shape.assert_x_shape_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_setitem_with_different_chunks_preserves_shape_test_setitem_with_different_chunks_preserves_shape.assert_x_shape_result_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 990, "end_line": 1003, "span_ids": ["test_setitem_with_different_chunks_preserves_shape"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"params\", [(2, 2, 1), (5, 3, 2)])\ndef 
test_setitem_with_different_chunks_preserves_shape(params):\n \"\"\"Reproducer for https://github.com/dask/dask/issues/3730.\n\n Mutating based on an array with different chunks can cause new chunks to be\n used. We need to ensure those new chunk sizes are applied to the mutated\n array, otherwise the array won't generate the correct keys.\n \"\"\"\n array_size, chunk_size1, chunk_size2 = params\n x = da.zeros(array_size, chunks=chunk_size1)\n mask = da.zeros(array_size, chunks=chunk_size2)\n x[mask] = 1\n result = x.compute()\n assert x.shape == result.shape", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_gh3579_test_make_blockwise_sorted_slice.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_gh3579_test_make_blockwise_sorted_slice.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 1006, "end_line": 1020, "span_ids": ["test_make_blockwise_sorted_slice", "test_gh3579"], "tokens": 201}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_gh3579():\n assert_eq(np.arange(10)[0::-1], da.arange(10, chunks=3)[0::-1])\n assert_eq(np.arange(10)[::-1], da.arange(10, chunks=3)[::-1])\n\n\ndef test_make_blockwise_sorted_slice():\n x = da.arange(8, chunks=4)\n index = np.array([6, 0, 4, 2, 7, 1, 5, 3])\n\n a, b = make_block_sorted_slices(index, x.chunks)\n\n index2 = np.array([0, 2, 4, 6, 1, 3, 5, 7])\n index3 = np.array([3, 0, 2, 1, 7, 4, 6, 5])\n np.testing.assert_array_equal(a, index2)\n np.testing.assert_array_equal(b, index3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_shuffle_slice_test_shuffle_slice.assert_eq_a_b_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_shuffle_slice_test_shuffle_slice.assert_eq_a_b_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 1023, "end_line": 1034, "span_ids": ["test_shuffle_slice"], "tokens": 114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.filterwarnings(\"ignore\")\n@pytest.mark.parametrize(\n \"size, chunks\", [((100, 2), (50, 2)), ((100, 2), (37, 1)), ((100,), (55,))]\n)\ndef 
test_shuffle_slice(size, chunks):\n x = da.random.randint(0, 1000, size=size, chunks=chunks)\n index = np.arange(len(x))\n np.random.shuffle(index)\n\n a = x[index]\n b = shuffle_slice(x, index)\n assert_eq(a, b)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_gh4043_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_gh4043_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 1037, "end_line": 1055, "span_ids": ["test_slice_array_3d_with_bool_numpy_array", "test_gh4043"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"lock\", [True, False])\n@pytest.mark.parametrize(\"asarray\", [True, False])\n@pytest.mark.parametrize(\"fancy\", [True, False])\ndef test_gh4043(lock, asarray, fancy):\n a1 = da.from_array(np.zeros(3), chunks=1, asarray=asarray, lock=lock, fancy=fancy)\n a2 = da.from_array(np.ones(3), chunks=1, asarray=asarray, lock=lock, fancy=fancy)\n al = da.stack([a1, a2])\n assert_eq(al, al)\n\n\ndef test_slice_array_3d_with_bool_numpy_array():\n # https://github.com/dask/dask/issues/6089\n array = da.arange(0, 24).reshape((4, 3, 2))\n mask = np.arange(0, 24).reshape((4, 3, 2)) > 12\n\n actual = array[mask].compute()\n expected = np.arange(13, 24)\n assert_eq(actual, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_random_numpy_120_xfail.pytest_mark_xfail_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_random_numpy_120_xfail.pytest_mark_xfail_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_sparse.py", "file_name": "test_sparse.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 20, "span_ids": ["imports"], "tokens": 162}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import random\n\nimport numpy as np\nimport pytest\n\nimport dask\nimport dask.array as da\nfrom dask.array.numpy_compat import _numpy_117, _numpy_120\nfrom dask.array.utils import assert_eq, IS_NEP18_ACTIVE\n\nsparse = pytest.importorskip(\"sparse\")\nif sparse:\n # Test failures on older versions of Numba.\n # Conda-Forge provides 0.35.0 on windows right now, causing failures like\n # searchsorted() got an 
unexpected keyword argument 'side'\n pytest.importorskip(\"numba\", minversion=\"0.40.0\")\n\nnumpy_120_xfail = pytest.mark.xfail(\n _numpy_120, reason=\"https://github.com/pydata/sparse/issues/383\"\n)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_functions_functions._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_functions_functions._", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_sparse.py", "file_name": "test_sparse.py", "file_type": "text/x-python", "category": "test", "start_line": 23, "end_line": 72, "span_ids": ["imports"], "tokens": 636}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "functions = [\n lambda x: x,\n lambda x: da.expm1(x),\n lambda x: 2 * x,\n lambda x: x / 2,\n lambda x: x ** 2,\n pytest.param(lambda x: x + x, marks=numpy_120_xfail),\n pytest.param(lambda x: x * x, marks=numpy_120_xfail),\n pytest.param(lambda x: x[0], marks=numpy_120_xfail),\n pytest.param(lambda x: x[:, 1], marks=numpy_120_xfail),\n pytest.param(lambda x: x[:1, None, 1:3], marks=numpy_120_xfail),\n lambda x: x.T,\n lambda x: da.transpose(x, (1, 2, 0)),\n pytest.param(lambda x: x.sum(), marks=numpy_120_xfail),\n pytest.param(lambda x: x.mean(), marks=numpy_120_xfail),\n lambda x: x.moment(order=0),\n pytest.param(\n lambda x: x.std(),\n marks=pytest.mark.xfail(\n reason=\"fixed in https://github.com/pydata/sparse/pull/243\"\n ),\n ),\n pytest.param(\n lambda x: x.var(),\n marks=pytest.mark.xfail(\n reason=\"fixed in https://github.com/pydata/sparse/pull/243\"\n ),\n ),\n pytest.param(lambda x: x.dot(np.arange(x.shape[-1])), marks=numpy_120_xfail),\n pytest.param(lambda x: x.dot(np.eye(x.shape[-1])), marks=numpy_120_xfail),\n pytest.param(\n lambda x: da.tensordot(x, np.ones(x.shape[:2]), axes=[(0, 1), (0, 1)]),\n marks=numpy_120_xfail,\n ),\n pytest.param(lambda x: x.sum(axis=0), marks=numpy_120_xfail),\n pytest.param(lambda x: x.max(axis=0), marks=numpy_120_xfail),\n pytest.param(lambda x: x.sum(axis=(1, 2)), marks=numpy_120_xfail),\n lambda x: x.astype(np.complex128),\n lambda x: x.map_blocks(lambda x: x * 2),\n lambda x: x.map_overlap(lambda x: x * 2, depth=0, trim=True),\n lambda x: x.map_overlap(lambda x: x * 2, depth=0, trim=False),\n lambda x: x.round(1),\n lambda x: x.reshape((x.shape[0] * x.shape[1], x.shape[2])),\n lambda x: abs(x),\n lambda x: x > 0.5,\n lambda x: x.rechunk((4, 4, 4)),\n pytest.param(lambda x: x.rechunk((2, 2, 1)), marks=numpy_120_xfail),\n lambda x: np.isneginf(x),\n lambda x: np.isposinf(x),\n]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_basic_test_basic.if_yy_shape_.if_not_isinstance_zz_spa._mostly_dense": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_basic_test_basic.if_yy_shape_.if_not_isinstance_zz_spa._mostly_dense", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_sparse.py", "file_name": "test_sparse.py", "file_type": "text/x-python", "category": "test", "start_line": 75, "end_line": 90, "span_ids": ["test_basic"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"func\", functions)\ndef test_basic(func):\n x = da.random.random((2, 3, 4), chunks=(1, 2, 2))\n x[x < 0.8] = 0\n\n y = x.map_blocks(sparse.COO.from_numpy)\n\n xx = func(x)\n yy = func(y)\n\n assert_eq(xx, yy)\n\n if yy.shape:\n zz = yy.compute()\n if not isinstance(zz, sparse.COO):\n assert (zz != 1).sum() > np.prod(zz.shape) / 2 # mostly dense", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_tensordot_test_tensordot.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_tensordot_test_tensordot.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_sparse.py", "file_name": "test_sparse.py", "file_type": "text/x-python", "category": "test", "start_line": 93, "end_line": 111, "span_ids": ["test_tensordot"], "tokens": 270}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n sparse.__version__ < \"0.7.0+10\",\n reason=\"fixed in https://github.com/pydata/sparse/pull/256\",\n)\ndef test_tensordot():\n x = da.random.random((2, 3, 4), chunks=(1, 2, 2))\n x[x < 0.8] = 0\n y = da.random.random((4, 3, 2), chunks=(2, 2, 1))\n y[y < 0.8] = 0\n\n xx = x.map_blocks(sparse.COO.from_numpy)\n yy = y.map_blocks(sparse.COO.from_numpy)\n\n assert_eq(da.tensordot(x, y, axes=(2, 0)), da.tensordot(xx, yy, axes=(2, 0)))\n assert_eq(da.tensordot(x, y, axes=(1, 1)), da.tensordot(xx, yy, axes=(1, 1)))\n assert_eq(\n da.tensordot(x, y, axes=((1, 2), (1, 0))),\n da.tensordot(xx, yy, axes=((1, 2), (1, 0))),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_mixed_concatenate_test_mixed_concatenate.assert_eq_dd_ss_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_mixed_concatenate_test_mixed_concatenate.assert_eq_dd_ss_", "embedding": null, "metadata": {"file_path": 
"dask/array/tests/test_sparse.py", "file_name": "test_sparse.py", "file_type": "text/x-python", "category": "test", "start_line": 114, "end_line": 129, "span_ids": ["test_mixed_concatenate"], "tokens": 152}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(reason=\"upstream change\", strict=False)\n@pytest.mark.parametrize(\"func\", functions)\ndef test_mixed_concatenate(func):\n x = da.random.random((2, 3, 4), chunks=(1, 2, 2))\n\n y = da.random.random((2, 3, 4), chunks=(1, 2, 2))\n y[y < 0.8] = 0\n yy = y.map_blocks(sparse.COO.from_numpy)\n\n d = da.concatenate([x, y], axis=0)\n s = da.concatenate([x, yy], axis=0)\n\n dd = func(d)\n ss = func(s)\n\n assert_eq(dd, ss)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_mixed_random_test_mixed_random.assert_eq_dd_ss_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_mixed_random_test_mixed_random.assert_eq_dd_ss_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_sparse.py", "file_name": "test_sparse.py", "file_type": "text/x-python", "category": "test", "start_line": 132, "end_line": 144, "span_ids": ["test_mixed_random"], "tokens": 118}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(reason=\"upstream change\", strict=False)\n@pytest.mark.parametrize(\"func\", functions)\ndef test_mixed_random(func):\n d = da.random.random((4, 3, 4), chunks=(1, 2, 2))\n d[d < 0.7] = 0\n\n fn = lambda x: sparse.COO.from_numpy(x) if random.random() < 0.5 else x\n s = d.map_blocks(fn)\n\n dd = func(d)\n ss = func(s)\n\n assert_eq(dd, ss)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_mixed_output_type_test_mixed_output_type.assert_zz_nnz_y_comput": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_mixed_output_type_test_mixed_output_type.assert_zz_nnz_y_comput", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_sparse.py", "file_name": "test_sparse.py", "file_type": "text/x-python", "category": "test", "start_line": 147, "end_line": 161, "span_ids": ["test_mixed_output_type"], "tokens": 135}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(reason=\"upstream change\", strict=False)\ndef test_mixed_output_type():\n y = da.random.random((10, 10), chunks=(5, 5))\n y[y < 0.8] = 0\n y = y.map_blocks(sparse.COO.from_numpy)\n\n x = da.zeros((10, 1), chunks=(5, 1))\n\n z = da.concatenate([x, y], axis=1)\n\n assert z.shape == (10, 11)\n\n zz = z.compute()\n assert isinstance(zz, sparse.COO)\n assert zz.nnz == y.compute().nnz", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_metadata_test_metadata.if_IS_NEP18_ACTIVE_.if__numpy_117_.assert_isinstance_np_conc": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_metadata_test_metadata.if_IS_NEP18_ACTIVE_.if__numpy_117_.assert_isinstance_np_conc", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_sparse.py", "file_name": "test_sparse.py", "file_type": "text/x-python", "category": "test", "start_line": 164, "end_line": 185, "span_ids": ["test_metadata"], "tokens": 296}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@numpy_120_xfail\ndef test_metadata():\n y = da.random.random((10, 10), chunks=(5, 5))\n y[y < 0.8] = 0\n z = sparse.COO.from_numpy(y.compute())\n y = y.map_blocks(sparse.COO.from_numpy)\n\n assert isinstance(y._meta, sparse.COO)\n assert isinstance((y + 1)._meta, sparse.COO)\n assert isinstance(y.sum(axis=0)._meta, sparse.COO)\n assert isinstance(y.var(axis=0)._meta, sparse.COO)\n assert isinstance(y[:5, ::2]._meta, sparse.COO)\n assert isinstance(y.rechunk((2, 2))._meta, sparse.COO)\n assert isinstance((y - z)._meta, sparse.COO)\n assert isinstance(y.persist()._meta, sparse.COO)\n if IS_NEP18_ACTIVE:\n assert isinstance(np.concatenate([y, y])._meta, sparse.COO)\n assert isinstance(np.concatenate([y, y[:0], y])._meta, sparse.COO)\n assert isinstance(np.stack([y, y])._meta, sparse.COO)\n if _numpy_117:\n assert isinstance(np.stack([y[:0], y[:0]])._meta, sparse.COO)\n assert isinstance(np.concatenate([y[:0], y[:0]])._meta, sparse.COO)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_html_repr_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_html_repr_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_sparse.py", "file_name": "test_sparse.py", "file_type": "text/x-python", "category": "test", "start_line": 188, "end_line": 234, "span_ids": ["test_html_repr", "test_map_blocks", "test_meta_from_array", "test_from_delayed_meta", "test_from_array"], "tokens": 356}, "excluded_embed_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_html_repr():\n y = da.random.random((10, 10), chunks=(5, 5))\n y[y < 0.8] = 0\n y = y.map_blocks(sparse.COO.from_numpy)\n\n text = y._repr_html_()\n\n assert \"COO\" in text\n assert \"sparse\" in text\n assert \"Bytes\" not in text\n\n\n@numpy_120_xfail\ndef test_from_delayed_meta():\n def f():\n return sparse.COO.from_numpy(np.eye(3))\n\n d = dask.delayed(f)()\n x = da.from_delayed(d, shape=(3, 3), meta=sparse.COO.from_numpy(np.eye(1)))\n assert isinstance(x._meta, sparse.COO)\n assert_eq(x, x)\n\n\n@numpy_120_xfail\ndef test_from_array():\n x = sparse.COO.from_numpy(np.eye(10))\n d = da.from_array(x, chunks=(5, 5))\n\n assert isinstance(d._meta, sparse.COO)\n assert_eq(d, d)\n assert isinstance(d.compute(), sparse.COO)\n\n\n@numpy_120_xfail\ndef test_map_blocks():\n x = da.eye(10, chunks=5)\n y = x.map_blocks(sparse.COO.from_numpy, meta=sparse.COO.from_numpy(np.eye(1)))\n assert isinstance(y._meta, sparse.COO)\n assert_eq(y, y)\n\n\n@numpy_120_xfail\ndef test_meta_from_array():\n x = sparse.COO.from_numpy(np.eye(1))\n y = da.utils.meta_from_array(x, ndim=2)\n assert isinstance(y, sparse.COO)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_stats.py_pytest_test_measures.assert_isinstance_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_stats.py_pytest_test_measures.assert_isinstance_result_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_stats.py", "file_name": "test_stats.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 32, "span_ids": ["imports", "test_measures"], "tokens": 255}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\nscipy = pytest.importorskip(\"scipy\")\nimport numpy as np\nimport dask.array as da\nfrom dask.array.utils import assert_eq\nfrom dask.delayed import Delayed\nimport dask.array.stats\nfrom dask.array.utils import allclose\n\n\n@pytest.mark.parametrize(\n \"kind, kwargs\", [(\"skew\", {}), (\"kurtosis\", {}), (\"kurtosis\", {\"fisher\": False})]\n)\n@pytest.mark.parametrize(\"single_dim\", [True, False])\ndef test_measures(kind, kwargs, single_dim):\n np.random.seed(seed=1337)\n if single_dim:\n x = np.random.random(size=(30,))\n else:\n x = np.random.random(size=(30, 2))\n y = da.from_array(x, 3)\n dfunc = getattr(dask.array.stats, kind)\n sfunc = getattr(scipy.stats, kind)\n\n expected = sfunc(x, **kwargs)\n result = dfunc(y, **kwargs)\n if np.isscalar(expected):\n # make it an array to account for possible numeric errors\n expected = np.array(expected)\n assert_eq(result, expected)\n assert isinstance(result, da.Array)", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_stats.py_test_bias_raises_test_one.assert_allclose_result_co": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_stats.py_test_bias_raises_test_one.assert_allclose_result_co", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_stats.py", "file_name": "test_stats.py", "file_type": "text/x-python", "category": "test", "start_line": 35, "end_line": 60, "span_ids": ["test_bias_raises", "test_one"], "tokens": 191}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_bias_raises():\n x = np.random.random(size=(30, 2))\n y = da.from_array(x, 3)\n\n with pytest.raises(NotImplementedError):\n dask.array.stats.skew(y, bias=False)\n\n with pytest.raises(NotImplementedError):\n dask.array.stats.kurtosis(y, bias=False)\n\n\n@pytest.mark.parametrize(\n \"kind\", [\"chisquare\", \"power_divergence\", \"normaltest\", \"skewtest\", \"kurtosistest\"]\n)\ndef test_one(kind):\n a = np.random.random(size=30)\n a_ = da.from_array(a, 3)\n\n dask_test = getattr(dask.array.stats, kind)\n scipy_test = getattr(scipy.stats, kind)\n\n result = dask_test(a_)\n expected = scipy_test(a)\n\n assert isinstance(result, Delayed)\n assert allclose(result.compute(), expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_stats.py_test_two_test_two._assert_dask_compute_re": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_stats.py_test_two_test_two._assert_dask_compute_re", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_stats.py", "file_name": "test_stats.py", "file_type": "text/x-python", "category": "test", "start_line": 63, "end_line": 93, "span_ids": ["test_two"], "tokens": 266}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"kind, kwargs\",\n [\n (\"ttest_ind\", {}),\n (\"ttest_ind\", {\"equal_var\": False}),\n (\"ttest_1samp\", {}),\n (\"ttest_rel\", {}),\n (\"chisquare\", {}),\n (\"power_divergence\", {}),\n (\"power_divergence\", {\"lambda_\": 0}),\n (\"power_divergence\", {\"lambda_\": -1}),\n (\"power_divergence\", {\"lambda_\": \"neyman\"}),\n ],\n)\ndef test_two(kind, kwargs):\n a = np.random.random(size=30)\n b = np.random.random(size=30)\n a_ = da.from_array(a, 3)\n b_ = da.from_array(b, 3)\n\n dask_test = getattr(dask.array.stats, kind)\n scipy_test = getattr(scipy.stats, kind)\n\n with pytest.warns(None): # maybe 
overflow warning (power_divergence)\n result = dask_test(a_, b_, **kwargs)\n expected = scipy_test(a, b, **kwargs)\n\n assert isinstance(result, Delayed)\n assert allclose(result.compute(), expected)\n # fails occasionally. shouldn't this be exact?\n # assert dask.compute(*result) == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_stats.py_test_moments_test_anova.assert_allclose_result_co": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_stats.py_test_moments_test_anova.assert_allclose_result_co", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_stats.py", "file_name": "test_stats.py", "file_type": "text/x-python", "category": "test", "start_line": 96, "end_line": 113, "span_ids": ["test_moments", "test_anova"], "tokens": 154}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"k\", range(5))\ndef test_moments(k):\n x = np.random.random(size=(30, 2))\n y = da.from_array(x, 3)\n\n expected = scipy.stats.moment(x, k)\n result = dask.array.stats.moment(y, k)\n assert_eq(result, expected)\n\n\ndef test_anova():\n np_args = [i * np.random.random(size=(30,)) for i in range(4)]\n da_args = [da.from_array(x, chunks=10) for x in np_args]\n\n result = dask.array.stats.f_oneway(*da_args)\n expected = scipy.stats.f_oneway(*np_args)\n\n assert allclose(result.compute(), expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_stats.py_test_nan_raises_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_stats.py_test_nan_raises_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_stats.py", "file_name": "test_stats.py", "file_type": "text/x-python", "category": "test", "start_line": 116, "end_line": 146, "span_ids": ["test_skew_raises", "test_power_divergence_invalid", "test_nan_raises"], "tokens": 246}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"func, nargs\",\n [\n (dask.array.stats.ttest_1samp, 2),\n (dask.array.stats.ttest_rel, 2),\n (dask.array.stats.skewtest, 1),\n (dask.array.stats.kurtosis, 1),\n (dask.array.stats.kurtosistest, 1),\n (dask.array.stats.normaltest, 1),\n (dask.array.stats.moment, 1),\n ],\n)\n@pytest.mark.parametrize(\"nan_policy\", [\"omit\", \"raise\"])\ndef test_nan_raises(func, nargs, nan_policy):\n with pytest.raises(NotImplementedError):\n 
func(*(None,) * nargs, nan_policy=nan_policy)\n\n\ndef test_power_divergence_invalid():\n a = np.random.random(size=30)\n a_ = da.from_array(a, 3)\n\n with pytest.raises(ValueError):\n dask.array.stats.power_divergence(a_, lambda_=\"wrong\")\n\n\ndef test_skew_raises():\n a = da.ones((7,), chunks=(7,))\n with pytest.raises(ValueError, match=\"7 samples\"):\n dask.array.stats.skewtest(a)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_svg.py_da_test_basic.None_6": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_svg.py_da_test_basic.None_6", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_svg.py", "file_name": "test_svg.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 19, "span_ids": ["imports", "parses", "test_basic"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import dask.array as da\nfrom dask.array.svg import draw_sizes\nimport xml.etree.ElementTree\nimport pytest\n\n\ndef parses(text):\n cleaned = text.replace(\"→\", \"\") # xml doesn't like rightarrow character\n assert xml.etree.ElementTree.fromstring(cleaned) is not None # parses cleanly\n\n\ndef test_basic():\n parses(da.ones(10).to_svg())\n parses(da.ones((10, 10)).to_svg())\n parses(da.ones((10, 10, 10)).to_svg())\n parses(da.ones((10, 10, 10, 10)).to_svg())\n parses(da.ones((10, 10, 10, 10, 10)).to_svg())\n parses(da.ones((10, 10, 10, 10, 10, 10)).to_svg())\n parses(da.ones((10, 10, 10, 10, 10, 10, 10)).to_svg())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_svg.py_test_repr_html_test_errors.assert_unknown_chunk_siz": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_svg.py_test_repr_html_test_errors.assert_unknown_chunk_siz", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_svg.py", "file_name": "test_svg.py", "file_type": "text/x-python", "category": "test", "start_line": 22, "end_line": 52, "span_ids": ["test_errors", "test_repr_html"], "tokens": 267}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_repr_html():\n assert da.ones([])._repr_html_()\n assert da.ones(10)[:0]._repr_html_()\n assert da.ones(10)._repr_html_()\n assert da.ones((10, 10))._repr_html_()\n assert da.ones((10, 10, 10))._repr_html_()\n assert da.ones((10, 10, 10, 10))._repr_html_()\n\n\ndef test_errors():\n # empty arrays\n with 
pytest.raises(NotImplementedError) as excpt:\n da.ones([]).to_svg()\n assert \"0 dimensions\" in str(excpt.value)\n\n # Scalars\n with pytest.raises(NotImplementedError) as excpt:\n da.asarray(1).to_svg()\n assert \"0 dimensions\" in str(excpt.value)\n\n # 0-length dims arrays\n with pytest.raises(NotImplementedError) as excpt:\n da.ones(10)[:0].to_svg()\n assert \"0-length dimensions\" in str(excpt.value)\n\n # unknown chunk sizes\n with pytest.raises(NotImplementedError) as excpt:\n x = da.ones(10)\n x = x[x > 5]\n x.to_svg()\n assert \"unknown chunk sizes\" in str(excpt.value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_svg.py_test_repr_html_size_units_test_repr_html_size_units.parses_x__repr_html__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_svg.py_test_repr_html_size_units_test_repr_html_size_units.parses_x__repr_html__", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_svg.py", "file_name": "test_svg.py", "file_type": "text/x-python", "category": "test", "start_line": 55, "end_line": 67, "span_ids": ["test_repr_html_size_units"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_repr_html_size_units():\n x = da.ones((10000, 5000))\n x = da.ones((3000, 10000), chunks=(1000, 1000))\n text = x._repr_html_()\n\n assert \"MB\" in text or \"MiB\" in text\n assert str(x.shape) in text\n assert str(x.dtype) in text\n\n parses(text)\n\n x = da.ones((3000, 10000, 50), chunks=(1000, 1000, 10))\n parses(x._repr_html_())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_testing.py_sys_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_testing.py_sys_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_testing.py", "file_name": "test_testing.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 20, "span_ids": ["imports", "test_assert_eq_checks_scalars"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import sys\n\nimport pytest\nimport numpy as np\n\nimport dask.array as da\nfrom dask.array.utils import assert_eq\n\n\n@pytest.mark.skipif(sys.flags.optimize, reason=\"Assertions disabled.\")\ndef test_assert_eq_checks_scalars():\n # https://github.com/dask/dask/issues/2680\n with pytest.raises(AssertionError):\n assert_eq(np.array(0), np.array(1))\n\n a = 
da.from_array(np.array([0]), 1)[0]\n b = np.array([1])[0]\n with pytest.raises(AssertionError):\n assert_eq(a, b)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_pickle_unary_ufuncs._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_pickle_unary_ufuncs._", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 131, "span_ids": ["impl:5", "imports", "test_ufunc", "test_ufunc_meta"], "tokens": 689}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pickle\nfrom functools import partial\nfrom operator import add\n\nimport pytest\n\nnp = pytest.importorskip(\"numpy\")\n\nimport dask.array as da\nfrom dask.array.ufunc import da_frompyfunc\nfrom dask.array.utils import assert_eq\nfrom dask.base import tokenize\n\n\nDISCLAIMER = \"\"\"\nThis docstring was copied from numpy.{name}.\n\nSome inconsistencies with the Dask version may exist.\n\"\"\"\n\n\n@pytest.mark.parametrize(\"name\", [\"log\", \"modf\", \"frexp\"])\ndef test_ufunc_meta(name):\n disclaimer = DISCLAIMER.format(name=name)\n skip_test = \" # doctest: +SKIP\"\n ufunc = getattr(da, name)\n assert ufunc.__name__ == name\n assert disclaimer in ufunc.__doc__\n\n assert (\n ufunc.__doc__.replace(disclaimer, \"\").replace(skip_test, \"\")\n == getattr(np, name).__doc__\n )\n\n\ndef test_ufunc():\n for attr in [\"nin\", \"nargs\", \"nout\", \"ntypes\", \"identity\", \"signature\", \"types\"]:\n assert getattr(da.log, attr) == getattr(np.log, attr)\n\n with pytest.raises(AttributeError):\n da.log.not_an_attribute\n\n assert repr(da.log) == repr(np.log)\n assert \"nin\" in dir(da.log)\n assert \"outer\" in dir(da.log)\n\n\nbinary_ufuncs = [\n \"add\",\n \"arctan2\",\n \"copysign\",\n \"divide\",\n \"equal\",\n \"bitwise_and\",\n \"bitwise_or\",\n \"bitwise_xor\",\n \"floor_divide\",\n \"fmax\",\n \"fmin\",\n \"fmod\",\n \"greater\",\n \"greater_equal\",\n \"hypot\",\n \"ldexp\",\n \"less\",\n \"less_equal\",\n \"logaddexp\",\n \"logaddexp2\",\n \"logical_and\",\n \"logical_or\",\n \"logical_xor\",\n \"maximum\",\n \"minimum\",\n \"mod\",\n \"multiply\",\n \"nextafter\",\n \"not_equal\",\n \"power\",\n \"remainder\",\n \"subtract\",\n \"true_divide\",\n \"float_power\",\n]\n\nunary_ufuncs = [\n \"absolute\",\n \"arccos\",\n \"arccosh\",\n \"arcsin\",\n \"arcsinh\",\n \"arctan\",\n \"arctanh\",\n \"bitwise_not\",\n \"cbrt\",\n \"ceil\",\n \"conj\",\n \"cos\",\n \"cosh\",\n \"deg2rad\",\n \"degrees\",\n \"exp\",\n \"exp2\",\n \"expm1\",\n \"fabs\",\n \"fix\",\n \"floor\",\n \"invert\",\n \"isfinite\",\n \"isinf\",\n \"isnan\",\n \"log\",\n \"log10\",\n \"log1p\",\n \"log2\",\n \"logical_not\",\n \"negative\",\n \"rad2deg\",\n \"radians\",\n \"reciprocal\",\n \"rint\",\n \"sign\",\n \"signbit\",\n \"sin\",\n \"sinh\",\n \"spacing\",\n \"sqrt\",\n \"square\",\n \"tan\",\n \"tanh\",\n 
\"trunc\",\n]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_unary_ufunc_test_unary_ufunc.None_3.assert_eq_dafunc_arr_np": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_unary_ufunc_test_unary_ufunc.None_3.assert_eq_dafunc_arr_np", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 134, "end_line": 160, "span_ids": ["test_unary_ufunc"], "tokens": 309}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"ufunc\", unary_ufuncs)\ndef test_unary_ufunc(ufunc):\n if ufunc == \"fix\":\n pytest.skip(\"fix calls floor in a way that we do not yet support\")\n dafunc = getattr(da, ufunc)\n npfunc = getattr(np, ufunc)\n\n arr = np.random.randint(1, 100, size=(20, 20))\n darr = da.from_array(arr, 3)\n\n with pytest.warns(None): # some invalid values (arccos, arcsin, etc.)\n # applying Dask ufunc doesn't trigger computation\n assert isinstance(dafunc(darr), da.Array)\n assert_eq(dafunc(darr), npfunc(arr), equal_nan=True)\n\n with pytest.warns(None): # some invalid values (arccos, arcsin, etc.)\n # applying NumPy ufunc is lazy\n if isinstance(npfunc, np.ufunc):\n assert isinstance(npfunc(darr), da.Array)\n else:\n assert isinstance(npfunc(darr), np.ndarray)\n assert_eq(npfunc(darr), npfunc(arr), equal_nan=True)\n\n with pytest.warns(None): # some invalid values (arccos, arcsin, etc.)\n # applying Dask ufunc to normal ndarray triggers computation\n assert isinstance(dafunc(arr), np.ndarray)\n assert_eq(dafunc(arr), npfunc(arr), equal_nan=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_binary_ufunc_test_binary_ufunc.None_1.assert_eq_dafunc_10_arr1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_binary_ufunc_test_binary_ufunc.None_1.assert_eq_dafunc_10_arr1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 163, "end_line": 199, "span_ids": ["test_binary_ufunc"], "tokens": 438}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"ufunc\", binary_ufuncs)\ndef test_binary_ufunc(ufunc):\n dafunc = getattr(da, ufunc)\n npfunc = 
getattr(np, ufunc)\n\n arr1 = np.random.randint(1, 100, size=(20, 20))\n darr1 = da.from_array(arr1, 3)\n\n arr2 = np.random.randint(1, 100, size=(20, 20))\n darr2 = da.from_array(arr2, 3)\n\n # applying Dask ufunc doesn't trigger computation\n assert isinstance(dafunc(darr1, darr2), da.Array)\n assert_eq(dafunc(darr1, darr2), npfunc(arr1, arr2))\n\n # applying NumPy ufunc triggers computation or is lazy\n assert isinstance(npfunc(darr1, darr2), da.Array)\n assert_eq(npfunc(darr1, darr2), npfunc(arr1, arr2))\n\n # applying Dask ufunc to normal ndarray triggers computation\n assert isinstance(dafunc(arr1, arr2), np.ndarray)\n assert_eq(dafunc(arr1, arr2), npfunc(arr1, arr2))\n\n # with scalar\n assert isinstance(dafunc(darr1, 10), da.Array)\n assert_eq(dafunc(darr1, 10), npfunc(arr1, 10))\n\n with pytest.warns(None): # overflow in ldexp\n assert isinstance(dafunc(10, darr1), da.Array)\n assert_eq(dafunc(10, darr1), npfunc(10, arr1))\n\n assert isinstance(dafunc(arr1, 10), np.ndarray)\n assert_eq(dafunc(arr1, 10), npfunc(arr1, 10))\n\n with pytest.warns(None): # overflow in ldexp\n assert isinstance(dafunc(10, arr1), np.ndarray)\n assert_eq(dafunc(10, arr1), npfunc(10, arr1))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_ufunc_outer_test_ufunc_outer.None_2.da_sin_outer_darr1_darr2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_ufunc_outer_test_ufunc_outer.None_2.da_sin_outer_darr1_darr2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 202, "end_line": 241, "span_ids": ["test_ufunc_outer"], "tokens": 474}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_ufunc_outer():\n arr1 = np.random.randint(1, 100, size=20)\n darr1 = da.from_array(arr1, 3)\n\n arr2 = np.random.randint(1, 100, size=(10, 3))\n darr2 = da.from_array(arr2, 3)\n\n # Check output types\n assert isinstance(da.add.outer(darr1, darr2), da.Array)\n assert isinstance(da.add.outer(arr1, darr2), da.Array)\n assert isinstance(da.add.outer(darr1, arr2), da.Array)\n assert isinstance(da.add.outer(arr1, arr2), np.ndarray)\n\n # Check mix of dimensions, dtypes, and numpy/dask/object\n cases = [\n ((darr1, darr2), (arr1, arr2)),\n ((darr2, darr1), (arr2, arr1)),\n ((darr2, darr1.astype(\"f8\")), (arr2, arr1.astype(\"f8\"))),\n ((darr1, arr2), (arr1, arr2)),\n ((darr1, 1), (arr1, 1)),\n ((1, darr2), (1, arr2)),\n ((1.5, darr2), (1.5, arr2)),\n (([1, 2, 3], darr2), ([1, 2, 3], arr2)),\n ((darr1.sum(), darr2), (arr1.sum(), arr2)),\n ((np.array(1), darr2), (np.array(1), arr2)),\n ]\n\n for (dA, dB), (A, B) in cases:\n assert_eq(da.add.outer(dA, dB), np.add.outer(A, B))\n\n # Check dtype kwarg works\n assert_eq(\n da.add.outer(darr1, darr2, dtype=\"f8\"), np.add.outer(arr1, arr2, dtype=\"f8\")\n )\n\n with pytest.raises(ValueError):\n da.add.outer(darr1, darr2, 
out=arr1)\n\n with pytest.raises(ValueError):\n da.sin.outer(darr1, darr2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_complex_test_complex.for_darr_arr_in_dacomp.assert_eq_dafunc_arr_np": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_complex_test_complex.for_darr_arr_in_dacomp.assert_eq_dafunc_arr_np", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 244, "end_line": 271, "span_ids": ["test_complex"], "tokens": 298}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"ufunc\", [\"isreal\", \"iscomplex\", \"real\", \"imag\"])\ndef test_complex(ufunc):\n\n dafunc = getattr(da, ufunc)\n # Note that these functions are not NumPy ufuncs\n npfunc = getattr(np, ufunc)\n\n real = np.random.randint(1, 100, size=(20, 20))\n imag = np.random.randint(1, 100, size=(20, 20)) * 1j\n comp = real + imag\n\n dareal = da.from_array(real, 3)\n daimag = da.from_array(imag, 3)\n dacomp = da.from_array(comp, 3)\n\n assert_eq(dacomp.real, comp.real)\n assert_eq(dacomp.imag, comp.imag)\n assert_eq(dacomp.conj(), comp.conj())\n\n for darr, arr in [(dacomp, comp), (dareal, real), (daimag, imag)]:\n # applying Dask ufunc doesn't trigger computation\n assert isinstance(dafunc(darr), da.Array)\n assert_eq(dafunc(darr), npfunc(arr))\n assert_eq(npfunc(darr), npfunc(arr))\n\n # applying Dask ufunc to normal ndarray triggers computation\n assert isinstance(dafunc(arr), np.ndarray)\n assert_eq(dafunc(arr), npfunc(arr))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_ufunc_2results_test_ufunc_2results.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_ufunc_2results_test_ufunc_2results.None_5", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 274, "end_line": 305, "span_ids": ["test_ufunc_2results"], "tokens": 293}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"ufunc\", [\"frexp\", \"modf\"])\ndef test_ufunc_2results(ufunc):\n\n dafunc = getattr(da, ufunc)\n npfunc = getattr(np, ufunc)\n\n arr = np.random.randint(1, 100, size=(20, 20))\n 
darr = da.from_array(arr, 3)\n\n # applying Dask ufunc doesn't trigger computation\n res1, res2 = dafunc(darr)\n assert isinstance(res1, da.Array)\n assert isinstance(res2, da.Array)\n exp1, exp2 = npfunc(arr)\n assert_eq(res1, exp1)\n assert_eq(res2, exp2)\n\n # applying NumPy ufunc is now lazy\n res1, res2 = npfunc(darr)\n assert isinstance(res1, da.Array)\n assert isinstance(res2, da.Array)\n exp1, exp2 = npfunc(arr)\n assert_eq(res1, exp1)\n assert_eq(res2, exp2)\n\n # applying Dask ufunc to normal ndarray triggers computation\n res1, res2 = dafunc(arr)\n assert isinstance(res1, da.Array)\n assert isinstance(res2, da.Array)\n exp1, exp2 = npfunc(arr)\n assert_eq(res1, exp1)\n assert_eq(res2, exp2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_clip_test_clip.assert_eq_x_clip_min_1_m": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_clip_test_clip.assert_eq_x_clip_min_1_m", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 308, "end_line": 317, "span_ids": ["test_clip"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_clip():\n x = np.random.normal(0, 10, size=(10, 10))\n d = da.from_array(x, chunks=(3, 4))\n\n assert_eq(x.clip(5), d.clip(5))\n assert_eq(x.clip(1, 5), d.clip(1, 5))\n assert_eq(x.clip(min=5), d.clip(min=5))\n assert_eq(x.clip(max=5), d.clip(max=5))\n assert_eq(x.clip(max=1, min=5), d.clip(max=1, min=5))\n assert_eq(x.clip(min=1, max=5), d.clip(min=1, max=5))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_angle_test_angle.assert_eq_da_angle_comp_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_angle_test_angle.assert_eq_da_angle_comp_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 320, "end_line": 329, "span_ids": ["test_angle"], "tokens": 119}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_angle():\n real = np.random.randint(1, 100, size=(20, 20))\n imag = np.random.randint(1, 100, size=(20, 20)) * 1j\n comp = real + imag\n dacomp = da.from_array(comp, 3)\n\n assert_eq(da.angle(dacomp), np.angle(comp))\n 
assert_eq(da.angle(dacomp, deg=True), np.angle(comp, deg=True))\n assert isinstance(da.angle(comp), np.ndarray)\n assert_eq(da.angle(comp), np.angle(comp))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_issignedinf_test_non_ufunc_others.assert_eq_dafunc_darr_n": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_issignedinf_test_non_ufunc_others.assert_eq_dafunc_darr_n", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 332, "end_line": 349, "span_ids": ["test_non_ufunc_others", "test_issignedinf"], "tokens": 189}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_issignedinf():\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n arr = np.random.randint(-1, 2, size=(20, 20)).astype(float) / 0\n darr = da.from_array(arr, 3)\n\n assert_eq(np.isneginf(arr), da.isneginf(darr))\n assert_eq(np.isposinf(arr), da.isposinf(darr))\n\n\n@pytest.mark.parametrize(\"func\", [\"i0\", \"sinc\", \"nan_to_num\"])\ndef test_non_ufunc_others(func):\n arr = np.random.randint(1, 100, size=(20, 20))\n darr = da.from_array(arr, 3)\n\n dafunc = getattr(da, func)\n npfunc = getattr(np, func)\n\n assert_eq(dafunc(darr), npfunc(arr), equal_nan=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_frompyfunc_test_frompyfunc.with_pytest_raises_NotImp.da_frompyfunc_lambda_x_y": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_frompyfunc_test_frompyfunc.with_pytest_raises_NotImp.da_frompyfunc_lambda_x_y", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 352, "end_line": 365, "span_ids": ["test_frompyfunc"], "tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_frompyfunc():\n myadd = da.frompyfunc(add, 2, 1)\n np_myadd = np.frompyfunc(add, 2, 1)\n\n x = np.random.normal(0, 10, size=(10, 10))\n dx = da.from_array(x, chunks=(3, 4))\n y = np.random.normal(0, 10, size=10)\n dy = da.from_array(y, chunks=2)\n\n assert_eq(myadd(dx, dy), np_myadd(x, y))\n assert_eq(myadd.outer(dx, dy), np_myadd.outer(x, y))\n\n with pytest.raises(NotImplementedError):\n da.frompyfunc(lambda x, 
y: (x + y, x - y), 2, 2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_frompyfunc_wrapper_test_frompyfunc_wrapper.assert_tokenize_da_frompy": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_frompyfunc_wrapper_test_frompyfunc_wrapper.assert_tokenize_da_frompy", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 368, "end_line": 398, "span_ids": ["test_frompyfunc_wrapper"], "tokens": 251}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_frompyfunc_wrapper():\n f = da_frompyfunc(add, 2, 1)\n np_f = np.frompyfunc(add, 2, 1)\n x = np.array([1, 2, 3])\n\n # Callable\n np.testing.assert_equal(f(x, 1), np_f(x, 1))\n\n # picklable\n f2 = pickle.loads(pickle.dumps(f))\n np.testing.assert_equal(f2(x, 1), np_f(x, 1))\n\n # Attributes\n assert f.ntypes == np_f.ntypes\n with pytest.raises(AttributeError):\n f.not_an_attribute\n\n # Tab completion\n assert \"ntypes\" in dir(f)\n\n # Methods\n np.testing.assert_equal(f.outer(x, x), np_f.outer(x, x))\n\n # funcname\n assert f.__name__ == \"frompyfunc-add\"\n\n # repr\n assert repr(f) == \"da.frompyfunc\"\n\n # tokenize\n assert tokenize(da_frompyfunc(add, 2, 1)) == tokenize(da_frompyfunc(add, 2, 1))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_array_ufunc_test_out_shape_mismatch.with_pytest_raises_ValueE.assert_np_log_x_out_y_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_array_ufunc_test_out_shape_mismatch.with_pytest_raises_ValueE.assert_np_log_x_out_y_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 401, "end_line": 449, "span_ids": ["test_array_ufunc_binop", "test_out_shape_mismatch", "test_out_numpy", "test_array_ufunc", "test_unsupported_ufunc_methods", "test_array_ufunc_out"], "tokens": 389}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_array_ufunc():\n x = np.arange(24).reshape((4, 6))\n d = da.from_array(x, chunks=(2, 3))\n\n for func in [np.sin, np.sum, np.negative, partial(np.prod, axis=0)]:\n assert isinstance(func(d), da.Array)\n assert_eq(func(d), func(x))\n\n\ndef test_array_ufunc_binop():\n 
x = np.arange(25).reshape((5, 5))\n d = da.from_array(x, chunks=(2, 2))\n\n for func in [np.add, np.multiply]:\n assert isinstance(func(d, d), da.Array)\n assert_eq(func(d, d), func(x, x))\n\n assert isinstance(func.outer(d, d), da.Array)\n assert_eq(func.outer(d, d), func.outer(x, x))\n\n\ndef test_array_ufunc_out():\n x = da.arange(10, chunks=(5,))\n np.sin(x, out=x)\n np.add(x, 10, out=x)\n assert_eq(x, np.sin(np.arange(10)) + 10)\n\n\ndef test_unsupported_ufunc_methods():\n x = da.arange(10, chunks=(5,))\n with pytest.raises(TypeError):\n assert np.add.reduce(x)\n\n\ndef test_out_numpy():\n x = da.arange(10, chunks=(5,))\n empty = np.empty(10, dtype=x.dtype)\n with pytest.raises((TypeError, NotImplementedError)) as info:\n np.add(x, 1, out=empty)\n\n assert \"ndarray\" in str(info.value)\n assert \"Array\" in str(info.value)\n\n\ndef test_out_shape_mismatch():\n x = da.arange(10, chunks=(5,))\n y = da.arange(15, chunks=(5,))\n with pytest.raises(ValueError):\n assert np.log(x, out=y)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_divmod_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_divmod_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 452, "end_line": 478, "span_ids": ["test_divmod"], "tokens": 267}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_divmod():\n arr1 = np.random.randint(1, 100, size=(20, 20))\n arr2 = np.random.randint(1, 100, size=(20, 20))\n\n darr1 = da.from_array(arr1, 3)\n darr2 = da.from_array(arr2, 3)\n\n result = np.divmod(darr1, 2.0)\n expected = np.divmod(arr1, 2.0)\n assert_eq(result[0], expected[0])\n assert_eq(result[1], expected[1])\n\n result = np.divmod(darr1, darr2)\n expected = np.divmod(arr1, arr2)\n assert_eq(result[0], expected[0])\n assert_eq(result[1], expected[1])\n\n result = divmod(darr1, 2.0)\n expected = divmod(arr1, 2.0)\n assert_eq(result[0], expected[0])\n assert_eq(result[1], expected[1])\n\n result = divmod(darr1, darr2)\n expected = divmod(arr1, arr2)\n assert_eq(result[0], expected[0])\n assert_eq(result[1], expected[1])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_wrap.py_pytest_test_can_make_really_big_array_of_ones.ones_shape_1000000_1000": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_wrap.py_pytest_test_can_make_really_big_array_of_ones.ones_shape_1000000_1000", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_wrap.py", "file_name": "test_wrap.py", "file_type": "text/x-python", "category": "test", 
"start_line": 1, "end_line": 58, "span_ids": ["test_can_make_really_big_array_of_ones", "test_size_as_list", "test_full_error_nonscalar_fill_value", "imports", "test_ones", "test_full_like_error_nonscalar_fill_value", "test_kwargs", "test_singleton_size", "test_full"], "tokens": 479}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\npytest.importorskip(\"numpy\")\n\nfrom dask.array.wrap import ones\nimport dask.array as da\nimport numpy as np\n\n\ndef test_ones():\n a = ones((10, 10), dtype=\"i4\", chunks=(4, 4))\n x = np.array(a)\n assert (x == np.ones((10, 10), \"i4\")).all()\n\n assert a.name.startswith(\"ones-\")\n\n\ndef test_size_as_list():\n a = ones([10, 10], dtype=\"i4\", chunks=(4, 4))\n x = np.array(a)\n assert (x == np.ones((10, 10), dtype=\"i4\")).all()\n\n\ndef test_singleton_size():\n a = ones(10, dtype=\"i4\", chunks=(4,))\n x = np.array(a)\n assert (x == np.ones(10, dtype=\"i4\")).all()\n\n\ndef test_kwargs():\n a = ones(10, dtype=\"i4\", chunks=(4,))\n x = np.array(a)\n assert (x == np.ones(10, dtype=\"i4\")).all()\n\n\ndef test_full():\n a = da.full((3, 3), 100, chunks=(2, 2), dtype=\"i8\")\n\n assert (a.compute() == 100).all()\n assert a.dtype == a.compute(scheduler=\"sync\").dtype == \"i8\"\n\n assert a.name.startswith(\"full-\")\n\n\ndef test_full_error_nonscalar_fill_value():\n with pytest.raises(ValueError, match=\"fill_value must be scalar\"):\n da.full((3, 3), [100, 100], chunks=(2, 2), dtype=\"i8\")\n\n\ndef test_full_like_error_nonscalar_fill_value():\n x = np.full((3, 3), 1, dtype=\"i8\")\n with pytest.raises(ValueError, match=\"fill_value must be scalar\"):\n da.full_like(x, [100, 100], chunks=(2, 2), dtype=\"i8\")\n\n\ndef test_can_make_really_big_array_of_ones():\n ones((1000000, 1000000), chunks=(100000, 100000))\n ones(shape=(1000000, 1000000), chunks=(100000, 100000))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_wrap.py_test_wrap_consistent_names_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_wrap.py_test_wrap_consistent_names_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_wrap.py", "file_name": "test_wrap.py", "file_type": "text/x-python", "category": "test", "start_line": 61, "end_line": 74, "span_ids": ["test_wrap_consistent_names"], "tokens": 207}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_wrap_consistent_names():\n assert sorted(ones(10, dtype=\"i4\", chunks=(4,)).dask) == sorted(\n ones(10, dtype=\"i4\", chunks=(4,)).dask\n )\n assert sorted(ones(10, dtype=\"i4\", chunks=(4,)).dask) != sorted(\n ones(10, chunks=(4,)).dask\n )\n assert sorted(da.full((3, 3), 100, chunks=(2, 2), dtype=\"f8\").dask) == 
sorted(\n da.full((3, 3), 100, chunks=(2, 2), dtype=\"f8\").dask\n )\n assert sorted(da.full((3, 3), 100, chunks=(2, 2), dtype=\"i2\").dask) != sorted(\n da.full((3, 3), 100, chunks=(2, 2)).dask\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_xarray.py_pytest_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_xarray.py_pytest_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_xarray.py", "file_name": "test_xarray.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 25, "span_ids": ["test_asarray", "imports", "test_asanyarray", "test_mean"], "tokens": 149}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\nimport dask.array as da\nfrom ..utils import assert_eq\n\nxr = pytest.importorskip(\"xarray\")\n\n\ndef test_mean():\n y = da.mean(xr.DataArray([1, 2, 3.0]))\n assert isinstance(y, da.Array)\n assert_eq(y, y)\n\n\ndef test_asarray():\n y = da.asarray(xr.DataArray([1, 2, 3.0]))\n assert isinstance(y, da.Array)\n assert_eq(y, y)\n\n\ndef test_asanyarray():\n y = da.asanyarray(xr.DataArray([1, 2, 3.0]))\n assert isinstance(y, da.Array)\n assert_eq(y, y)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tiledb_io.py_core_from_tiledb.return.core_from_array_tdb_chun": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tiledb_io.py_core_from_tiledb.return.core_from_array_tdb_chun", "embedding": null, "metadata": {"file_path": "dask/array/tiledb_io.py", "file_name": "tiledb_io.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 72, "span_ids": ["imports", "from_tiledb", "_tiledb_to_chunks"], "tokens": 536}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from . 
import core\n\n\ndef _tiledb_to_chunks(tiledb_array):\n schema = tiledb_array.schema\n return list(schema.domain.dim(i).tile for i in range(schema.ndim))\n\n\ndef from_tiledb(uri, attribute=None, chunks=None, storage_options=None, **kwargs):\n \"\"\"Load array from the TileDB storage format\n\n See https://docs.tiledb.io for more information about TileDB.\n\n Parameters\n ----------\n uri: TileDB array or str\n Location to load the data from\n attribute: str or None\n Attribute selection (single-attribute view on multi-attribute array)\n\n\n Returns\n -------\n\n A Dask Array\n\n Examples\n --------\n\n >>> # create a tiledb array\n >>> import tiledb, numpy as np, tempfile # doctest: +SKIP\n >>> uri = tempfile.NamedTemporaryFile().name # doctest: +SKIP\n >>> tiledb.from_numpy(uri, np.arange(0,9).reshape(3,3)) # doctest: +SKIP\n \n >>> # read back the array\n >>> import dask.array as da # doctest: +SKIP\n >>> tdb_ar = da.from_tiledb(uri) # doctest: +SKIP\n >>> tdb_ar.shape # doctest: +SKIP\n (3, 3)\n >>> tdb_ar.mean().compute() # doctest: +SKIP\n 4.0\n \"\"\"\n import tiledb\n\n tiledb_config = storage_options or dict()\n key = tiledb_config.pop(\"key\", None)\n\n if isinstance(uri, tiledb.Array):\n tdb = uri\n else:\n tdb = tiledb.open(uri, attr=attribute, config=tiledb_config, key=key)\n\n if tdb.schema.sparse:\n raise ValueError(\"Sparse TileDB arrays are not supported\")\n\n if not attribute:\n if tdb.schema.nattr > 1:\n raise TypeError(\n \"keyword 'attribute' must be provided \"\n \"when loading a multi-attribute TileDB array\"\n )\n else:\n attribute = tdb.schema.attr(0).name\n\n if tdb.iswritable:\n raise ValueError(\"TileDB array must be open for reading\")\n\n chunks = chunks or _tiledb_to_chunks(tdb)\n\n assert len(chunks) == tdb.schema.ndim\n\n return core.from_array(tdb, chunks, name=\"tiledb-%s\" % uri)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tiledb_io.py_to_tiledb_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tiledb_io.py_to_tiledb_", "embedding": null, "metadata": {"file_path": "dask/array/tiledb_io.py", "file_name": "tiledb_io.py", "file_type": "text/x-python", "category": "implementation", "start_line": 75, "end_line": 162, "span_ids": ["to_tiledb"], "tokens": 732}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_tiledb(\n darray, uri, compute=True, return_stored=False, storage_options=None, **kwargs\n):\n \"\"\"Save array to the TileDB storage format\n\n Save 'array' using the TileDB storage manager, to any TileDB-supported URI,\n including local disk, S3, or HDFS.\n\n See https://docs.tiledb.io for more information about TileDB.\n\n Parameters\n ----------\n\n darray: dask.array\n A dask array to write.\n uri:\n Any supported TileDB storage location.\n storage_options: dict\n Dict containing any configuration options for the TileDB backend.\n see https://docs.tiledb.io/en/stable/tutorials/config.html\n compute, return_stored: see ``store()``\n\n Returns\n -------\n\n 
None\n Unless ``return_stored`` is set to ``True`` (``False`` by default)\n\n Notes\n -----\n\n TileDB only supports regularly-chunked arrays.\n TileDB `tile extents`_ correspond to form 2 of the dask\n `chunk specification`_, and the conversion is\n done automatically for supported arrays.\n\n Examples\n --------\n\n >>> import dask.array as da, tempfile # doctest: +SKIP\n >>> uri = tempfile.NamedTemporaryFile().name # doctest: +SKIP\n >>> data = da.random.random((5, 5)) # doctest: +SKIP\n >>> da.to_tiledb(data, uri) # doctest: +SKIP\n >>> import tiledb # doctest: +SKIP\n >>> tdb_ar = tiledb.open(uri) # doctest: +SKIP\n >>> all(tdb_ar == data) # doctest: +SKIP\n True\n\n .. _chunk specification: http://docs.dask.org/en/latest/array-chunks.html\n .. _tile extents: https://docs.tiledb.io/en/stable/tutorials/tiling-dense.html\n \"\"\"\n import tiledb\n\n tiledb_config = storage_options or dict()\n # encryption key, if any\n key = tiledb_config.pop(\"key\", None)\n\n if not core._check_regular_chunks(darray.chunks):\n raise ValueError(\n \"Attempt to save array to TileDB with irregular \"\n \"chunking, please call `arr.rechunk(...)` first.\"\n )\n\n if isinstance(uri, str):\n chunks = [c[0] for c in darray.chunks]\n key = kwargs.pop(\"key\", None)\n # create a suitable, empty, writable TileDB array\n tdb = tiledb.empty_like(\n uri, darray, tile=chunks, config=tiledb_config, key=key, **kwargs\n )\n elif isinstance(uri, tiledb.Array):\n tdb = uri\n # sanity checks\n if not ((darray.dtype == tdb.dtype) and (darray.ndim == tdb.ndim)):\n raise ValueError(\n \"Target TileDB array layout is not compatible with source array\"\n )\n else:\n raise ValueError(\n \"'uri' must be string pointing to supported TileDB store location \"\n \"or an open, writable TileDB array.\"\n )\n\n if not (tdb.isopen and tdb.iswritable):\n raise ValueError(\"Target TileDB array is not open and writable.\")\n\n return darray.store(tdb, lock=False, compute=compute, return_stored=return_stored)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_from_operator_import_geti___array_wrap__.return.x___array_wrap___numpy_uf": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_from_operator_import_geti___array_wrap__.return.x___array_wrap___numpy_uf", "embedding": null, "metadata": {"file_path": "dask/array/ufunc.py", "file_name": "ufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 21, "span_ids": ["imports", "__array_wrap__"], "tokens": 143}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from operator import getitem\nfrom functools import partial\n\nimport numpy as np\n\nfrom .core import Array, elemwise, blockwise, apply_infer_dtype, asarray\nfrom .utils import empty_like_safe, IS_NEP18_ACTIVE\nfrom ..base import is_dask_collection, normalize_function\nfrom .. 
import core\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..utils import (\n funcname,\n derived_from,\n is_dataframe_like,\n is_series_like,\n is_index_like,\n)\n\n\ndef __array_wrap__(numpy_ufunc, x, *args, **kwargs):\n return x.__array_wrap__(numpy_ufunc(x, *args, **kwargs))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_wrap_elemwise_wrap_elemwise.return.derived_from_source_wrap": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_wrap_elemwise_wrap_elemwise.return.derived_from_source_wrap", "embedding": null, "metadata": {"file_path": "dask/array/ufunc.py", "file_name": "ufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 24, "end_line": 44, "span_ids": ["wrap_elemwise"], "tokens": 214}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def wrap_elemwise(numpy_ufunc, array_wrap=False, source=np):\n \"\"\" Wrap up numpy function into dask.array \"\"\"\n\n def wrapped(*args, **kwargs):\n dsk = [arg for arg in args if hasattr(arg, \"_elemwise\")]\n if len(dsk) > 0:\n is_dataframe = (\n is_dataframe_like(dsk[0])\n or is_series_like(dsk[0])\n or is_index_like(dsk[0])\n )\n if array_wrap and (is_dataframe or not IS_NEP18_ACTIVE):\n return dsk[0]._elemwise(__array_wrap__, numpy_ufunc, *args, **kwargs)\n else:\n return dsk[0]._elemwise(numpy_ufunc, *args, **kwargs)\n else:\n return numpy_ufunc(*args, **kwargs)\n\n # functools.wraps cannot wrap ufunc in Python 2.x\n wrapped.__name__ = numpy_ufunc.__name__\n return derived_from(source)(wrapped)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_da_frompyfunc_da_frompyfunc.__dir__.return.list_o_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_da_frompyfunc_da_frompyfunc.__dir__.return.list_o_", "embedding": null, "metadata": {"file_path": "dask/array/ufunc.py", "file_name": "ufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 47, "end_line": 79, "span_ids": ["da_frompyfunc.__init__", "da_frompyfunc.__reduce__", "da_frompyfunc.__repr__", "da_frompyfunc.__call__", "da_frompyfunc.__dir__", "da_frompyfunc.__getattr__", "da_frompyfunc.__dask_tokenize__", "da_frompyfunc"], "tokens": 299}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class da_frompyfunc(object):\n \"\"\"A serializable `frompyfunc` object\"\"\"\n\n def __init__(self, func, nin, nout):\n self._ufunc = 
np.frompyfunc(func, nin, nout)\n self._func = func\n self.nin = nin\n self.nout = nout\n self._name = funcname(func)\n self.__name__ = \"frompyfunc-%s\" % self._name\n\n def __repr__(self):\n return \"da.frompyfunc<%s, %d, %d>\" % (self._name, self.nin, self.nout)\n\n def __dask_tokenize__(self):\n return (normalize_function(self._func), self.nin, self.nout)\n\n def __reduce__(self):\n return (da_frompyfunc, (self._func, self.nin, self.nout))\n\n def __call__(self, *args, **kwargs):\n return self._ufunc(*args, **kwargs)\n\n def __getattr__(self, a):\n if not a.startswith(\"_\"):\n return getattr(self._ufunc, a)\n raise AttributeError(\"%r object has no attribute %r\" % (type(self).__name__, a))\n\n def __dir__(self):\n o = set(dir(type(self)))\n o.update(self.__dict__)\n o.update(dir(self._ufunc))\n return list(o)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_frompyfunc_ufunc.__repr__.return.repr_self__ufunc_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_frompyfunc_ufunc.__repr__.return.repr_self__ufunc_", "embedding": null, "metadata": {"file_path": "dask/array/ufunc.py", "file_name": "ufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 82, "end_line": 122, "span_ids": ["ufunc.__getattr__", "ufunc", "ufunc.__init__", "frompyfunc", "ufunc.__dir__", "ufunc.__repr__"], "tokens": 292}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef frompyfunc(func, nin, nout):\n if nout > 1:\n raise NotImplementedError(\"frompyfunc with more than one output\")\n return ufunc(da_frompyfunc(func, nin, nout))\n\n\nclass ufunc(object):\n _forward_attrs = {\n \"nin\",\n \"nargs\",\n \"nout\",\n \"ntypes\",\n \"identity\",\n \"signature\",\n \"types\",\n }\n\n def __init__(self, ufunc):\n if not isinstance(ufunc, (np.ufunc, da_frompyfunc)):\n raise TypeError(\n \"must be an instance of `ufunc` or \"\n \"`da_frompyfunc`, got `%s\" % type(ufunc).__name__\n )\n self._ufunc = ufunc\n self.__name__ = ufunc.__name__\n if isinstance(ufunc, np.ufunc):\n derived_from(np)(self)\n\n def __getattr__(self, key):\n if key in self._forward_attrs:\n return getattr(self._ufunc, key)\n raise AttributeError(\n \"%r object has no attribute %r\" % (type(self).__name__, key)\n )\n\n def __dir__(self):\n return list(self._forward_attrs.union(dir(type(self)), self.__dict__))\n\n def __repr__(self):\n return repr(self._ufunc)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_ufunc.__call___ufunc.__call__.if_len_dsks_0_.else_.return.self__ufunc_args_kwar": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_ufunc.__call___ufunc.__call__.if_len_dsks_0_.else_.return.self__ufunc_args_kwar", "embedding": null, "metadata": {"file_path": "dask/array/ufunc.py", "file_name": "ufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 124, "end_line": 135, "span_ids": ["ufunc.__call__"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ufunc(object):\n\n def __call__(self, *args, **kwargs):\n dsks = [arg for arg in args if hasattr(arg, \"_elemwise\")]\n if len(dsks) > 0:\n for dsk in dsks:\n result = dsk._elemwise(self._ufunc, *args, **kwargs)\n if type(result) != type(NotImplemented):\n return result\n raise TypeError(\n \"Parameters of such types are not supported by \" + self.__name__\n )\n else:\n return self._ufunc(*args, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_ufunc.outer_ufunc.outer.return.blockwise_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_ufunc.outer_ufunc.outer.return.blockwise_", "embedding": null, "metadata": {"file_path": "dask/array/ufunc.py", "file_name": "ufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 137, "end_line": 185, "span_ids": ["ufunc.outer"], "tokens": 349}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ufunc(object):\n\n @derived_from(np.ufunc)\n def outer(self, A, B, **kwargs):\n if self.nin != 2:\n raise ValueError(\"outer product only supported for binary functions\")\n if \"out\" in kwargs:\n raise ValueError(\"`out` kwarg not supported\")\n\n A_is_dask = is_dask_collection(A)\n B_is_dask = is_dask_collection(B)\n if not A_is_dask and not B_is_dask:\n return self._ufunc.outer(A, B, **kwargs)\n elif (\n A_is_dask\n and not isinstance(A, Array)\n or B_is_dask\n and not isinstance(B, Array)\n ):\n raise NotImplementedError(\n \"Dask objects besides `dask.array.Array` \"\n \"are not supported at this time.\"\n )\n\n A = asarray(A)\n B = asarray(B)\n ndim = A.ndim + B.ndim\n out_inds = tuple(range(ndim))\n A_inds = out_inds[: A.ndim]\n B_inds = out_inds[A.ndim :]\n\n dtype = apply_infer_dtype(\n self._ufunc.outer, [A, B], kwargs, \"ufunc.outer\", suggest_dtype=False\n )\n\n if \"dtype\" in kwargs:\n func = partial(self._ufunc.outer, dtype=kwargs.pop(\"dtype\"))\n else:\n func = self._ufunc.outer\n\n return blockwise(\n func,\n out_inds,\n A,\n A_inds,\n B,\n B_inds,\n dtype=dtype,\n token=self.__name__ + \".outer\",\n **kwargs\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", 
"metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py__ufuncs_copied_from_thi_degrees.ufunc_np_degrees_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py__ufuncs_copied_from_thi_degrees.ufunc_np_degrees_", "embedding": null, "metadata": {"file_path": "dask/array/ufunc.py", "file_name": "ufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 188, "end_line": 280, "span_ids": ["impl:106", "ufunc.outer", "impl"], "tokens": 779}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# ufuncs, copied from this page:\n# https://docs.scipy.org/doc/numpy/reference/ufuncs.html\n\n# math operations\nadd = ufunc(np.add)\nsubtract = ufunc(np.subtract)\nmultiply = ufunc(np.multiply)\ndivide = ufunc(np.divide)\nlogaddexp = ufunc(np.logaddexp)\nlogaddexp2 = ufunc(np.logaddexp2)\ntrue_divide = ufunc(np.true_divide)\nfloor_divide = ufunc(np.floor_divide)\nnegative = ufunc(np.negative)\npower = ufunc(np.power)\nfloat_power = ufunc(np.float_power)\nremainder = ufunc(np.remainder)\nmod = ufunc(np.mod)\n# fmod: see below\nconj = conjugate = ufunc(np.conjugate)\nexp = ufunc(np.exp)\nexp2 = ufunc(np.exp2)\nlog = ufunc(np.log)\nlog2 = ufunc(np.log2)\nlog10 = ufunc(np.log10)\nlog1p = ufunc(np.log1p)\nexpm1 = ufunc(np.expm1)\nsqrt = ufunc(np.sqrt)\nsquare = ufunc(np.square)\ncbrt = ufunc(np.cbrt)\nreciprocal = ufunc(np.reciprocal)\n\n# trigonometric functions\nsin = ufunc(np.sin)\ncos = ufunc(np.cos)\ntan = ufunc(np.tan)\narcsin = ufunc(np.arcsin)\narccos = ufunc(np.arccos)\narctan = ufunc(np.arctan)\narctan2 = ufunc(np.arctan2)\nhypot = ufunc(np.hypot)\nsinh = ufunc(np.sinh)\ncosh = ufunc(np.cosh)\ntanh = ufunc(np.tanh)\narcsinh = ufunc(np.arcsinh)\narccosh = ufunc(np.arccosh)\narctanh = ufunc(np.arctanh)\ndeg2rad = ufunc(np.deg2rad)\nrad2deg = ufunc(np.rad2deg)\n\n# comparison functions\ngreater = ufunc(np.greater)\ngreater_equal = ufunc(np.greater_equal)\nless = ufunc(np.less)\nless_equal = ufunc(np.less_equal)\nnot_equal = ufunc(np.not_equal)\nequal = ufunc(np.equal)\nisneginf = partial(equal, -np.inf)\nisposinf = partial(equal, np.inf)\nlogical_and = ufunc(np.logical_and)\nlogical_or = ufunc(np.logical_or)\nlogical_xor = ufunc(np.logical_xor)\nlogical_not = ufunc(np.logical_not)\nmaximum = ufunc(np.maximum)\nminimum = ufunc(np.minimum)\nfmax = ufunc(np.fmax)\nfmin = ufunc(np.fmin)\n\n# bitwise functions\nbitwise_and = ufunc(np.bitwise_and)\nbitwise_or = ufunc(np.bitwise_or)\nbitwise_xor = ufunc(np.bitwise_xor)\nbitwise_not = ufunc(np.bitwise_not)\ninvert = bitwise_not\n\n# floating functions\nisfinite = ufunc(np.isfinite)\nisinf = ufunc(np.isinf)\nisnan = ufunc(np.isnan)\nsignbit = ufunc(np.signbit)\ncopysign = ufunc(np.copysign)\nnextafter = ufunc(np.nextafter)\nspacing = ufunc(np.spacing)\n# modf: see below\nldexp = ufunc(np.ldexp)\n# frexp: see below\nfmod = ufunc(np.fmod)\nfloor = ufunc(np.floor)\nceil = ufunc(np.ceil)\ntrunc = ufunc(np.trunc)\n\n# more math routines, from this page:\n# https://docs.scipy.org/doc/numpy/reference/routines.math.html\ndegrees = ufunc(np.degrees)", "start_char_idx": null, 
"end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_radians_angle.return.np_angle_x_deg_deg_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_radians_angle.return.np_angle_x_deg_deg_", "embedding": null, "metadata": {"file_path": "dask/array/ufunc.py", "file_name": "ufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 281, "end_line": 304, "span_ids": ["impl:106", "angle"], "tokens": 218}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "radians = ufunc(np.radians)\nrint = ufunc(np.rint)\nfabs = ufunc(np.fabs)\nsign = ufunc(np.sign)\nabsolute = ufunc(np.absolute)\n\n# non-ufunc elementwise functions\nclip = wrap_elemwise(np.clip)\nisreal = wrap_elemwise(np.isreal, array_wrap=True)\niscomplex = wrap_elemwise(np.iscomplex, array_wrap=True)\nreal = wrap_elemwise(np.real, array_wrap=True)\nimag = wrap_elemwise(np.imag, array_wrap=True)\nfix = wrap_elemwise(np.fix, array_wrap=True)\ni0 = wrap_elemwise(np.i0, array_wrap=True)\nsinc = wrap_elemwise(np.sinc, array_wrap=True)\nnan_to_num = wrap_elemwise(np.nan_to_num, array_wrap=True)\n\n\n@derived_from(np)\ndef angle(x, deg=0):\n deg = bool(deg)\n if hasattr(x, \"_elemwise\"):\n return x._elemwise(__array_wrap__, np.angle, x, deg)\n return np.angle(x, deg=deg)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_frexp_frexp.return.L_R": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_frexp_frexp.return.L_R", "embedding": null, "metadata": {"file_path": "dask/array/ufunc.py", "file_name": "ufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 307, "end_line": 329, "span_ids": ["frexp"], "tokens": 244}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef frexp(x):\n # Not actually object dtype, just need to specify something\n tmp = elemwise(np.frexp, x, dtype=object)\n left = \"mantissa-\" + tmp.name\n right = \"exponent-\" + tmp.name\n ldsk = {\n (left,) + key[1:]: (getitem, key, 0)\n for key in core.flatten(tmp.__dask_keys__())\n }\n rdsk = {\n (right,) + key[1:]: (getitem, key, 1)\n for key in core.flatten(tmp.__dask_keys__())\n }\n\n a = empty_like_safe(getattr(x, \"_meta\", x), shape=(1,) * x.ndim, dtype=x.dtype)\n l, r = np.frexp(a)\n\n graph = HighLevelGraph.from_collections(left, ldsk, dependencies=[tmp])\n L = Array(graph, left, chunks=tmp.chunks, meta=l)\n graph = 
HighLevelGraph.from_collections(right, rdsk, dependencies=[tmp])\n R = Array(graph, right, chunks=tmp.chunks, meta=r)\n return L, R", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_modf_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_modf_", "embedding": null, "metadata": {"file_path": "dask/array/ufunc.py", "file_name": "ufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 332, "end_line": 362, "span_ids": ["divmod", "modf"], "tokens": 282}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef modf(x):\n # Not actually object dtype, just need to specify something\n tmp = elemwise(np.modf, x, dtype=object)\n left = \"modf1-\" + tmp.name\n right = \"modf2-\" + tmp.name\n ldsk = {\n (left,) + key[1:]: (getitem, key, 0)\n for key in core.flatten(tmp.__dask_keys__())\n }\n rdsk = {\n (right,) + key[1:]: (getitem, key, 1)\n for key in core.flatten(tmp.__dask_keys__())\n }\n\n a = empty_like_safe(getattr(x, \"_meta\", x), shape=(1,) * x.ndim, dtype=x.dtype)\n l, r = np.modf(a)\n\n graph = HighLevelGraph.from_collections(left, ldsk, dependencies=[tmp])\n L = Array(graph, left, chunks=tmp.chunks, meta=l)\n graph = HighLevelGraph.from_collections(right, rdsk, dependencies=[tmp])\n R = Array(graph, right, chunks=tmp.chunks, meta=r)\n return L, R\n\n\n@derived_from(np)\ndef divmod(x, y):\n res1 = x // y\n res2 = x % y\n return res1, res2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_difflib_normalize_to_array.if_cupy_in_str_type_x_.else_.return.x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_difflib_normalize_to_array.if_cupy_in_str_type_x_.else_.return.x", "embedding": null, "metadata": {"file_path": "dask/array/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 28, "span_ids": ["imports", "normalize_to_array"], "tokens": 141}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import difflib\nimport functools\nimport math\nimport numbers\nimport os\nimport warnings\n\nimport numpy as np\nfrom tlz import frequencies, concat\n\nfrom .core import Array\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..utils import has_keyword, ignoring, is_arraylike\n\ntry:\n AxisError = np.AxisError\nexcept AttributeError:\n try:\n np.array([0]).sum(axis=5)\n except 
Exception as e:\n AxisError = type(e)\n\n\ndef normalize_to_array(x):\n if \"cupy\" in str(type(x)): # TODO: avoid explicit reference to cupy\n return x.get()\n else:\n return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_meta_from_array_meta_from_array.return.meta": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_meta_from_array_meta_from_array.return.meta", "embedding": null, "metadata": {"file_path": "dask/array/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 31, "end_line": 121, "span_ids": ["meta_from_array"], "tokens": 641}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def meta_from_array(x, ndim=None, dtype=None):\n \"\"\"Normalize an array to appropriate meta object\n\n Parameters\n ----------\n x: array-like, callable\n Either an object that looks sufficiently like a Numpy array,\n or a callable that accepts shape and dtype keywords\n ndim: int\n Number of dimensions of the array\n dtype: Numpy dtype\n A valid input for ``np.dtype``\n\n Returns\n -------\n array-like with zero elements of the correct dtype\n \"\"\"\n # If using x._meta, x must be a Dask Array, some libraries (e.g. 
zarr)\n # implement a _meta attribute that is incompatible with Dask Array._meta\n if hasattr(x, \"_meta\") and isinstance(x, Array):\n x = x._meta\n\n if dtype is None and x is None:\n raise ValueError(\"You must specify the meta or dtype of the array\")\n\n if np.isscalar(x):\n x = np.array(x)\n\n if x is None:\n x = np.ndarray\n\n if isinstance(x, type):\n x = x(shape=(0,) * (ndim or 0), dtype=dtype)\n\n if (\n not hasattr(x, \"shape\")\n or not hasattr(x, \"dtype\")\n or not isinstance(x.shape, tuple)\n ):\n return x\n\n if isinstance(x, list) or isinstance(x, tuple):\n ndims = [\n 0\n if isinstance(a, numbers.Number)\n else a.ndim\n if hasattr(a, \"ndim\")\n else len(a)\n for a in x\n ]\n a = [a if nd == 0 else meta_from_array(a, nd) for a, nd in zip(x, ndims)]\n return a if isinstance(x, list) else tuple(x)\n\n if ndim is None:\n ndim = x.ndim\n\n try:\n meta = x[tuple(slice(0, 0, None) for _ in range(x.ndim))]\n if meta.ndim != ndim:\n if ndim > x.ndim:\n meta = meta[(Ellipsis,) + tuple(None for _ in range(ndim - meta.ndim))]\n meta = meta[tuple(slice(0, 0, None) for _ in range(meta.ndim))]\n elif ndim == 0:\n meta = meta.sum()\n else:\n meta = meta.reshape((0,) * ndim)\n except Exception:\n meta = np.empty((0,) * ndim, dtype=dtype or x.dtype)\n\n if np.isscalar(meta):\n meta = np.array(meta)\n\n if dtype and meta.dtype != dtype:\n try:\n meta = meta.astype(dtype)\n except ValueError as e:\n if (\n any(\n s in str(e)\n for s in [\n \"invalid literal\",\n \"could not convert string to float\",\n ]\n )\n and meta.dtype.kind in \"SU\"\n ):\n meta = np.array([]).astype(dtype)\n else:\n raise e\n\n return meta", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_compute_meta_compute_meta.with_np_errstate_all_ign.return.meta": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_compute_meta_compute_meta.with_np_errstate_all_ign.return.meta", "embedding": null, "metadata": {"file_path": "dask/array/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 124, "end_line": 171, "span_ids": ["compute_meta"], "tokens": 366}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def compute_meta(func, _dtype, *args, **kwargs):\n with np.errstate(all=\"ignore\"), warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n\n args_meta = [meta_from_array(x) if is_arraylike(x) else x for x in args]\n kwargs_meta = {\n k: meta_from_array(v) if is_arraylike(v) else v for k, v in kwargs.items()\n }\n\n # todo: look for alternative to this, causes issues when using map_blocks()\n # with np.vectorize, such as dask.array.routines._isnonzero_vec().\n if isinstance(func, np.vectorize):\n meta = func(*args_meta)\n else:\n try:\n # some reduction functions need to know they are computing meta\n if has_keyword(func, \"computing_meta\"):\n kwargs_meta[\"computing_meta\"] = True\n meta = func(*args_meta, **kwargs_meta)\n except 
TypeError as e:\n if any(\n s in str(e)\n for s in [\n \"unexpected keyword argument\",\n \"is an invalid keyword for\",\n \"Did not understand the following kwargs\",\n ]\n ):\n raise\n else:\n return None\n except ValueError as e:\n # min/max functions have no identity, attempt to use the first meta\n if \"zero-size array to reduction operation\" in str(e):\n meta = args_meta[0]\n else:\n return None\n except Exception:\n return None\n\n if _dtype and getattr(meta, \"dtype\", None) != _dtype:\n with ignoring(AttributeError):\n meta = meta.astype(_dtype)\n\n if np.isscalar(meta):\n meta = np.array(meta)\n\n return meta", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_allclose_allclose.return._a_b_all_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_allclose_allclose.return._a_b_all_", "embedding": null, "metadata": {"file_path": "dask/array/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 150, "end_line": 159, "span_ids": ["allclose"], "tokens": 116}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def allclose(a, b, equal_nan=False, **kwargs):\n a = normalize_to_array(a)\n b = normalize_to_array(b)\n if getattr(a, \"dtype\", None) != \"O\":\n return np.allclose(a, b, equal_nan=equal_nan, **kwargs)\n if equal_nan:\n return a.shape == b.shape and all(\n np.isnan(b) if np.isnan(a) else a == b for (a, b) in zip(a.flat, b.flat)\n )\n return (a == b).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_same_keys_assert_eq_shape.for_aa_bb_in_zip_a_b_.if_math_isnan_aa_or_math.else_.assert_aa_bb": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_same_keys_assert_eq_shape.for_aa_bb_in_zip_a_b_.if_math_isnan_aa_or_math.else_.assert_aa_bb", "embedding": null, "metadata": {"file_path": "dask/array/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 162, "end_line": 194, "span_ids": ["_not_empty", "_check_dsk", "assert_eq_shape", "same_keys"], "tokens": 237}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def same_keys(a, b):\n def key(k):\n if isinstance(k, str):\n return (k, -1, -1, -1)\n else:\n return k\n\n return sorted(a.dask, key=key) == sorted(b.dask, key=key)\n\n\ndef _not_empty(x):\n return x.shape and 0 not in x.shape\n\n\ndef _check_dsk(dsk):\n \"\"\" Check 
that graph is well named and non-overlapping \"\"\"\n if not isinstance(dsk, HighLevelGraph):\n return\n\n dsk.validate()\n assert all(isinstance(k, (tuple, str)) for k in dsk.layers)\n freqs = frequencies(concat(dsk.dicts.values()))\n non_one = {k: v for k, v in freqs.items() if v != 1}\n assert not non_one, non_one\n\n\ndef assert_eq_shape(a, b, check_nan=True):\n for aa, bb in zip(a, b):\n if math.isnan(aa) or math.isnan(bb):\n if check_nan:\n assert math.isnan(aa) == math.isnan(bb)\n else:\n assert aa == bb", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py__get_dt_meta_computed__get_dt_meta_computed.return.x_adt_x_meta_x_compute": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py__get_dt_meta_computed__get_dt_meta_computed.return.x_adt_x_meta_x_compute", "embedding": null, "metadata": {"file_path": "dask/array/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 197, "end_line": 223, "span_ids": ["_get_dt_meta_computed"], "tokens": 222}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _get_dt_meta_computed(x, check_shape=True, check_graph=True):\n x_original = x\n x_meta = None\n x_computed = None\n\n if isinstance(x, Array):\n assert x.dtype is not None\n adt = x.dtype\n if check_graph:\n _check_dsk(x.dask)\n x_meta = getattr(x, \"_meta\", None)\n x = x.compute(scheduler=\"sync\")\n x_computed = x\n if hasattr(x, \"todense\"):\n x = x.todense()\n if not hasattr(x, \"dtype\"):\n x = np.array(x, dtype=\"O\")\n if _not_empty(x):\n assert x.dtype == x_original.dtype\n if check_shape:\n assert_eq_shape(x_original.shape, x.shape, check_nan=False)\n else:\n if not hasattr(x, \"dtype\"):\n x = np.array(x, dtype=\"O\")\n adt = getattr(x, \"dtype\", None)\n\n return x, adt, x_meta, x_computed", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_assert_eq_assert_eq.return.True": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_assert_eq_assert_eq.return.True", "embedding": null, "metadata": {"file_path": "dask/array/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 226, "end_line": 275, "span_ids": ["assert_eq"], "tokens": 457}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def assert_eq(a, b, check_shape=True, check_graph=True, check_meta=True, **kwargs):\n a_original = 
a\n b_original = b\n\n a, adt, a_meta, a_computed = _get_dt_meta_computed(\n a, check_shape=check_shape, check_graph=check_graph\n )\n b, bdt, b_meta, b_computed = _get_dt_meta_computed(\n b, check_shape=check_shape, check_graph=check_graph\n )\n\n if str(adt) != str(bdt):\n # Ignore check for matching length of flexible dtypes, since Array._meta\n # can't encode that information\n if adt.type == bdt.type and not (adt.type == np.bytes_ or adt.type == np.str_):\n diff = difflib.ndiff(str(adt).splitlines(), str(bdt).splitlines())\n raise AssertionError(\n \"string repr are different\" + os.linesep + os.linesep.join(diff)\n )\n\n try:\n assert a.shape == b.shape\n if check_meta:\n if hasattr(a, \"_meta\") and hasattr(b, \"_meta\"):\n assert_eq(a._meta, b._meta)\n if hasattr(a_original, \"_meta\"):\n assert a_original._meta.ndim == a.ndim\n if a_meta is not None:\n assert type(a_original._meta) == type(a_meta)\n if not (np.isscalar(a_meta) or np.isscalar(a_computed)):\n assert type(a_meta) == type(a_computed)\n if hasattr(b_original, \"_meta\"):\n assert b_original._meta.ndim == b.ndim\n if b_meta is not None:\n assert type(b_original._meta) == type(b_meta)\n if not (np.isscalar(b_meta) or np.isscalar(b_computed)):\n assert type(b_meta) == type(b_computed)\n assert allclose(a, b, **kwargs)\n return True\n except TypeError:\n pass\n\n c = a == b\n\n if isinstance(c, np.ndarray):\n assert c.all()\n else:\n assert c\n\n return True", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_safe_wraps_empty_like_safe.try_.except_TypeError_.return.np_empty_shape_kwargs_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_safe_wraps_empty_like_safe.try_.except_TypeError_.return.np_empty_shape_kwargs_", "embedding": null, "metadata": {"file_path": "dask/array/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 278, "end_line": 298, "span_ids": ["safe_wraps", "empty_like_safe"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def safe_wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS):\n \"\"\"Like functools.wraps, but safe to use even if wrapped is not a function.\n\n Only needed on Python 2.\n \"\"\"\n if all(hasattr(wrapped, attr) for attr in assigned):\n return functools.wraps(wrapped, assigned=assigned)\n else:\n return lambda x: x\n\n\ndef empty_like_safe(a, shape, **kwargs):\n \"\"\"\n Return np.empty_like(a, shape=shape, **kwargs) if the shape argument\n is supported (requires NumPy >= 1.17), otherwise falls back to\n using the old behavior, returning np.empty(shape, **kwargs).\n \"\"\"\n try:\n return np.empty_like(a, shape=shape, **kwargs)\n except TypeError:\n return np.empty(shape, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
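The `assert_eq` helper above ties the preceding utilities together: it materialises both operands with the synchronous scheduler via `_get_dt_meta_computed`, validates the task graph with `_check_dsk`, compares dtypes and `_meta`, and finally checks values with `allclose`. A minimal usage sketch, assuming only that `numpy` and `dask` are installed (the array values here are illustrative):

import numpy as np
import dask.array as da
from dask.array.utils import assert_eq

x = np.arange(12).reshape(3, 4)
d = da.from_array(x, chunks=(2, 2))

# One call checks graph validity, dtype, meta, shape, and values;
# it raises AssertionError on the first mismatch and returns True otherwise.
assert_eq(d, x)
assert_eq(d + 1, x + 1)
assert_eq(d.sum(axis=0), x.sum(axis=0))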
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_full_like_safe_full_like_safe.try_.except_TypeError_.return.np_full_shape_fill_value": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_full_like_safe_full_like_safe.try_.except_TypeError_.return.np_full_shape_fill_value", "embedding": null, "metadata": {"file_path": "dask/array/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 301, "end_line": 311, "span_ids": ["full_like_safe"], "tokens": 113}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def full_like_safe(a, fill_value, shape, **kwargs):\n \"\"\"\n Return np.full_like(a, fill_value, shape=shape, **kwargs) if the\n shape argument is supported (requires NumPy >= 1.17), otherwise\n falls back to using the old behavior, returning\n np.full(shape, fill_value, **kwargs).\n \"\"\"\n try:\n return np.full_like(a, fill_value, shape=shape, **kwargs)\n except TypeError:\n return np.full(shape, fill_value, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_ones_like_safe_zeros_like_safe.try_.except_TypeError_.return.np_zeros_shape_kwargs_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_ones_like_safe_zeros_like_safe.try_.except_TypeError_.return.np_zeros_shape_kwargs_", "embedding": null, "metadata": {"file_path": "dask/array/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 314, "end_line": 335, "span_ids": ["ones_like_safe", "zeros_like_safe"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def ones_like_safe(a, shape, **kwargs):\n \"\"\"\n Return np.ones_like(a, shape=shape, **kwargs) if the shape argument\n is supported (requires NumPy >= 1.17), otherwise falls back to\n using the old behavior, returning np.ones(shape, **kwargs).\n \"\"\"\n try:\n return np.ones_like(a, shape=shape, **kwargs)\n except TypeError:\n return np.ones(shape, **kwargs)\n\n\ndef zeros_like_safe(a, shape, **kwargs):\n \"\"\"\n Return np.zeros_like(a, shape=shape, **kwargs) if the shape argument\n is supported (requires NumPy >= 1.17), otherwise falls back to\n using the old behavior, returning np.zeros(shape, **kwargs).\n \"\"\"\n try:\n return np.zeros_like(a, shape=shape, **kwargs)\n except TypeError:\n return np.zeros(shape, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_validate_axis_validate_axis.return.axis": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_validate_axis_validate_axis.return.axis", "embedding": null, "metadata": {"file_path": "dask/array/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 338, "end_line": 350, "span_ids": ["validate_axis"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def validate_axis(axis, ndim):\n \"\"\" Validate an input to axis= keywords \"\"\"\n if isinstance(axis, (tuple, list)):\n return tuple(validate_axis(ax, ndim) for ax in axis)\n if not isinstance(axis, numbers.Integral):\n raise TypeError(\"Axis value must be an integer, got %s\" % axis)\n if axis < -ndim or axis >= ndim:\n raise AxisError(\n \"Axis %d is out of bounds for array of dimension %d\" % (axis, ndim)\n )\n if axis < 0:\n axis += ndim\n return axis", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_svd_flip_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_svd_flip_", "embedding": null, "metadata": {"file_path": "dask/array/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 377, "end_line": 431, "span_ids": ["impl:10", "svd_flip", "_is_nep18_active"], "tokens": 400}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def svd_flip(u, v, u_based_decision=False):\n \"\"\"Sign correction to ensure deterministic output from SVD.\n\n This function is useful for orienting eigenvectors such that\n they all lie in a shared but arbitrary half-space. 
This makes\n it possible to ensure that results are equivalent across SVD\n implementations and random number generator states.\n\n Parameters\n ----------\n\n u : (M, K) array_like\n Left singular vectors (in columns)\n v : (K, N) array_like\n Right singular vectors (in rows)\n u_based_decision: bool\n Whether or not to choose signs based\n on `u` rather than `v`, by default False\n\n Returns\n -------\n\n u : (M, K) array_like\n Left singular vectors with corrected sign\n v: (K, N) array_like\n Right singular vectors with corrected sign\n \"\"\"\n # Determine half-space in which all singular vectors\n # lie relative to an arbitrary vector; summation\n # equivalent to dot product with row vector of ones\n if u_based_decision:\n dtype = u.dtype\n signs = np.sum(u, axis=0, keepdims=True)\n else:\n dtype = v.dtype\n signs = np.sum(v, axis=1, keepdims=True).T\n signs = 2.0 * ((signs >= 0) - 0.5).astype(dtype)\n # Force all singular vectors into same half-space\n u, v = u * signs, v * signs.T\n return u, v\n\n\ndef _is_nep18_active():\n class A:\n def __array_function__(self, *args, **kwargs):\n return True\n\n try:\n return np.concatenate([A()])\n except ValueError:\n return False\n\n\nIS_NEP18_ACTIVE = _is_nep18_active()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_from_functools_import_par__parse_wrap_args.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_from_functools_import_par__parse_wrap_args.return._", "embedding": null, "metadata": {"file_path": "dask/array/wrap.py", "file_name": "wrap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 47, "span_ids": ["imports", "_parse_wrap_args"], "tokens": 255}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from functools import partial\nfrom itertools import product\n\nimport numpy as np\n\nfrom tlz import curry\n\nfrom ..base import tokenize\nfrom ..utils import funcname\nfrom .core import Array, normalize_chunks\nfrom .utils import (\n meta_from_array,\n empty_like_safe,\n full_like_safe,\n ones_like_safe,\n zeros_like_safe,\n)\n\n\ndef _parse_wrap_args(func, args, kwargs, shape):\n if isinstance(shape, np.ndarray):\n shape = shape.tolist()\n\n if not isinstance(shape, (tuple, list)):\n shape = (shape,)\n\n name = kwargs.pop(\"name\", None)\n chunks = kwargs.pop(\"chunks\", \"auto\")\n\n dtype = kwargs.pop(\"dtype\", None)\n if dtype is None:\n dtype = func(shape, *args, **kwargs).dtype\n dtype = np.dtype(dtype)\n\n chunks = normalize_chunks(chunks, shape, dtype=dtype)\n\n name = name or funcname(func) + \"-\" + tokenize(\n func, shape, chunks, dtype, args, kwargs\n )\n\n return {\n \"shape\": shape,\n \"dtype\": dtype,\n \"kwargs\": kwargs,\n \"chunks\": chunks,\n \"name\": name,\n }", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": 
"1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_wrap_func_shape_as_first_arg_wrap_func_shape_as_first_arg.return.Array_dsk_name_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_wrap_func_shape_as_first_arg_wrap_func_shape_as_first_arg.return.Array_dsk_name_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/wrap.py", "file_name": "wrap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 50, "end_line": 78, "span_ids": ["wrap_func_shape_as_first_arg"], "tokens": 239}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def wrap_func_shape_as_first_arg(func, *args, **kwargs):\n \"\"\"\n Transform np creation function into blocked version\n \"\"\"\n if \"shape\" not in kwargs:\n shape, args = args[0], args[1:]\n else:\n shape = kwargs.pop(\"shape\")\n\n if isinstance(shape, Array):\n raise TypeError(\n \"Dask array input not supported. \"\n \"Please use tuple, list, or a 1D numpy array instead.\"\n )\n\n parsed = _parse_wrap_args(func, args, kwargs, shape)\n shape = parsed[\"shape\"]\n dtype = parsed[\"dtype\"]\n chunks = parsed[\"chunks\"]\n name = parsed[\"name\"]\n kwargs = parsed[\"kwargs\"]\n\n keys = product([name], *[range(len(bd)) for bd in chunks])\n shapes = product(*chunks)\n func = partial(func, dtype=dtype, **kwargs)\n vals = ((func,) + (s,) + args for s in shapes)\n\n dsk = dict(zip(keys, vals))\n return Array(dsk, name, chunks, dtype=dtype, meta=kwargs.get(\"meta\", None))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_wrap_func_like_wrap_func_like.return.Array_dsk_name_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_wrap_func_like_wrap_func_like.return.Array_dsk_name_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/wrap.py", "file_name": "wrap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 75, "end_line": 100, "span_ids": ["wrap_func_like"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def wrap_func_like(func, *args, **kwargs):\n \"\"\"\n Transform np creation function into blocked version\n \"\"\"\n x = args[0]\n meta = meta_from_array(x)\n shape = kwargs.get(\"shape\", x.shape)\n\n parsed = _parse_wrap_args(func, args, kwargs, shape)\n shape = parsed[\"shape\"]\n dtype = parsed[\"dtype\"]\n chunks = parsed[\"chunks\"]\n name = parsed[\"name\"]\n kwargs = parsed[\"kwargs\"]\n\n keys = product([name], *[range(len(bd)) for bd in chunks])\n shapes = product(*chunks)\n shapes = list(shapes)\n kw = [kwargs for _ in shapes]\n for i, s in 
enumerate(list(shapes)):\n kw[i][\"shape\"] = s\n vals = ((partial(func, dtype=dtype, **k),) + args for (k, s) in zip(kw, shapes))\n\n dsk = dict(zip(keys, vals))\n\n return Array(dsk, name, chunks, meta=meta.astype(dtype))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_wrap_func_like_safe_wrap.return.f": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_wrap_func_like_safe_wrap.return.f", "embedding": null, "metadata": {"file_path": "dask/array/wrap.py", "file_name": "wrap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 103, "end_line": 132, "span_ids": ["wrap", "wrap_func_like_safe"], "tokens": 241}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def wrap_func_like_safe(func, func_like, *args, **kwargs):\n \"\"\"\n Safe implementation for wrap_func_like(): attempts to use func_like() if\n the shape keyword argument is supported, otherwise falls back to func().\n \"\"\"\n try:\n return func_like(*args, **kwargs)\n except TypeError:\n return func(*args, **kwargs)\n\n\n@curry\ndef wrap(wrap_func, func, **kwargs):\n func_like = kwargs.pop(\"func_like\", None)\n if func_like is None:\n f = partial(wrap_func, func, **kwargs)\n else:\n f = partial(wrap_func, func_like, **kwargs)\n template = \"\"\"\n Blocked variant of %(name)s\n\n Follows the signature of %(name)s exactly except that it also features\n optional keyword arguments ``chunks: int, tuple, or dict`` and ``name: str``.\n\n Original signature follows below.\n \"\"\"\n if func.__doc__ is not None:\n f.__doc__ = template % {\"name\": func.__name__} + func.__doc__\n f.__name__ = \"blocked_\" + func.__name__\n return f", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_w_broadcast_trick.return.inner": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_w_broadcast_trick.return.inner", "embedding": null, "metadata": {"file_path": "dask/array/wrap.py", "file_name": "wrap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 141, "end_line": 179, "span_ids": ["broadcast_trick", "_broadcast_trick_inner", "impl"], "tokens": 309}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "w = wrap(wrap_func_shape_as_first_arg)\n\n\n@curry\ndef _broadcast_trick_inner(func, shape, meta=(), *args, **kwargs):\n if shape == ():\n return np.broadcast_to(func(meta, shape=(), *args, **kwargs), shape)\n else:\n return 
np.broadcast_to(func(meta, shape=1, *args, **kwargs), shape)\n\n\ndef broadcast_trick(func):\n \"\"\"\n Provide a decorator to wrap common numpy functions with a broadcast trick.\n\n Dask arrays are currently immutable; thus when we know an array is uniform,\n we can replace the actual data by a single value and have all elements point\n to it, thus reducing the size.\n\n >>> x = np.broadcast_to(1, (100,100,100))\n >>> x.base.nbytes\n 8\n\n Those arrays are not only more efficient locally, but dask serialisation is\n aware of the _real_ size of those arrays and thus can send them around\n efficiently and schedule accordingly.\n\n Note that those arrays are read-only and numpy will refuse to assign to them,\n so this should be safe.\n \"\"\"\n\n inner = _broadcast_trick_inner(func)\n\n if func.__doc__ is not None:\n inner.__doc__ = func.__doc__\n inner.__name__ = func.__name__\n if inner.__name__.endswith(\"_like_safe\"):\n inner.__name__ = inner.__name__[:-10]\n return inner", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_ones_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_ones_", "embedding": null, "metadata": {"file_path": "dask/array/wrap.py", "file_name": "wrap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 182, "end_line": 230, "span_ids": ["impl:3", "impl:19", "full", "full_like"], "tokens": 431}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "ones = w(broadcast_trick(ones_like_safe), dtype=\"f8\")\nzeros = w(broadcast_trick(zeros_like_safe), dtype=\"f8\")\nempty = w(broadcast_trick(empty_like_safe), dtype=\"f8\")\n\n\nw_like = wrap(wrap_func_like_safe)\n\n\nempty_like = w_like(np.empty, func_like=np.empty_like)\n\n\n# full and full_like require special casing due to argument check on fill_value\n# Generate wrapped functions only once\n_full = w(broadcast_trick(full_like_safe))\n_full_like = w_like(np.full, func_like=np.full_like)\n\n# workaround for numpy doctest failure: https://github.com/numpy/numpy/pull/17472\n_full.__doc__ = _full.__doc__.replace(\n \"array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])\",\n \"array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])\",\n)\n\n\ndef full(shape, fill_value, *args, **kwargs):\n # np.isscalar has somewhat strange behavior:\n # https://docs.scipy.org/doc/numpy/reference/generated/numpy.isscalar.html\n if np.ndim(fill_value) != 0:\n raise ValueError(\n f\"fill_value must be scalar. Received {type(fill_value).__name__} instead.\"\n )\n return _full(shape=shape, fill_value=fill_value, *args, **kwargs)\n\n\ndef full_like(a, fill_value, *args, **kwargs):\n if np.ndim(fill_value) != 0:\n raise ValueError(\n f\"fill_value must be scalar. 
Received {type(fill_value).__name__} instead.\"\n )\n return _full_like(\n a=a,\n fill_value=fill_value,\n *args,\n **kwargs,\n )\n\n\nfull.__doc__ = _full.__doc__\nfull_like.__doc__ = _full_like.__doc__", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/__init__.py_try__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/__init__.py_try__", "embedding": null, "metadata": {"file_path": "dask/bag/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 27, "span_ids": ["impl"], "tokens": 188}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "try:\n from .core import (\n Bag,\n Item,\n from_sequence,\n from_url,\n to_textfiles,\n concat,\n from_delayed,\n map_partitions,\n bag_range as range,\n bag_zip as zip,\n bag_map as map,\n )\n from .text import read_text\n from .utils import assert_eq\n from .avro import read_avro\n from ..base import compute\nexcept ImportError as e:\n msg = (\n \"Dask bag requirements are not installed.\\n\\n\"\n \"Please either conda or pip install as follows:\\n\\n\"\n \" conda install dask # either conda install\\n\"\n ' python -m pip install \"dask[bag]\" --upgrade # or python -m pip install'\n )\n raise ImportError(str(e) + \"\\n\\n\" + msg) from e", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py_io_read_bytes.return.fo_read_size_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py_io_read_bytes.return.fo_read_size_", "embedding": null, "metadata": {"file_path": "dask/bag/avro.py", "file_name": "avro.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 26, "span_ids": ["read_bytes", "imports", "read_long"], "tokens": 170}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import io\nimport uuid\n\nfrom ..highlevelgraph import HighLevelGraph\n\nMAGIC = b\"Obj\\x01\"\nSYNC_SIZE = 16\n\n\ndef read_long(fo):\n \"\"\"variable-length, zig-zag encoding.\"\"\"\n c = fo.read(1)\n b = ord(c)\n n = b & 0x7F\n shift = 7\n while (b & 0x80) != 0:\n b = ord(fo.read(1))\n n |= (b & 0x7F) << shift\n shift += 7\n return (n >> 1) ^ -(n & 1)\n\n\ndef read_bytes(fo):\n \"\"\"a long followed by that many bytes of data.\"\"\"\n size = read_long(fo)\n return fo.read(size)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: 
{value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py_read_header_open_head.return.head_size": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py_read_header_open_head.return.head_size", "embedding": null, "metadata": {"file_path": "dask/bag/avro.py", "file_name": "avro.py", "file_type": "text/x-python", "category": "implementation", "start_line": 29, "end_line": 66, "span_ids": ["open_head", "read_header"], "tokens": 264}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def read_header(fo):\n \"\"\"Extract an avro file's header\n\n fo: file-like\n This should be in bytes mode, e.g., io.BytesIO\n\n Returns dict representing the header\n\n Parameters\n ----------\n fo: file-like\n \"\"\"\n assert fo.read(len(MAGIC)) == MAGIC, \"Magic avro bytes missing\"\n meta = {}\n out = {\"meta\": meta}\n while True:\n n_keys = read_long(fo)\n if n_keys == 0:\n break\n for i in range(n_keys):\n # ignore dtype mapping for bag version\n read_bytes(fo) # schema keys\n read_bytes(fo) # schema values\n out[\"sync\"] = fo.read(SYNC_SIZE)\n out[\"header_size\"] = fo.tell()\n fo.seek(0)\n out[\"head_bytes\"] = fo.read(out[\"header_size\"])\n return out\n\n\ndef open_head(fs, path, compression):\n \"\"\"Open a file just to read its head and size\"\"\"\n from dask.bytes.core import OpenFile\n\n with OpenFile(fs, path, compression=compression) as f:\n head = read_header(f)\n size = fs.info(path)[\"size\"]\n return head, size", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py_read_avro_read_avro.if_blocksize_is_not_None_.else_.return.from_delayed_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py_read_avro_read_avro.if_blocksize_is_not_None_.else_.return.from_delayed_chunks_", "embedding": null, "metadata": {"file_path": "dask/bag/avro.py", "file_name": "avro.py", "file_type": "text/x-python", "category": "implementation", "start_line": 69, "end_line": 136, "span_ids": ["read_avro"], "tokens": 589}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def read_avro(urlpath, blocksize=100000000, storage_options=None, compression=None):\n \"\"\"Read set of avro files\n\n Use this with arbitrary nested avro schemas. Please refer to the\n fastavro documentation for its capabilities:\n https://github.com/fastavro/fastavro\n\n Parameters\n ----------\n urlpath: string or list\n Absolute or relative filepath, URL (may include protocols like\n ``s3://``), or globstring pointing to data.\n blocksize: int or None\n Size of chunks in bytes. 
If None, there will be no chunking and each\n file will become one partition.\n storage_options: dict or None\n passed to backend file-system\n compression: str or None\n Compression format of the target(s), like 'gzip'. Should only be used\n with blocksize=None.\n \"\"\"\n from dask.utils import import_required\n from dask import delayed, compute\n from dask.bytes.core import open_files, get_fs_token_paths, OpenFile, tokenize\n from dask.bag import from_delayed\n\n import_required(\n \"fastavro\", \"fastavro is a required dependency for using bag.read_avro().\"\n )\n\n storage_options = storage_options or {}\n if blocksize is not None:\n fs, fs_token, paths = get_fs_token_paths(\n urlpath, mode=\"rb\", storage_options=storage_options\n )\n dhead = delayed(open_head)\n out = compute(*[dhead(fs, path, compression) for path in paths])\n heads, sizes = zip(*out)\n dread = delayed(read_chunk)\n\n offsets = []\n lengths = []\n for size in sizes:\n off = list(range(0, size, blocksize))\n length = [blocksize] * len(off)\n offsets.append(off)\n lengths.append(length)\n\n out = []\n for path, offset, length, head in zip(paths, offsets, lengths, heads):\n delimiter = head[\"sync\"]\n f = OpenFile(fs, path, compression=compression)\n token = tokenize(\n fs_token, delimiter, path, fs.ukey(path), compression, offset\n )\n keys = [\"read-avro-%s-%s\" % (o, token) for o in offset]\n values = [\n dread(f, o, l, head, dask_key_name=key)\n for o, key, l in zip(offset, keys, length)\n ]\n out.extend(values)\n\n return from_delayed(out)\n else:\n files = open_files(urlpath, compression=compression, **storage_options)\n dread = delayed(read_file)\n chunks = [dread(fo) for fo in files]\n return from_delayed(chunks)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py_read_chunk_read_file.with_fo_as_f_.return.list_fastavro_iter_avro_f": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py_read_chunk_read_file.with_fo_as_f_.return.list_fastavro_iter_avro_f", "embedding": null, "metadata": {"file_path": "dask/bag/avro.py", "file_name": "avro.py", "file_type": "text/x-python", "category": "implementation", "start_line": 139, "end_line": 158, "span_ids": ["read_chunk", "read_file"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def read_chunk(fobj, off, l, head):\n \"\"\"Get rows from raw bytes block\"\"\"\n import fastavro\n from dask.bytes.core import read_block\n\n with fobj as f:\n chunk = read_block(f, off, l, head[\"sync\"])\n head_bytes = head[\"head_bytes\"]\n if not chunk.startswith(MAGIC):\n chunk = head_bytes + chunk\n i = io.BytesIO(chunk)\n return list(fastavro.iter_avro(i))\n\n\ndef read_file(fo):\n \"\"\"Get rows from file-like\"\"\"\n import fastavro\n\n with fo as f:\n return list(fastavro.iter_avro(f))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, 
"__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py_to_avro_to_avro.storage_options.storage_options_or_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py_to_avro_to_avro.storage_options.storage_options_or_", "embedding": null, "metadata": {"file_path": "dask/bag/avro.py", "file_name": "avro.py", "file_type": "text/x-python", "category": "implementation", "start_line": 161, "end_line": 244, "span_ids": ["to_avro"], "tokens": 741}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_avro(\n b,\n filename,\n schema,\n name_function=None,\n storage_options=None,\n codec=\"null\",\n sync_interval=16000,\n metadata=None,\n compute=True,\n **kwargs\n):\n \"\"\"Write bag to set of avro files\n\n The schema is a complex dictionary describing the data, see\n https://avro.apache.org/docs/1.8.2/gettingstartedpython.html#Defining+a+schema\n and https://fastavro.readthedocs.io/en/latest/writer.html .\n It's structure is as follows::\n\n {'name': 'Test',\n 'namespace': 'Test',\n 'doc': 'Descriptive text',\n 'type': 'record',\n 'fields': [\n {'name': 'a', 'type': 'int'},\n ]}\n\n where the \"name\" field is required, but \"namespace\" and \"doc\" are optional\n descriptors; \"type\" must always be \"record\". The list of fields should\n have an entry for every key of the input records, and the types are\n like the primitive, complex or logical types of the Avro spec\n ( https://avro.apache.org/docs/1.8.2/spec.html ).\n\n Results in one avro file per input partition.\n\n Parameters\n ----------\n b: dask.bag.Bag\n filename: list of str or str\n Filenames to write to. If a list, number must match the number of\n partitions. If a string, must include a glob character \"*\", which will\n be expanded using name_function\n schema: dict\n Avro schema dictionary, see above\n name_function: None or callable\n Expands integers into strings, see\n ``dask.bytes.utils.build_name_function``\n storage_options: None or dict\n Extra key/value options to pass to the backend file-system\n codec: 'null', 'deflate', or 'snappy'\n Compression algorithm\n sync_interval: int\n Number of records to include in each block within a file\n metadata: None or dict\n Included in the file header\n compute: bool\n If True, files are written immediately, and function blocks. If False,\n returns delayed objects, which can be computed by the user where\n convenient.\n kwargs: passed to compute(), if compute=True\n\n Examples\n --------\n >>> import dask.bag as db\n >>> b = db.from_sequence([{'name': 'Alice', 'value': 100},\n ... {'name': 'Bob', 'value': 200}])\n >>> schema = {'name': 'People', 'doc': \"Set of people's scores\",\n ... 'type': 'record',\n ... 'fields': [\n ... {'name': 'name', 'type': 'string'},\n ... 
{'name': 'value', 'type': 'int'}]}\n >>> b.to_avro('my-data.*.avro', schema) # doctest: +SKIP\n ['my-data.0.avro', 'my-data.1.avro']\n \"\"\"\n # TODO infer schema from first partition of data\n from dask.utils import import_required\n from dask.bytes.core import open_files\n\n import_required(\n \"fastavro\", \"fastavro is a required dependency for using bag.to_avro().\"\n )\n _verify_schema(schema)\n\n storage_options = storage_options or {}\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py_to_avro.files_to_avro.if_compute_.else_.return.out_to_delayed_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py_to_avro.files_to_avro.if_compute_.else_.return.out_to_delayed_", "embedding": null, "metadata": {"file_path": "dask/bag/avro.py", "file_name": "avro.py", "file_type": "text/x-python", "category": "implementation", "start_line": 245, "end_line": 271, "span_ids": ["to_avro"], "tokens": 221}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_avro(\n b,\n filename,\n schema,\n name_function=None,\n storage_options=None,\n codec=\"null\",\n sync_interval=16000,\n metadata=None,\n compute=True,\n **kwargs\n):\n # ... other code\n files = open_files(\n filename,\n \"wb\",\n name_function=name_function,\n num=b.npartitions,\n **storage_options\n )\n name = \"to-avro-\" + uuid.uuid4().hex\n dsk = {\n (name, i): (\n _write_avro_part,\n (b.name, i),\n f,\n schema,\n codec,\n sync_interval,\n metadata,\n )\n for i, f in enumerate(files)\n }\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[b])\n out = type(b)(graph, name, b.npartitions)\n if compute:\n out.compute(**kwargs)\n return [f.path for f in files]\n else:\n return out.to_delayed()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py__verify_schema_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py__verify_schema_", "embedding": null, "metadata": {"file_path": "dask/bag/avro.py", "file_name": "avro.py", "file_type": "text/x-python", "category": "implementation", "start_line": 274, "end_line": 290, "span_ids": ["_write_avro_part", "_verify_schema"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _verify_schema(s):\n assert isinstance(s, dict), \"Schema must be dictionary\"\n for field in [\"name\", \"type\", \"fields\"]:\n assert field in s, \"Schema missing '%s' field\" % field\n assert 
s[\"type\"] == \"record\", \"Schema must be of type 'record'\"\n assert isinstance(s[\"fields\"], list), \"Fields entry must be a list\"\n for f in s[\"fields\"]:\n assert \"name\" in f and \"type\" in f, \"Field spec incomplete: %s\" % f\n\n\ndef _write_avro_part(part, f, schema, codec, sync_interval, metadata):\n \"\"\"Create single avro file from list of dictionaries\"\"\"\n import fastavro\n\n with f as f:\n fastavro.writer(f, schema, part, codec, sync_interval, metadata)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/chunk.py_barrier_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/chunk.py_barrier_", "embedding": null, "metadata": {"file_path": "dask/bag/chunk.py", "file_name": "chunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 34, "span_ids": ["foldby_combine2", "barrier", "groupby_tasks_group_hash", "getitem", "var_aggregate", "var_chunk"], "tokens": 227}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def barrier(*args):\n return None\n\n\ndef getitem(x, key):\n \"\"\"Like :func:`operator.getitem`, but allows setting key using partial\n ``partial(chunk.getitem, key=key)\n \"\"\"\n return x[key]\n\n\ndef foldby_combine2(combine, acc, x):\n return combine(acc, x[1])\n\n\ndef groupby_tasks_group_hash(x, hash, grouper):\n return hash(grouper(x)), x\n\n\ndef var_chunk(seq):\n squares, total, n = 0.0, 0.0, 0\n for x in seq:\n squares += x ** 2\n total += x\n n += 1\n return squares, total, n\n\n\ndef var_aggregate(x, ddof):\n squares, totals, counts = list(zip(*x))\n x2, x, n = float(sum(squares)), float(sum(totals)), sum(counts)\n result = (x2 / n) - (x / n) ** 2\n return result * n / (n - ddof)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_io_no_result.type_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_io_no_result.type_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 69, "span_ids": ["imports"], "tokens": 342}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import io\nimport itertools\nimport math\nimport operator\nimport uuid\nimport warnings\nfrom collections import defaultdict\nfrom collections.abc import Iterable, Iterator\nfrom functools import wraps, partial, reduce\nfrom random import Random\nfrom 
urllib.request import urlopen\n\nimport tlz as toolz\nfrom tlz import (\n merge,\n take,\n valmap,\n partition_all,\n remove,\n compose,\n curry,\n first,\n second,\n accumulate,\n peek,\n frequencies,\n merge_with,\n join,\n reduceby,\n count,\n pluck,\n groupby,\n topk,\n unique,\n)\n\nfrom .. import config\nfrom .avro import to_avro\nfrom ..base import tokenize, dont_optimize, DaskMethodsMixin\nfrom ..bytes import open_files\nfrom ..context import globalmethod\nfrom ..core import quote, istask, get_dependencies, reverse_dict, flatten\nfrom ..sizeof import sizeof\nfrom ..delayed import Delayed, unpack_collections\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..multiprocessing import get as mpget\nfrom ..optimization import fuse, cull, inline\nfrom ..utils import (\n apply,\n system_encoding,\n takes_multiple_arguments,\n funcname,\n digit,\n insert,\n ensure_dict,\n ensure_bytes,\n ensure_unicode,\n key_split,\n parse_bytes,\n iter_chunks,\n)\nfrom . import chunk\n\n\nno_default = \"__no__default__\"\nno_result = type(\n \"no_result\", (object,), {\"__slots__\": (), \"__reduce__\": lambda self: \"no_result\"}\n)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_lazify_task_lazify_task.if_not_start_and_head_in_.else_.return._head_tuple_lazify_t": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_lazify_task_lazify_task.if_not_start_and_head_in_.else_.return._head_tuple_lazify_t", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 72, "end_line": 95, "span_ids": ["lazify_task"], "tokens": 251}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def lazify_task(task, start=True):\n \"\"\"\n Given a task, remove unnecessary calls to ``list`` and ``reify``.\n\n This traverses tasks and small lists. 
We choose not to traverse down lists\n of size >= 50 because it is unlikely that sequences this long contain other\n sequences in practice.\n\n Examples\n --------\n >>> task = (sum, (list, (map, inc, [1, 2, 3]))) # doctest: +SKIP\n >>> lazify_task(task) # doctest: +SKIP\n (sum, (map, inc, [1, 2, 3]))\n \"\"\"\n if type(task) is list and len(task) < 50:\n return [lazify_task(arg, False) for arg in task]\n if not istask(task):\n return task\n head, tail = task[0], task[1:]\n if not start and head in (list, reify):\n task = task[1]\n return lazify_task(*tail, start=False)\n else:\n return (head,) + tuple([lazify_task(arg, False) for arg in tail])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_lazify_inline_singleton_lists.return.dsk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_lazify_inline_singleton_lists.return.dsk", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 98, "end_line": 132, "span_ids": ["inline_singleton_lists", "lazify"], "tokens": 275}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def lazify(dsk):\n \"\"\"\n Remove unnecessary calls to ``list`` in tasks.\n\n See Also\n --------\n dask.bag.core.lazify_task\n \"\"\"\n return valmap(lazify_task, dsk)\n\n\ndef inline_singleton_lists(dsk, keys, dependencies=None):\n \"\"\"Inline lists that are only used once.\n\n >>> d = {'b': (list, 'a'),\n ... 
'c': (f, 'b', 1)} # doctest: +SKIP\n >>> inline_singleton_lists(d) # doctest: +SKIP\n {'c': (f, (list, 'a'), 1)}\n\n Pairs nicely with lazify afterwards.\n \"\"\"\n if dependencies is None:\n dependencies = {k: get_dependencies(dsk, task=v) for k, v in dsk.items()}\n dependents = reverse_dict(dependencies)\n\n inline_keys = {\n k\n for k, v in dsk.items()\n if istask(v) and v and v[0] is list and len(dependents[k]) == 1\n }\n inline_keys.difference_update(flatten(keys))\n dsk = inline(dsk, inline_keys, inline_constants=False)\n for k in inline_keys:\n del dsk[k]\n return dsk", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_optimize__to_textfiles_chunk.with_lazy_file_as_f_.if_last_endline_.f_write_endline_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_optimize__to_textfiles_chunk.with_lazy_file_as_f_.if_last_endline_.f_write_endline_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 135, "end_line": 164, "span_ids": ["_to_textfiles_chunk", "optimize"], "tokens": 252}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def optimize(dsk, keys, fuse_keys=None, rename_fused_keys=None, **kwargs):\n \"\"\" Optimize a dask from a dask Bag. 
\"\"\"\n dsk = ensure_dict(dsk)\n dsk2, dependencies = cull(dsk, keys)\n kwargs = {}\n if rename_fused_keys is not None:\n kwargs[\"rename_keys\"] = rename_fused_keys\n dsk3, dependencies = fuse(dsk2, keys + (fuse_keys or []), dependencies, **kwargs)\n dsk4 = inline_singleton_lists(dsk3, keys, dependencies)\n dsk5 = lazify(dsk4)\n return dsk5\n\n\ndef _to_textfiles_chunk(data, lazy_file, last_endline):\n with lazy_file as f:\n if isinstance(f, io.TextIOWrapper):\n endline = \"\\n\"\n ensure = ensure_unicode\n else:\n endline = b\"\\n\"\n ensure = ensure_bytes\n started = False\n for d in data:\n if started:\n f.write(endline)\n else:\n started = True\n f.write(ensure(d))\n if last_endline:\n f.write(endline)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_to_textfiles_to_textfiles.if_compute_.else_.return.out_to_delayed_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_to_textfiles_to_textfiles.if_compute_.else_.return.out_to_delayed_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 167, "end_line": 257, "span_ids": ["to_textfiles"], "tokens": 726}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_textfiles(\n b,\n path,\n name_function=None,\n compression=\"infer\",\n encoding=system_encoding,\n compute=True,\n storage_options=None,\n last_endline=False,\n **kwargs\n):\n \"\"\"Write dask Bag to disk, one filename per partition, one line per element.\n\n **Paths**: This will create one file for each partition in your bag. You\n can specify the filenames in a variety of ways.\n\n Use a globstring\n\n >>> b.to_textfiles('/path/to/data/*.json.gz') # doctest: +SKIP\n\n The * will be replaced by the increasing sequence 1, 2, ...\n\n ::\n\n /path/to/data/0.json.gz\n /path/to/data/1.json.gz\n\n Use a globstring and a ``name_function=`` keyword argument. The\n name_function function should expect an integer and produce a string.\n Strings produced by name_function must preserve the order of their\n respective partition indices.\n\n >>> from datetime import date, timedelta\n >>> def name(i):\n ... return str(date(2015, 1, 1) + i * timedelta(days=1))\n\n >>> name(0)\n '2015-01-01'\n >>> name(15)\n '2015-01-16'\n\n >>> b.to_textfiles('/path/to/data/*.json.gz', name_function=name) # doctest: +SKIP\n\n ::\n\n /path/to/data/2015-01-01.json.gz\n /path/to/data/2015-01-02.json.gz\n ...\n\n You can also provide an explicit list of paths.\n\n >>> paths = ['/path/to/data/alice.json.gz', '/path/to/data/bob.json.gz', ...] # doctest: +SKIP\n >>> b.to_textfiles(paths) # doctest: +SKIP\n\n **Compression**: Filenames with extensions corresponding to known\n compression algorithms (gz, bz2) will be compressed accordingly.\n\n **Bag Contents**: The bag calling ``to_textfiles`` must be a bag of\n text strings. 
For example, a bag of dictionaries could be written to\n JSON text files by mapping ``json.dumps`` on to the bag first, and\n then calling ``to_textfiles`` :\n\n >>> b_dict.map(json.dumps).to_textfiles(\"/path/to/data/*.json\") # doctest: +SKIP\n\n **Last endline**: By default the last line does not end with a newline\n character. Pass ``last_endline=True`` to invert the default.\n \"\"\"\n mode = \"wb\" if encoding is None else \"wt\"\n files = open_files(\n path,\n compression=compression,\n mode=mode,\n encoding=encoding,\n name_function=name_function,\n num=b.npartitions,\n **(storage_options or {})\n )\n\n name = \"to-textfiles-\" + uuid.uuid4().hex\n dsk = {\n (name, i): (_to_textfiles_chunk, (b.name, i), f, last_endline)\n for i, f in enumerate(files)\n }\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[b])\n out = type(b)(graph, name, b.npartitions)\n\n if compute:\n out.compute(**kwargs)\n return [f.path for f in files]\n else:\n return out.to_delayed()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_finalize_StringAccessor.__getattr__.try_.except_AttributeError_.if_key_in_dir_str_.else_.raise": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_finalize_StringAccessor.__getattr__.try_.except_AttributeError_.if_key_in_dir_str_.else_.raise", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 260, "end_line": 311, "span_ids": ["StringAccessor.__dir__", "finalize_item", "StringAccessor._strmap", "StringAccessor.__init__", "StringAccessor", "finalize", "StringAccessor.__getattr__"], "tokens": 324}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def finalize(results):\n if not results:\n return results\n if isinstance(results, Iterator):\n results = list(results)\n if isinstance(results[0], Iterable) and not isinstance(results[0], str):\n results = toolz.concat(results)\n if isinstance(results, Iterator):\n results = list(results)\n return results\n\n\ndef finalize_item(results):\n return results[0]\n\n\nclass StringAccessor(object):\n \"\"\"String processing functions\n\n Examples\n --------\n\n >>> import dask.bag as db\n >>> b = db.from_sequence(['Alice Smith', 'Bob Jones', 'Charlie Smith'])\n >>> list(b.str.lower())\n ['alice smith', 'bob jones', 'charlie smith']\n\n >>> list(b.str.match('*Smith'))\n ['Alice Smith', 'Charlie Smith']\n\n >>> list(b.str.split(' '))\n [['Alice', 'Smith'], ['Bob', 'Jones'], ['Charlie', 'Smith']]\n \"\"\"\n\n def __init__(self, bag):\n self._bag = bag\n\n def __dir__(self):\n return sorted(set(dir(type(self)) + dir(str)))\n\n def _strmap(self, key, *args, **kwargs):\n return self._bag.map(operator.methodcaller(key, *args, **kwargs))\n\n def __getattr__(self, key):\n try:\n return object.__getattribute__(self, key)\n except AttributeError:\n if key in dir(str):\n func = getattr(str, key)\n return 
robust_wraps(func)(partial(self._strmap, key))\n else:\n raise", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_StringAccessor.match_robust_wraps.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_StringAccessor.match_robust_wraps.return._", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 313, "end_line": 340, "span_ids": ["StringAccessor.match", "robust_wraps"], "tokens": 154}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class StringAccessor(object):\n\n def match(self, pattern):\n \"\"\"Filter strings by those that match a pattern.\n\n Examples\n --------\n\n >>> import dask.bag as db\n >>> b = db.from_sequence(['Alice Smith', 'Bob Jones', 'Charlie Smith'])\n >>> list(b.str.match('*Smith'))\n ['Alice Smith', 'Charlie Smith']\n\n See Also\n --------\n fnmatch.fnmatch\n \"\"\"\n from fnmatch import fnmatch\n\n return self._bag.filter(partial(fnmatch, pat=pattern))\n\n\ndef robust_wraps(wrapper):\n \"\"\" A weak version of wraps that only copies doc. \"\"\"\n\n def _(wrapped):\n wrapped.__doc__ = wrapper.__doc__\n return wrapped\n\n return _", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Item_Item.__int__.__float__.__complex__.__bool__.DaskMethodsMixin_compute": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Item_Item.__int__.__float__.__complex__.__bool__.DaskMethodsMixin_compute", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 343, "end_line": 396, "span_ids": ["Item.__init__", "Item.from_delayed", "Item.__getstate__", "Item.__dask_keys__", "Item.__dask_graph__", "Item.apply", "Item.__dask_postcompute__", "Item.__dask_postpersist__", "Item:6", "Item._args", "Item", "Item.__dask_tokenize__", "Item:2", "Item.__setstate__"], "tokens": 392}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Item(DaskMethodsMixin):\n def __init__(self, dsk, key):\n self.dask = dsk\n self.key = key\n self.name = key\n\n def __dask_graph__(self):\n return self.dask\n\n def __dask_keys__(self):\n return [self.key]\n\n def __dask_tokenize__(self):\n return self.key\n\n __dask_optimize__ = globalmethod(optimize, key=\"bag_optimize\", falsey=dont_optimize)\n 
__dask_scheduler__ = staticmethod(mpget)\n\n def __dask_postcompute__(self):\n return finalize_item, ()\n\n def __dask_postpersist__(self):\n return Item, (self.key,)\n\n @staticmethod\n def from_delayed(value):\n \"\"\"Create bag item from a dask.delayed value.\n\n See ``dask.bag.from_delayed`` for details\n \"\"\"\n from dask.delayed import Delayed, delayed\n\n if not isinstance(value, Delayed) and hasattr(value, \"key\"):\n value = delayed(value)\n assert isinstance(value, Delayed)\n return Item(ensure_dict(value.dask), value.key)\n\n @property\n def _args(self):\n return (self.dask, self.key)\n\n def __getstate__(self):\n return self._args\n\n def __setstate__(self, state):\n self.dask, self.key = state\n\n def apply(self, func):\n name = \"{0}-{1}\".format(funcname(func), tokenize(self, func, \"apply\"))\n dsk = {name: (func, self.key)}\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n return Item(graph, name)\n\n __int__ = __float__ = __complex__ = __bool__ = DaskMethodsMixin.compute", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Item.to_delayed_Item.to_delayed.return.Delayed_self_key_dsk_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Item.to_delayed_Item.to_delayed.return.Delayed_self_key_dsk_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 398, "end_line": 412, "span_ids": ["Item.to_delayed"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Item(DaskMethodsMixin):\n\n def to_delayed(self, optimize_graph=True):\n \"\"\"Convert into a ``dask.delayed`` object.\n\n Parameters\n ----------\n optimize_graph : bool, optional\n If True [default], the graph is optimized before converting into\n ``dask.delayed`` objects.\n \"\"\"\n from dask.delayed import Delayed\n\n dsk = self.__dask_graph__()\n if optimize_graph:\n dsk = self.__dask_optimize__(dsk, self.__dask_keys__())\n return Delayed(self.key, dsk)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag_Bag.str.property_fget_StringAcces": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag_Bag.str.property_fget_StringAcces", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 415, "end_line": 478, "span_ids": ["Bag.__dask_tokenize__", "Bag.__dask_layers__", "Bag.__dask_graph__", "Bag.__str__", "Bag:7", "Bag", "Bag.__dask_keys__", "Bag.__dask_postcompute__", "Bag.__dask_postpersist__", "Bag:3", "Bag.__init__"], "tokens": 569}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n \"\"\"Parallel collection of Python objects\n\n Examples\n --------\n Create Bag from sequence\n\n >>> import dask.bag as db\n >>> b = db.from_sequence(range(5))\n >>> list(b.filter(lambda x: x % 2 == 0).map(lambda x: x * 10)) # doctest: +SKIP\n [0, 20, 40]\n\n Create Bag from filename or globstring of filenames\n\n >>> b = db.read_text('/path/to/mydata.*.json.gz').map(json.loads) # doctest: +SKIP\n\n Create manually (expert use)\n\n >>> dsk = {('x', 0): (range, 5),\n ... ('x', 1): (range, 5),\n ... ('x', 2): (range, 5)}\n >>> b = db.Bag(dsk, 'x', npartitions=3)\n\n >>> sorted(b.map(lambda x: x * 10)) # doctest: +SKIP\n [0, 0, 0, 10, 10, 10, 20, 20, 20, 30, 30, 30, 40, 40, 40]\n\n >>> int(b.fold(lambda x, y: x + y)) # doctest: +SKIP\n 30\n \"\"\"\n\n def __init__(self, dsk, name, npartitions):\n if not isinstance(dsk, HighLevelGraph):\n dsk = HighLevelGraph.from_collections(name, dsk, dependencies=[])\n self.dask = dsk\n self.name = name\n self.npartitions = npartitions\n\n def __dask_graph__(self):\n return self.dask\n\n def __dask_keys__(self):\n return [(self.name, i) for i in range(self.npartitions)]\n\n def __dask_layers__(self):\n return (self.name,)\n\n def __dask_tokenize__(self):\n return self.name\n\n __dask_optimize__ = globalmethod(optimize, key=\"bag_optimize\", falsey=dont_optimize)\n __dask_scheduler__ = staticmethod(mpget)\n\n def __dask_postcompute__(self):\n return finalize, ()\n\n def __dask_postpersist__(self):\n return type(self), (self.name, self.npartitions)\n\n def __str__(self):\n return \"dask.bag<%s, npartitions=%d>\" % (key_split(self.name), self.npartitions)\n\n __repr__ = __str__\n\n str = property(fget=StringAccessor)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.map_Bag.map.return.bag_map_func_self_args": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.map_Bag.map.return.bag_map_func_self_args", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 480, "end_line": 539, "span_ids": ["Bag.map"], "tokens": 510}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def map(self, func, *args, **kwargs):\n \"\"\"Apply a function elementwise across one or more bags.\n\n Note that all ``Bag`` arguments must be partitioned identically.\n\n Parameters\n ----------\n func : callable\n *args, **kwargs : Bag, Item, or object\n Extra arguments and keyword arguments to pass to ``func`` *after*\n the calling bag instance. 
Non-Bag args/kwargs are broadcasted\n across all calls to ``func``.\n\n Notes\n -----\n For calls with multiple `Bag` arguments, corresponding partitions\n should have the same length; if they do not, the call will error at\n compute time.\n\n Examples\n --------\n >>> import dask.bag as db\n >>> b = db.from_sequence(range(5), npartitions=2)\n >>> b2 = db.from_sequence(range(5, 10), npartitions=2)\n\n Apply a function to all elements in a bag:\n\n >>> b.map(lambda x: x + 1).compute()\n [1, 2, 3, 4, 5]\n\n Apply a function with arguments from multiple bags:\n\n >>> from operator import add\n >>> b.map(add, b2).compute()\n [5, 7, 9, 11, 13]\n\n Non-bag arguments are broadcast across all calls to the mapped\n function:\n\n >>> b.map(add, 1).compute()\n [1, 2, 3, 4, 5]\n\n Keyword arguments are also supported, and have the same semantics as\n regular arguments:\n\n >>> def myadd(x, y=0):\n ... return x + y\n >>> b.map(myadd, y=b2).compute()\n [5, 7, 9, 11, 13]\n >>> b.map(myadd, y=1).compute()\n [1, 2, 3, 4, 5]\n\n Both arguments and keyword arguments can also be instances of\n ``dask.bag.Item``. Here we'll add the max value in the bag to each\n element:\n\n >>> b.map(myadd, b.max()).compute()\n [4, 5, 6, 7, 8]\n \"\"\"\n return bag_map(func, self, *args, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.starmap_Bag.starmap.return.type_self_graph_name_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.starmap_Bag.starmap.return.type_self_graph_name_s", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 541, "end_line": 599, "span_ids": ["Bag.starmap"], "tokens": 546}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def starmap(self, func, **kwargs):\n \"\"\"Apply a function using argument tuples from the given bag.\n\n This is similar to ``itertools.starmap``, except it also accepts\n keyword arguments. In pseudocode, this could be written as:\n\n >>> def starmap(func, bag, **kwargs):\n ... return (func(*args, **kwargs) for args in bag)\n\n Parameters\n ----------\n func : callable\n **kwargs : Item, Delayed, or object, optional\n Extra keyword arguments to pass to ``func``. These can either be\n normal objects, ``dask.bag.Item``, or ``dask.delayed.Delayed``.\n\n Examples\n --------\n >>> import dask.bag as db\n >>> data = [(1, 2), (3, 4), (5, 6), (7, 8), (9, 10)]\n >>> b = db.from_sequence(data, npartitions=2)\n\n Apply a function to each argument tuple:\n\n >>> from operator import add\n >>> b.starmap(add).compute()\n [3, 7, 11, 15, 19]\n\n Apply a function to each argument tuple, with additional keyword\n arguments:\n\n >>> def myadd(x, y, z=0):\n ... 
return x + y + z\n >>> b.starmap(myadd, z=10).compute()\n [13, 17, 21, 25, 29]\n\n Keyword arguments can also be instances of ``dask.bag.Item`` or\n ``dask.delayed.Delayed``:\n\n >>> max_second = b.pluck(1).max()\n >>> max_second.compute()\n 10\n >>> b.starmap(myadd, z=max_second).compute()\n [13, 17, 21, 25, 29]\n \"\"\"\n name = \"{0}-{1}\".format(\n funcname(func), tokenize(self, func, \"starmap\", **kwargs)\n )\n dependencies = [self]\n if kwargs:\n kwargs, collections = unpack_scalar_dask_kwargs(kwargs)\n dependencies.extend(collections)\n\n dsk = {\n (name, i): (reify, (starmap_chunk, func, (self.name, i), kwargs))\n for i in range(self.npartitions)\n }\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)\n return type(self)(graph, name, self.npartitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag._args_Bag.filter.return.type_self_graph_name_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag._args_Bag.filter.return.type_self_graph_name_s", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 601, "end_line": 628, "span_ids": ["Bag.__setstate__", "Bag.filter", "Bag.__getstate__", "Bag._args"], "tokens": 248}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n @property\n def _args(self):\n return (self.dask, self.name, self.npartitions)\n\n def __getstate__(self):\n return self._args\n\n def __setstate__(self, state):\n self.dask, self.name, self.npartitions = state\n\n def filter(self, predicate):\n \"\"\"Filter elements in collection by a predicate function.\n\n >>> def iseven(x):\n ... 
return x % 2 == 0\n\n >>> import dask.bag as db\n >>> b = db.from_sequence(range(5))\n >>> list(b.filter(iseven)) # doctest: +SKIP\n [0, 2, 4]\n \"\"\"\n name = \"filter-{0}-{1}\".format(funcname(predicate), tokenize(self, predicate))\n dsk = dict(\n ((name, i), (reify, (filter, predicate, (self.name, i))))\n for i in range(self.npartitions)\n )\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n return type(self)(graph, name, self.npartitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.random_sample_Bag.random_sample.return.type_self_graph_name_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.random_sample_Bag.random_sample.return.type_self_graph_name_s", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 630, "end_line": 663, "span_ids": ["Bag.random_sample"], "tokens": 338}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def random_sample(self, prob, random_state=None):\n \"\"\"Return elements from bag with probability of ``prob``.\n\n Parameters\n ----------\n prob : float\n A float between 0 and 1, representing the probability that each\n element will be returned.\n random_state : int or random.Random, optional\n If an integer, will be used to seed a new ``random.Random`` object.\n If provided, results in deterministic sampling.\n\n Examples\n --------\n >>> import dask.bag as db\n >>> b = db.from_sequence(range(5))\n >>> list(b.random_sample(0.5, 43))\n [0, 3, 4]\n >>> list(b.random_sample(0.5, 43))\n [0, 3, 4]\n \"\"\"\n if not 0 <= prob <= 1:\n raise ValueError(\"prob must be a number in the interval [0, 1]\")\n if not isinstance(random_state, Random):\n random_state = Random(random_state)\n\n name = \"random-sample-%s\" % tokenize(self, prob, random_state.getstate())\n state_data = random_state_data_python(self.npartitions, random_state)\n dsk = {\n (name, i): (reify, (random_sample, (self.name, i), state, prob))\n for i, state in zip(range(self.npartitions), state_data)\n }\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n return type(self)(graph, name, self.npartitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.remove_Bag.remove.return.type_self_graph_name_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.remove_Bag.remove.return.type_self_graph_name_s", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 665, "end_line": 
682, "span_ids": ["Bag.remove"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def remove(self, predicate):\n \"\"\"Remove elements in collection that match predicate.\n\n >>> def iseven(x):\n ... return x % 2 == 0\n\n >>> import dask.bag as db\n >>> b = db.from_sequence(range(5))\n >>> list(b.remove(iseven)) # doctest: +SKIP\n [1, 3]\n \"\"\"\n name = \"remove-{0}-{1}\".format(funcname(predicate), tokenize(self, predicate))\n dsk = dict(\n ((name, i), (reify, (remove, predicate, (self.name, i))))\n for i in range(self.npartitions)\n )\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n return type(self)(graph, name, self.npartitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.map_partitions_Bag.map_partitions.return.map_partitions_func_self": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.map_partitions_Bag.map_partitions.return.map_partitions_func_self", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 684, "end_line": 725, "span_ids": ["Bag.map_partitions"], "tokens": 379}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def map_partitions(self, func, *args, **kwargs):\n \"\"\"Apply a function to every partition across one or more bags.\n\n Note that all ``Bag`` arguments must be partitioned identically.\n\n Parameters\n ----------\n func : callable\n The function to be called on every partition.\n This function should expect an ``Iterator`` or ``Iterable`` for\n every partition and should return an ``Iterator`` or ``Iterable``\n in return.\n *args, **kwargs : Bag, Item, Delayed, or object\n Arguments and keyword arguments to pass to ``func``.\n Partitions from this bag will be the first argument, and these will\n be passed *after*.\n\n Examples\n --------\n >>> import dask.bag as db\n >>> b = db.from_sequence(range(1, 101), npartitions=10)\n >>> def div(nums, den=1):\n ... 
return [num / den for num in nums]\n\n Using a python object:\n\n >>> hi = b.max().compute()\n >>> hi\n 100\n >>> b.map_partitions(div, den=hi).take(5)\n (0.01, 0.02, 0.03, 0.04, 0.05)\n\n Using an ``Item``:\n\n >>> b.map_partitions(div, den=b.max()).take(5)\n (0.01, 0.02, 0.03, 0.04, 0.05)\n\n Note that while both versions give the same output, the second forms a\n single graph, and then computes everything at once, and in some cases\n may be more efficient.\n \"\"\"\n return map_partitions(func, self, *args, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.pluck_Bag.pluck.return.type_self_graph_name_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.pluck_Bag.pluck.return.type_self_graph_name_s", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 727, "end_line": 751, "span_ids": ["Bag.pluck"], "tokens": 280}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def pluck(self, key, default=no_default):\n \"\"\"Select item from all tuples/dicts in collection.\n\n >>> import dask.bag as db\n >>> b = db.from_sequence([{'name': 'Alice', 'credits': [1, 2, 3]},\n ... 
{'name': 'Bob', 'credits': [10, 20]}])\n >>> list(b.pluck('name')) # doctest: +SKIP\n ['Alice', 'Bob']\n >>> list(b.pluck('credits').pluck(0)) # doctest: +SKIP\n [1, 10]\n \"\"\"\n name = \"pluck-\" + tokenize(self, key, default)\n key = quote(key)\n if default == no_default:\n dsk = dict(\n ((name, i), (list, (pluck, key, (self.name, i))))\n for i in range(self.npartitions)\n )\n else:\n dsk = dict(\n ((name, i), (list, (pluck, key, (self.name, i), default)))\n for i in range(self.npartitions)\n )\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n return type(self)(graph, name, self.npartitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.unzip_Bag.unzip.return.tuple_self_pluck_i_for_i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.unzip_Bag.unzip.return.tuple_self_pluck_i_for_i", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 753, "end_line": 770, "span_ids": ["Bag.unzip"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def unzip(self, n):\n \"\"\"Transform a bag of tuples to ``n`` bags of their elements.\n\n Examples\n --------\n >>> import dask.bag as db\n >>> b = db.from_sequence([(i, i + 1, i + 2) for i in range(10)])\n >>> first, second, third = b.unzip(3)\n >>> isinstance(first, db.Bag)\n True\n >>> first.compute()\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n Note that this is equivalent to:\n\n >>> first, second, third = (b.pluck(i) for i in range(3))\n \"\"\"\n return tuple(self.pluck(i) for i in range(n))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.to_textfiles_Bag.to_avro.return.to_avro_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.to_textfiles_Bag.to_avro.return.to_avro_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 770, "end_line": 818, "span_ids": ["Bag.to_textfiles", "Bag.to_avro"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n @wraps(to_textfiles)\n def to_textfiles(\n self,\n path,\n name_function=None,\n compression=\"infer\",\n encoding=system_encoding,\n compute=True,\n 
storage_options=None,\n last_endline=False,\n **kwargs\n ):\n return to_textfiles(\n self,\n path,\n name_function,\n compression,\n encoding,\n compute,\n storage_options=storage_options,\n last_endline=last_endline,\n **kwargs\n )\n\n @wraps(to_avro)\n def to_avro(\n self,\n filename,\n schema,\n name_function=None,\n storage_options=None,\n codec=\"null\",\n sync_interval=16000,\n metadata=None,\n compute=True,\n **kwargs\n ):\n return to_avro(\n self,\n filename,\n schema,\n name_function,\n storage_options,\n codec,\n sync_interval,\n metadata,\n compute,\n **kwargs\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.fold_Bag.fold.if_initial_is_not_no_defa.else_.return.self_reduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.fold_Bag.fold.if_initial_is_not_no_defa.else_.return.self_reduction_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 822, "end_line": 887, "span_ids": ["Bag.fold"], "tokens": 490}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def fold(\n self, binop, combine=None, initial=no_default, split_every=None, out_type=Item\n ):\n \"\"\"Parallelizable reduction\n\n Fold is like the builtin function ``reduce`` except that it works in\n parallel. Fold takes two binary operator functions, one to reduce each\n partition of our dataset and another to combine results between\n partitions\n\n 1. ``binop``: Binary operator to reduce within each partition\n 2. ``combine``: Binary operator to combine results from binop\n\n Sequentially this would look like the following:\n\n >>> intermediates = [reduce(binop, part) for part in partitions] # doctest: +SKIP\n >>> final = reduce(combine, intermediates) # doctest: +SKIP\n\n If only one function is given then it is used for both functions\n ``binop`` and ``combine`` as in the following example to compute the\n sum:\n\n >>> def add(x, y):\n ... return x + y\n\n >>> import dask.bag as db\n >>> b = db.from_sequence(range(5))\n >>> b.fold(add).compute()\n 10\n\n In full form we provide both binary operators as well as their default\n arguments\n\n >>> b.fold(binop=add, combine=add, initial=0).compute()\n 10\n\n More complex binary operators are also doable\n\n >>> def add_to_set(acc, x):\n ... ''' Add new element x to set acc '''\n ... 
return acc | set([x])\n >>> b.fold(add_to_set, set.union, initial=set()).compute()\n {0, 1, 2, 3, 4}\n\n See Also\n --------\n\n Bag.foldby\n \"\"\"\n combine = combine or binop\n if initial is not no_default:\n return self.reduction(\n curry(_reduce, binop, initial=initial),\n curry(_reduce, combine),\n split_every=split_every,\n out_type=out_type,\n )\n else:\n from tlz.curried import reduce\n\n return self.reduction(\n reduce(binop),\n reduce(combine),\n split_every=split_every,\n out_type=out_type,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.frequencies_Bag.frequencies.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.frequencies_Bag.frequencies.return.result", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 889, "end_line": 906, "span_ids": ["Bag.frequencies"], "tokens": 149}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def frequencies(self, split_every=None, sort=False):\n \"\"\"Count number of occurrences of each distinct element.\n\n >>> import dask.bag as db\n >>> b = db.from_sequence(['Alice', 'Bob', 'Alice'])\n >>> dict(b.frequencies()) # doctest: +SKIP\n {'Alice': 2, 'Bob': 1}\n \"\"\"\n result = self.reduction(\n frequencies,\n merge_frequencies,\n out_type=Bag,\n split_every=split_every,\n name=\"frequencies\",\n ).map_partitions(dictitems)\n if sort:\n result = result.map_partitions(sorted, key=second, reverse=True)\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.topk_Bag.topk.return.self_reduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.topk_Bag.topk.return.self_reduction_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 908, "end_line": 933, "span_ids": ["Bag.topk"], "tokens": 199}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def topk(self, k, key=None, split_every=None):\n \"\"\"K largest elements in collection\n\n Optionally ordered by some key function\n\n >>> import dask.bag as db\n >>> b = db.from_sequence([10, 3, 5, 7, 11, 4])\n >>> list(b.topk(2))\n [11, 10]\n\n >>> list(b.topk(2, lambda x: -x))\n [3,
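A compact, runnable variant of the ``fold`` and ``frequencies`` docstring examples above; the lambdas stand in for the named ``add``/``add_to_set`` helpers:

```python
import dask.bag as db

b = db.from_sequence(range(5))

# One operator serves as both binop and combine.
print(b.fold(lambda x, y: x + y).compute())  # 10

# Separate combine, plus an initial value for each partition.
print(b.fold(lambda acc, x: acc | {x}, set.union, initial=set()).compute())
# {0, 1, 2, 3, 4}

names = db.from_sequence(["Alice", "Bob", "Alice"])
print(dict(names.frequencies()))             # {'Alice': 2, 'Bob': 1}
```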
4]\n \"\"\"\n if key:\n if callable(key) and takes_multiple_arguments(key):\n key = partial(apply, key)\n func = partial(topk, k, key=key)\n else:\n func = partial(topk, k)\n return self.reduction(\n func,\n compose(func, toolz.concat),\n out_type=Bag,\n split_every=split_every,\n name=\"topk\",\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.distinct_Bag.distinct.return.self_reduction_func_agg_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.distinct_Bag.distinct.return.self_reduction_func_agg_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 935, "end_line": 960, "span_ids": ["Bag.distinct"], "tokens": 263}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def distinct(self, key=None):\n \"\"\"Distinct elements of collection\n\n Unordered without repeats.\n\n Parameters\n ----------\n key: {callable,str}\n Defines uniqueness of items in bag by calling ``key`` on each item.\n If a string is passed ``key`` is considered to be ``lambda x: x[key]``.\n\n Examples\n --------\n >>> import dask.bag as db\n >>> b = db.from_sequence(['Alice', 'Bob', 'Alice'])\n >>> sorted(b.distinct())\n ['Alice', 'Bob']\n >>> b = db.from_sequence([{'name': 'Alice'}, {'name': 'Bob'}, {'name': 'Alice'}])\n >>> b.distinct(key=lambda x: x['name']).compute()\n [{'name': 'Alice'}, {'name': 'Bob'}]\n >>> b.distinct(key='name').compute()\n [{'name': 'Alice'}, {'name': 'Bob'}]\n \"\"\"\n func = chunk_distinct if key is None else partial(chunk_distinct, key=key)\n agg = merge_distinct if key is None else partial(merge_distinct, key=key)\n return self.reduction(func, agg, out_type=Bag, name=\"distinct\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.reduction_Bag.reduction.if_out_type_is_Item_.else_.return.Bag_graph_fmt_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.reduction_Bag.reduction.if_out_type_is_Item_.else_.return.Bag_graph_fmt_1_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 962, "end_line": 1030, "span_ids": ["Bag.reduction"], "tokens": 520}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class 
Bag(DaskMethodsMixin):\n\n def reduction(\n self, perpartition, aggregate, split_every=None, out_type=Item, name=None\n ):\n \"\"\"Reduce collection with reduction operators.\n\n Parameters\n ----------\n perpartition: function\n reduction to apply to each partition\n aggregate: function\n reduction to apply to the results of all partitions\n split_every: int (optional)\n Group partitions into groups of this size while performing reduction\n Defaults to 8\n out_type: {Bag, Item}\n The out type of the result, Item if a single element, Bag if a list\n of elements. Defaults to Item.\n\n Examples\n --------\n >>> import dask.bag as db\n >>> b = db.from_sequence(range(10))\n >>> b.reduction(sum, sum).compute()\n 45\n \"\"\"\n if split_every is None:\n split_every = 8\n if split_every is False:\n split_every = self.npartitions\n\n token = tokenize(self, perpartition, aggregate, split_every)\n a = \"%s-part-%s\" % (name or funcname(perpartition), token)\n is_last = self.npartitions == 1\n dsk = {\n (a, i): (empty_safe_apply, perpartition, (self.name, i), is_last)\n for i in range(self.npartitions)\n }\n k = self.npartitions\n b = a\n fmt = \"%s-aggregate-%s\" % (name or funcname(aggregate), token)\n depth = 0\n\n while k > split_every:\n c = fmt + str(depth)\n for i, inds in enumerate(partition_all(split_every, range(k))):\n dsk[(c, i)] = (\n empty_safe_aggregate,\n aggregate,\n [(b, j) for j in inds],\n False,\n )\n\n k = i + 1\n b = c\n depth += 1\n\n dsk[(fmt, 0)] = (\n empty_safe_aggregate,\n aggregate,\n [(b, j) for j in range(k)],\n True,\n )\n\n graph = HighLevelGraph.from_collections(fmt, dsk, dependencies=[self])\n if out_type is Item:\n dsk[fmt] = dsk.pop((fmt, 0))\n return Item(graph, fmt)\n else:\n return Bag(graph, fmt, 1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.sum_Bag.std.return.self_var_ddof_ddof_apply": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.sum_Bag.std.return.self_var_ddof_ddof_apply", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1025, "end_line": 1073, "span_ids": ["Bag.max", "Bag.mean", "Bag.count", "Bag.min", "Bag.all", "Bag.std", "Bag.any", "Bag.var", "Bag.sum"], "tokens": 371}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def sum(self, split_every=None):\n \"\"\" Sum all elements \"\"\"\n return self.reduction(sum, sum, split_every=split_every)\n\n def max(self, split_every=None):\n \"\"\" Maximum element \"\"\"\n return self.reduction(max, max, split_every=split_every)\n\n def min(self, split_every=None):\n \"\"\" Minimum element \"\"\"\n return self.reduction(min, min, split_every=split_every)\n\n def any(self, split_every=None):\n \"\"\" Are any of the elements truthy? 
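A short illustration of ``reduction`` with an explicit ``split_every``; the ten-partition split is an assumption chosen to make the aggregation tree non-trivial:

```python
import dask.bag as db

b = db.from_sequence(range(100), npartitions=10)

# ``sum`` runs once per partition; the ten partial sums are then aggregated
# in a tree with at most split_every=4 inputs per aggregation task.
total = b.reduction(sum, sum, split_every=4)
print(total.compute())  # 4950
```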
\"\"\"\n return self.reduction(any, any, split_every=split_every)\n\n def all(self, split_every=None):\n \"\"\" Are all elements truthy? \"\"\"\n return self.reduction(all, all, split_every=split_every)\n\n def count(self, split_every=None):\n \"\"\" Count the number of elements. \"\"\"\n return self.reduction(count, sum, split_every=split_every)\n\n def mean(self):\n \"\"\" Arithmetic mean \"\"\"\n\n def mean_chunk(seq):\n total, n = 0.0, 0\n for x in seq:\n total += x\n n += 1\n return total, n\n\n def mean_aggregate(x):\n totals, counts = list(zip(*x))\n return 1.0 * sum(totals) / sum(counts)\n\n return self.reduction(mean_chunk, mean_aggregate, split_every=False)\n\n def var(self, ddof=0):\n \"\"\" Variance \"\"\"\n return self.reduction(\n chunk.var_chunk, partial(chunk.var_aggregate, ddof=ddof), split_every=False\n )\n\n def std(self, ddof=0):\n \"\"\" Standard deviation \"\"\"\n return self.var(ddof=ddof).apply(math.sqrt)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.join_Bag.join.return.type_self_graph_name_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.join_Bag.join.return.type_self_graph_name_s", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1082, "end_line": 1149, "span_ids": ["Bag.join"], "tokens": 583}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def join(self, other, on_self, on_other=None):\n \"\"\"Joins collection with another collection.\n\n Other collection must be one of the following:\n\n 1. An iterable. We recommend tuples over lists for internal\n performance reasons.\n 2. A delayed object, pointing to a tuple. This is recommended if the\n other collection is sizable and you're using the distributed\n scheduler. Dask is able to pass around data wrapped in delayed\n objects with greater sophistication.\n 3. 
A Bag with a single partition\n\n You might also consider Dask Dataframe, whose join operations are much\n more heavily optimized.\n\n Parameters\n ----------\n other: Iterable, Delayed, Bag\n Other collection on which to join\n on_self: callable\n Function to call on elements in this collection to determine a\n match\n on_other: callable (defaults to on_self)\n Function to call on elements in the other collection to determine a\n match\n\n Examples\n --------\n >>> import dask.bag as db\n >>> people = db.from_sequence(['Alice', 'Bob', 'Charlie'])\n >>> fruit = ['Apple', 'Apricot', 'Banana']\n >>> list(people.join(fruit, lambda x: x[0]))\n [('Apple', 'Alice'), ('Apricot', 'Alice'), ('Banana', 'Bob')]\n \"\"\"\n name = \"join-\" + tokenize(self, other, on_self, on_other)\n dsk = {}\n if isinstance(other, Bag):\n if other.npartitions == 1:\n dsk.update(other.dask)\n other = other.__dask_keys__()[0]\n dsk[\"join-%s-other\" % name] = (list, other)\n else:\n msg = (\n \"Multi-bag joins are not implemented. \"\n \"We recommend Dask dataframe if appropriate\"\n )\n raise NotImplementedError(msg)\n elif isinstance(other, Delayed):\n dsk.update(other.dask)\n other = other._key\n elif isinstance(other, Iterable):\n other = other\n else:\n msg = (\n \"Joined argument must be single-partition Bag, \"\n \"delayed object, or Iterable, got %s\" % type(other).__name__\n )\n raise TypeError(msg)\n\n if on_other is None:\n on_other = on_self\n\n for i in range(self.npartitions):\n dsk[(name, i)] = (list, (join, on_other, other, on_self, (self.name, i)))\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n return type(self)(graph, name, self.npartitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.product_Bag.product.return.type_self_graph_name_n": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.product_Bag.product.return.type_self_graph_name_n", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1143, "end_line": 1157, "span_ids": ["Bag.product"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def product(self, other):\n \"\"\" Cartesian product between two bags. 
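Restating the ``join`` doctest as a runnable snippet, with the keyword argument spelled out for clarity:

```python
import dask.bag as db

people = db.from_sequence(["Alice", "Bob", "Charlie"])
fruit = ["Apple", "Apricot", "Banana"]

# Elements pair up when on_self(person) == on_other(fruit item); here, the
# first letter. on_other defaults to on_self.
print(list(people.join(fruit, on_self=lambda x: x[0])))
# [('Apple', 'Alice'), ('Apricot', 'Alice'), ('Banana', 'Bob')]
```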
\"\"\"\n assert isinstance(other, Bag)\n name = \"product-\" + tokenize(self, other)\n n, m = self.npartitions, other.npartitions\n dsk = dict(\n (\n (name, i * m + j),\n (list, (itertools.product, (self.name, i), (other.name, j))),\n )\n for i in range(n)\n for j in range(m)\n )\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self, other])\n return type(self)(graph, name, n * m)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.foldby_Bag.foldby._Combined_reduction_and": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.foldby_Bag.foldby._Combined_reduction_and", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1167, "end_line": 1282, "span_ids": ["Bag.foldby"], "tokens": 917}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def foldby(\n self,\n key,\n binop,\n initial=no_default,\n combine=None,\n combine_initial=no_default,\n split_every=None,\n ):\n \"\"\"Combined reduction and groupby.\n\n Foldby provides a combined groupby and reduce for efficient parallel\n split-apply-combine tasks.\n\n The computation\n\n >>> b.foldby(key, binop, init) # doctest: +SKIP\n\n is equivalent to the following:\n\n >>> def reduction(group): # doctest: +SKIP\n ... return reduce(binop, group, init) # doctest: +SKIP\n\n >>> b.groupby(key).map(lambda (k, v): (k, reduction(v)))# doctest: +SKIP\n\n But uses minimal communication and so is *much* faster.\n\n >>> import dask.bag as db\n >>> b = db.from_sequence(range(10))\n >>> iseven = lambda x: x % 2 == 0\n >>> add = lambda x, y: x + y\n >>> dict(b.foldby(iseven, add))\n {True: 20, False: 25}\n\n **Key Function**\n\n The key function determines how to group the elements in your bag.\n In the common case where your bag holds dictionaries then the key\n function often gets out one of those elements.\n\n >>> def key(x):\n ... return x['name']\n\n This case is so common that it is special cased, and if you provide a\n key that is not a callable function then dask.bag will turn it into one\n automatically. The following are equivalent:\n\n >>> b.foldby(lambda x: x['name'], ...) # doctest: +SKIP\n >>> b.foldby('name', ...) # doctest: +SKIP\n\n **Binops**\n\n It can be tricky to construct the right binary operators to perform\n analytic queries. The ``foldby`` method accepts two binary operators,\n ``binop`` and ``combine``. Binary operators two inputs and output must\n have the same type.\n\n Binop takes a running total and a new element and produces a new total:\n\n >>> def binop(total, x):\n ... return total + x['amount']\n\n Combine takes two totals and combines them:\n\n >>> def combine(total1, total2):\n ... return total1 + total2\n\n Each of these binary operators may have a default first value for\n total, before any other value is seen. 
For addition binary operators\n like above this is often ``0`` or the identity element for your\n operation.\n\n **split_every**\n\n Group partitions into groups of this size while performing reduction.\n Defaults to 8.\n\n >>> b.foldby('name', binop, 0, combine, 0) # doctest: +SKIP\n\n Examples\n --------\n\n We can compute the maximum of some ``(key, value)`` pairs, grouped\n by the ``key``. (You might be better off converting the ``Bag`` to\n a ``dask.dataframe`` and using its groupby).\n\n >>> import random\n >>> import dask.bag as db\n\n >>> tokens = list('abcdefg')\n >>> values = range(10000)\n >>> a = [(random.choice(tokens), random.choice(values))\n ... for _ in range(100)]\n >>> a[:2] # doctest: +SKIP\n [('g', 676), ('a', 871)]\n\n >>> a = db.from_sequence(a)\n\n >>> def binop(t, x):\n ... return max((t, x), key=lambda x: x[1])\n\n >>> a.foldby(lambda x: x[0], binop).compute() # doctest: +SKIP\n [('g', ('g', 984)),\n ('a', ('a', 871)),\n ('b', ('b', 999)),\n ('c', ('c', 765)),\n ('f', ('f', 955)),\n ('e', ('e', 991)),\n ('d', ('d', 854))]\n\n See Also\n --------\n\n toolz.reduceby\n pyspark.combineByKey\n \"\"\"\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.foldby.if_split_every_is_None__Bag.foldby.return.type_self_graph_e_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.foldby.if_split_every_is_None__Bag.foldby.return.type_self_graph_e_1_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1274, "end_line": 1340, "span_ids": ["Bag.foldby"], "tokens": 553}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def foldby(\n self,\n key,\n binop,\n initial=no_default,\n combine=None,\n combine_initial=no_default,\n split_every=None,\n ):\n if split_every is None:\n split_every = 8\n if split_every is False:\n split_every = self.npartitions\n\n token = tokenize(self, key, binop, initial, combine, combine_initial)\n a = \"foldby-a-\" + token\n if combine is None:\n combine = binop\n if initial is not no_default:\n dsk = {\n (a, i): (reduceby, key, binop, (self.name, i), initial)\n for i in range(self.npartitions)\n }\n else:\n dsk = {\n (a, i): (reduceby, key, binop, (self.name, i))\n for i in range(self.npartitions)\n }\n\n combine2 = partial(chunk.foldby_combine2, combine)\n depth = 0\n k = self.npartitions\n b = a\n while k > split_every:\n c = b + str(depth)\n if combine_initial is not no_default:\n for i, inds in enumerate(partition_all(split_every, range(k))):\n dsk[(c, i)] = (\n reduceby,\n 0,\n combine2,\n (toolz.concat, (map, dictitems, [(b, j) for j in inds])),\n combine_initial,\n )\n else:\n for i, inds in enumerate(partition_all(split_every, range(k))):\n dsk[(c, i)] = (\n merge_with,\n (partial, reduce, combine),\n [(b, j) for j in inds],\n )\n\n k = i + 1\n b = c\n depth += 1\n\n e = \"foldby-b-\" 
+ token\n if combine_initial is not no_default:\n dsk[(e, 0)] = (\n dictitems,\n (\n reduceby,\n 0,\n combine2,\n (toolz.concat, (map, dictitems, [(b, j) for j in range(k)])),\n combine_initial,\n ),\n )\n else:\n dsk[(e, 0)] = (\n dictitems,\n (merge_with, (partial, reduce, combine), [(b, j) for j in range(k)]),\n )\n\n graph = HighLevelGraph.from_collections(e, dsk, dependencies=[self])\n return type(self)(graph, e, 1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.take_Bag.take.if_compute_.else_.return.b": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.take_Bag.take.if_compute_.else_.return.b", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1351, "end_line": 1404, "span_ids": ["Bag.take"], "tokens": 465}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def take(self, k, npartitions=1, compute=True, warn=True):\n \"\"\"Take the first k elements.\n\n Parameters\n ----------\n k : int\n The number of elements to return\n npartitions : int, optional\n Elements are only taken from the first ``npartitions``, with a\n default of 1. If there are fewer than ``k`` rows in the first\n ``npartitions`` a warning will be raised and any found rows\n returned. 
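Putting the ``foldby`` pieces together, a minimal end-to-end run taken from the docstring above:

```python
import dask.bag as db

b = db.from_sequence(range(10))
iseven = lambda x: x % 2 == 0
add = lambda x, y: x + y

# reduceby runs per partition with binop; the per-partition dicts are then
# merged across partitions with combine (binop again when combine is omitted).
print(dict(b.foldby(iseven, add)))  # {True: 20, False: 25}
```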
Pass -1 to use all partitions.\n compute : bool, optional\n Whether to compute the result, default is True.\n warn : bool, optional\n Whether to warn if the number of elements returned is less than\n requested, default is True.\n\n >>> import dask.bag as db\n >>> b = db.from_sequence(range(1_000))\n >>> b.take(3)\n (0, 1, 2)\n \"\"\"\n\n if npartitions <= -1:\n npartitions = self.npartitions\n if npartitions > self.npartitions:\n raise ValueError(\n \"only {} partitions, take \"\n \"received {}\".format(self.npartitions, npartitions)\n )\n\n token = tokenize(self, k, npartitions)\n name = \"take-\" + token\n\n if npartitions > 1:\n name_p = \"take-partial-\" + token\n\n dsk = {}\n for i in range(npartitions):\n dsk[(name_p, i)] = (list, (take, k, (self.name, i)))\n\n concat = (toolz.concat, ([(name_p, i) for i in range(npartitions)]))\n dsk[(name, 0)] = (safe_take, k, concat, warn)\n else:\n dsk = {(name, 0): (safe_take, k, (self.name, 0), warn)}\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n b = Bag(graph, name, 1)\n\n if compute:\n return tuple(b.compute())\n else:\n return b", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.flatten_Bag.__iter__.return.iter_self_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.flatten_Bag.__iter__.return.iter_self_compute_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1406, "end_line": 1426, "span_ids": ["Bag.flatten", "Bag.__iter__"], "tokens": 174}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def flatten(self):\n \"\"\"Concatenate nested lists into one long list.\n\n >>> import dask.bag as db\n >>> b = db.from_sequence([[1], [2, 3]])\n >>> list(b)\n [[1], [2, 3]]\n\n >>> list(b.flatten())\n [1, 2, 3]\n \"\"\"\n name = \"flatten-\" + tokenize(self)\n dsk = dict(\n ((name, i), (list, (toolz.concat, (self.name, i))))\n for i in range(self.npartitions)\n )\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n return type(self)(graph, name, self.npartitions)\n\n def __iter__(self):\n return iter(self.compute())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.groupby_Bag.groupby.if_shuffle_disk_.else_.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.groupby_Bag.groupby.if_shuffle_disk_.else_.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 
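A sketch of the ``take`` options discussed above; the thousand-element bag is an arbitrary choice:

```python
import dask.bag as db

b = db.from_sequence(range(1000), npartitions=10)
print(b.take(3))  # (0, 1, 2) -- looks at the first partition only

# Search every partition and keep the result lazy instead of computing it.
lazy = b.take(5, npartitions=-1, compute=False)
print(tuple(lazy.compute()))  # (0, 1, 2, 3, 4)
```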
1428, "end_line": 1488, "span_ids": ["Bag.groupby"], "tokens": 500}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def groupby(\n self,\n grouper,\n method=None,\n npartitions=None,\n blocksize=2 ** 20,\n max_branch=None,\n shuffle=None,\n ):\n \"\"\"Group collection by key function\n\n This requires a full dataset read, serialization and shuffle.\n This is expensive. If possible you should use ``foldby``.\n\n Parameters\n ----------\n grouper: function\n Function on which to group elements\n shuffle: str\n Either 'disk' for an on-disk shuffle or 'tasks' to use the task\n scheduling framework. Use 'disk' if you are on a single machine\n and 'tasks' if you are on a distributed cluster.\n npartitions: int\n If using the disk-based shuffle, the number of output partitions\n blocksize: int\n If using the disk-based shuffle, the size of shuffle blocks (bytes)\n max_branch: int\n If using the task-based shuffle, the amount of splitting each\n partition undergoes. Increase this for fewer copies but more\n scheduler overhead.\n\n Examples\n --------\n >>> import dask.bag as db\n >>> b = db.from_sequence(range(10))\n >>> iseven = lambda x: x % 2 == 0\n >>> dict(b.groupby(iseven)) # doctest: +SKIP\n {True: [0, 2, 4, 6, 8], False: [1, 3, 5, 7, 9]}\n\n See Also\n --------\n Bag.foldby\n \"\"\"\n if method is not None:\n raise Exception(\"The method= keyword has been moved to shuffle=\")\n if shuffle is None:\n shuffle = config.get(\"shuffle\", None)\n if shuffle is None:\n if \"distributed\" in config.get(\"scheduler\", \"\"):\n shuffle = \"tasks\"\n else:\n shuffle = \"disk\"\n if shuffle == \"disk\":\n return groupby_disk(\n self, grouper, npartitions=npartitions, blocksize=blocksize\n )\n elif shuffle == \"tasks\":\n return groupby_tasks(self, grouper, max_branch=max_branch)\n else:\n msg = \"Shuffle must be 'disk' or 'tasks'\"\n raise NotImplementedError(msg)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.to_dataframe_Bag.to_dataframe.return.dd_DataFrame_dsk_name_m": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.to_dataframe_Bag.to_dataframe.return.dd_DataFrame_dsk_name_m", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1478, "end_line": 1549, "span_ids": ["Bag.to_dataframe"], "tokens": 718}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def to_dataframe(self, meta=None, columns=None):\n \"\"\"Create Dask Dataframe from a Dask Bag.\n\n Bag should contain tuples, dict records, or scalars.\n\n Index will not be particularly 
meaningful. Use ``reindex`` afterwards\n if necessary.\n\n Parameters\n ----------\n meta : pd.DataFrame, dict, iterable, optional\n An empty ``pd.DataFrame`` that matches the dtypes and column names\n of the output. This metadata is necessary for many algorithms in\n dask dataframe to work. For ease of use, some alternative inputs\n are also available. Instead of a ``DataFrame``, a ``dict`` of\n ``{name: dtype}`` or iterable of ``(name, dtype)`` can be provided.\n If not provided or a list, a single element from the first\n partition will be computed, triggering a potentially expensive call\n to ``compute``. This may lead to unexpected results, so providing\n ``meta`` is recommended. For more information, see\n ``dask.dataframe.utils.make_meta``.\n columns : sequence, optional\n Column names to use. If the passed data do not have names\n associated with them, this argument provides names for the columns.\n Otherwise this argument indicates the order of the columns in the\n result (any names not found in the data will become all-NA\n columns). Note that if ``meta`` is provided, column names will be\n taken from there and this parameter is invalid.\n\n Examples\n --------\n >>> import dask.bag as db\n >>> b = db.from_sequence([{'name': 'Alice', 'balance': 100},\n ... {'name': 'Bob', 'balance': 200},\n ... {'name': 'Charlie', 'balance': 300}],\n ... npartitions=2)\n >>> df = b.to_dataframe()\n\n >>> df.compute()\n name balance\n 0 Alice 100\n 1 Bob 200\n 0 Charlie 300\n \"\"\"\n import pandas as pd\n import dask.dataframe as dd\n\n if meta is None:\n head = self.take(1, warn=False)\n if len(head) == 0:\n raise ValueError(\n \"`dask.bag.Bag.to_dataframe` failed to \"\n \"properly infer metadata, please pass in \"\n \"metadata via the `meta` keyword\"\n )\n meta = pd.DataFrame(list(head), columns=columns)\n elif columns is not None:\n raise ValueError(\"Can't specify both `meta` and `columns`\")\n else:\n meta = dd.utils.make_meta(meta)\n # Serializing the columns and dtypes is much smaller than serializing\n # the empty frame\n cols = list(meta.columns)\n dtypes = meta.dtypes.to_dict()\n name = \"to_dataframe-\" + tokenize(self, cols, dtypes)\n dsk = self.__dask_optimize__(self.dask, self.__dask_keys__())\n\n for i in range(self.npartitions):\n dsk[(name, i)] = (to_dataframe, (self.name, i), cols, dtypes)\n\n divisions = [None] * (self.npartitions + 1)\n return dd.DataFrame(dsk, name, meta, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.to_delayed_Bag.to_delayed.return._Delayed_k_dsk_for_k_in": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.to_delayed_Bag.to_delayed.return._Delayed_k_dsk_for_k_in", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1551, "end_line": 1570, "span_ids": ["Bag.to_delayed"], "tokens": 154}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, 
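Following the ``meta`` guidance above, a sketch that avoids the sample-partition computation by passing dtypes explicitly (the string dtype names are an assumption about acceptable ``make_meta`` input):

```python
import dask.bag as db

b = db.from_sequence(
    [{"name": "Alice", "balance": 100}, {"name": "Bob", "balance": 200}],
    npartitions=2,
)
# Supplying meta as {column: dtype} skips computing a sample element.
df = b.to_dataframe(meta={"name": "object", "balance": "int64"})
print(df.compute())
```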
"text": "class Bag(DaskMethodsMixin):\n\n def to_delayed(self, optimize_graph=True):\n \"\"\"Convert into a list of ``dask.delayed`` objects, one per partition.\n\n Parameters\n ----------\n optimize_graph : bool, optional\n If True [default], the graph is optimized before converting into\n ``dask.delayed`` objects.\n\n See Also\n --------\n dask.bag.from_delayed\n \"\"\"\n from dask.delayed import Delayed\n\n keys = self.__dask_keys__()\n dsk = self.__dask_graph__()\n if optimize_graph:\n dsk = self.__dask_optimize__(dsk, keys)\n return [Delayed(k, dsk) for k in keys]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.repartition_Bag.repartition.if_npartitions_is_not_Non.elif_partition_size_is_no.return.repartition_size_self_pa": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.repartition_Bag.repartition.if_npartitions_is_not_Non.elif_partition_size_is_no.return.repartition_size_self_pa", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1572, "end_line": 1604, "span_ids": ["Bag.repartition"], "tokens": 269}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def repartition(self, npartitions=None, partition_size=None):\n \"\"\"Repartition Bag across new divisions.\n\n Parameters\n ----------\n npartitions : int, optional\n Number of partitions of output.\n partition_size : int or string, optional\n Max number of bytes of memory for each partition. Use numbers or\n strings like 5MB.\n\n .. 
warning::\n\n This keyword argument triggers computation to determine\n the memory size of each partition, which may be expensive.\n\n Notes\n -----\n Exactly one of ``npartitions`` or ``partition_size`` should be specified.\n A ``ValueError`` will be raised when that is not the case.\n\n Examples\n --------\n >>> b.repartition(5) # set to have 5 partitions # doctest: +SKIP\n \"\"\"\n if sum([partition_size is not None, npartitions is not None]) != 1:\n raise ValueError(\n \"Please provide exactly one of the ``npartitions`` or ``partition_size`` keyword arguments\"\n )\n if npartitions is not None:\n return repartition_npartitions(self, npartitions)\n elif partition_size is not None:\n return repartition_size(self, partition_size)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.accumulate_Bag.accumulate.return.Bag_graph_b_self_nparti": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.accumulate_Bag.accumulate.return.Bag_graph_b_self_nparti", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1618, "end_line": 1653, "span_ids": ["Bag.accumulate"], "tokens": 432}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def accumulate(self, binop, initial=no_default):\n \"\"\"Repeatedly apply binary function to a sequence, accumulating results.\n\n This assumes that the bag is ordered. 
While this is typically the case\n not all Dask.bag functions preserve this property.\n\n Examples\n --------\n >>> import dask.bag as db\n >>> from operator import add\n >>> b = db.from_sequence([1, 2, 3, 4, 5], npartitions=2)\n >>> b.accumulate(add).compute()\n [1, 3, 6, 10, 15]\n\n Accumulate also takes an optional argument that will be used as the\n first value.\n\n >>> b.accumulate(add, initial=-1).compute()\n [-1, 0, 2, 5, 9, 14]\n \"\"\"\n token = tokenize(self, binop, initial)\n binop_name = funcname(binop)\n a = \"%s-part-%s\" % (binop_name, token)\n b = \"%s-first-%s\" % (binop_name, token)\n c = \"%s-second-%s\" % (binop_name, token)\n dsk = {\n (a, 0): (accumulate_part, binop, (self.name, 0), initial, True),\n (b, 0): (first, (a, 0)),\n (c, 0): (second, (a, 0)),\n }\n for i in range(1, self.npartitions):\n dsk[(a, i)] = (accumulate_part, binop, (self.name, i), (c, i - 1))\n dsk[(b, i)] = (first, (a, i))\n dsk[(c, i)] = (second, (a, i))\n graph = HighLevelGraph.from_collections(b, dsk, dependencies=[self])\n return Bag(graph, b, self.npartitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_accumulate_part_collect.return.list_d_items_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_accumulate_part_collect.return.list_d_items_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1643, "end_line": 1667, "span_ids": ["collect", "accumulate_part", "partition"], "tokens": 234}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def accumulate_part(binop, seq, initial, is_first=False):\n if initial == no_default:\n res = list(accumulate(binop, seq))\n else:\n res = list(accumulate(binop, seq, initial=initial))\n if is_first:\n return res, res[-1] if res else [], initial\n return res[1:], res[-1]\n\n\ndef partition(grouper, sequence, npartitions, p, nelements=2 ** 20):\n \"\"\" Partition a bag along a grouper, store partitions on disk. \"\"\"\n for block in partition_all(nelements, sequence):\n d = groupby(grouper, block)\n d2 = defaultdict(list)\n for k, v in d.items():\n d2[abs(hash(k)) % npartitions].extend(v)\n p.append(d2, fsync=True)\n return p\n\n\ndef collect(grouper, group, p, barrier_token):\n \"\"\" Collect partitions from disk and yield k,v group pairs. 
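A further ``accumulate`` example with a non-additive operator, assuming a two-partition bag:

```python
import dask.bag as db

b = db.from_sequence([3, 1, 4, 1, 5, 9, 2, 6], npartitions=2)
# Running maximum; partition i is seeded with the last result of partition i - 1.
print(b.accumulate(max).compute())  # [3, 3, 4, 4, 5, 9, 9, 9]
```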
\"\"\"\n d = groupby(grouper, p.get(group, lock=False))\n return list(d.items())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_from_sequence_from_sequence.return.Bag_d_name_len_d_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_from_sequence_from_sequence.return.Bag_d_name_len_d_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1683, "end_line": 1727, "span_ids": ["from_sequence"], "tokens": 361}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def from_sequence(seq, partition_size=None, npartitions=None):\n \"\"\"Create a dask Bag from Python sequence.\n\n This sequence should be relatively small in memory. Dask Bag works\n best when it handles loading your data itself. Commonly we load a\n sequence of filenames into a Bag and then use ``.map`` to open them.\n\n Parameters\n ----------\n seq: Iterable\n A sequence of elements to put into the dask\n partition_size: int (optional)\n The length of each partition\n npartitions: int (optional)\n The number of desired partitions\n\n It is best to provide either ``partition_size`` or ``npartitions``\n (though not both.)\n\n Examples\n --------\n >>> import dask.bag as db\n >>> b = db.from_sequence(['Alice', 'Bob', 'Chuck'], partition_size=2)\n\n See Also\n --------\n read_text: Create bag from text files\n \"\"\"\n seq = list(seq)\n if npartitions and not partition_size:\n partition_size = int(math.ceil(len(seq) / npartitions))\n if npartitions is None and partition_size is None:\n if len(seq) < 100:\n partition_size = 1\n else:\n partition_size = int(len(seq) / 100)\n\n parts = list(partition_all(partition_size, seq))\n name = \"from_sequence-\" + tokenize(seq, partition_size)\n if len(parts) > 0:\n d = dict(((name, i), list(part)) for i, part in enumerate(parts))\n else:\n d = {(name, 0): []}\n\n return Bag(d, name, len(d))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_from_url_from_url.return.Bag_dsk_name_len_urls_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_from_url_from_url.return.Bag_dsk_name_len_urls_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1716, "end_line": 1745, "span_ids": ["from_url"], "tokens": 288}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def from_url(urls):\n \"\"\"Create a dask Bag from a url.\n\n Examples\n --------\n >>> a = from_url('http://raw.githubusercontent.com/dask/dask/master/README.rst') # doctest: +SKIP\n >>> a.npartitions # doctest: +SKIP\n 1\n\n >>> a.take(8) # doctest: +SKIP\n (b'Dask\\\\n',\n b'====\\\\n',\n b'\\\\n',\n b'|Build Status| |Coverage| |Doc Status| |Gitter| |Version Status|\\\\n',\n b'\\\\n',\n b'Dask is a flexible parallel computing library for analytics. See\\\\n',\n b'documentation_ for more information.\\\\n',\n b'\\\\n')\n\n >>> b = from_url(['http://github.com', 'http://google.com']) # doctest: +SKIP\n >>> b.npartitions # doctest: +SKIP\n 2\n \"\"\"\n if isinstance(urls, str):\n urls = [urls]\n name = \"from_url-\" + uuid.uuid4().hex\n dsk = {}\n for i, u in enumerate(urls):\n dsk[(name, i)] = (list, (urlopen, u))\n return Bag(dsk, name, len(urls))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_dictitems_reify.return.seq": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_dictitems_reify.return.seq", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1748, "end_line": 1780, "span_ids": ["dictitems", "concat", "reify"], "tokens": 252}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def dictitems(d):\n \"\"\"A pickleable version of dict.items\n\n >>> dictitems({'x': 1})\n [('x', 1)]\n \"\"\"\n return list(d.items())\n\n\ndef concat(bags):\n \"\"\"Concatenate many bags together, unioning all elements.\n\n >>> import dask.bag as db\n >>> a = db.from_sequence([1, 2, 3])\n >>> b = db.from_sequence([4, 5, 6])\n >>> c = db.concat([a, b])\n\n >>> list(c)\n [1, 2, 3, 4, 5, 6]\n \"\"\"\n name = \"concat-\" + tokenize(*bags)\n counter = itertools.count(0)\n dsk = {(name, next(counter)): key for bag in bags for key in bag.__dask_keys__()}\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=bags)\n return Bag(graph, name, len(dsk))\n\n\ndef reify(seq):\n if isinstance(seq, Iterator):\n seq = list(seq)\n if len(seq) and isinstance(seq[0], Iterator):\n seq = list(map(list, seq))\n return seq", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_from_delayed_from_delayed.return.Bag_graph_name_len_valu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_from_delayed_from_delayed.return.Bag_graph_name_len_valu", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1783, "end_line": 
1823, "span_ids": ["from_delayed"], "tokens": 302}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def from_delayed(values):\n \"\"\"Create bag from many dask Delayed objects.\n\n These objects will become the partitions of the resulting Bag. They should\n evaluate to a ``list`` or some other concrete sequence.\n\n Parameters\n ----------\n values: list of delayed values\n An iterable of dask Delayed objects. Each evaluating to a list.\n\n Returns\n -------\n Bag\n\n Examples\n --------\n >>> x, y, z = [delayed(load_sequence_from_file)(fn)\n ... for fn in filenames] # doctest: +SKIP\n >>> b = from_delayed([x, y, z]) # doctest: +SKIP\n\n See also\n --------\n dask.delayed\n \"\"\"\n from dask.delayed import Delayed, delayed\n\n if isinstance(values, Delayed):\n values = [values]\n values = [\n delayed(v) if not isinstance(v, Delayed) and hasattr(v, \"key\") else v\n for v in values\n ]\n\n name = \"bag-from-delayed-\" + tokenize(*values)\n names = [(name, i) for i in range(len(values))]\n values2 = [(reify, v.key) for v in values]\n dsk = dict(zip(names, values2))\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=values)\n return Bag(graph, name, len(values))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_chunk_distinct_merge_frequencies.return.out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_chunk_distinct_merge_frequencies.return.out", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1826, "end_line": 1849, "span_ids": ["chunk_distinct", "merge_frequencies", "merge_distinct"], "tokens": 156}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def chunk_distinct(seq, key=None):\n if key is not None and not callable(key):\n key = partial(chunk.getitem, key=key)\n return list(unique(seq, key=key))\n\n\ndef merge_distinct(seqs, key=None):\n return chunk_distinct(toolz.concat(seqs), key=key)\n\n\ndef merge_frequencies(seqs):\n if isinstance(seqs, Iterable):\n seqs = list(seqs)\n if not seqs:\n return {}\n first, rest = seqs[0], seqs[1:]\n if not rest:\n return first\n out = defaultdict(int)\n out.update(first)\n for d in rest:\n for k, v in d.items():\n out[k] += v\n return out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_bag_range_bag_range.return.Bag_dsk_name_npartition": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_bag_range_bag_range.return.Bag_dsk_name_npartition", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1852, "end_line": 1872, "span_ids": ["bag_range"], "tokens": 209}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def bag_range(n, npartitions):\n \"\"\"Numbers from zero to n\n\n Examples\n --------\n\n >>> import dask.bag as db\n >>> b = db.range(5, npartitions=2)\n >>> list(b)\n [0, 1, 2, 3, 4]\n \"\"\"\n size = n // npartitions\n name = \"range-%d-npartitions-%d\" % (n, npartitions)\n ijs = list(enumerate(take(npartitions, range(0, n, size))))\n dsk = dict(((name, i), (reify, (range, j, min(j + size, n)))) for i, j in ijs)\n\n if n % npartitions != 0:\n i, j = ijs[-1]\n dsk[(name, i)] = (reify, (range, j, n))\n\n return Bag(dsk, name, npartitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_bag_zip_bag_zip.return.Bag_graph_name_npartiti": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_bag_zip_bag_zip.return.Bag_graph_name_npartiti", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1875, "end_line": 1922, "span_ids": ["bag_zip"], "tokens": 572}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def bag_zip(*bags):\n \"\"\"Partition-wise bag zip\n\n All passed bags must have the same number of partitions.\n\n NOTE: corresponding partitions should have the same length; if they do not,\n the \"extra\" elements from the longer partition(s) will be dropped. 
If you\n have this case, chances are that what you really need is a data alignment\n mechanism like pandas's, and not a missing value filler like zip_longest.\n\n Examples\n --------\n\n Correct usage:\n\n >>> import dask.bag as db\n >>> evens = db.from_sequence(range(0, 10, 2), partition_size=4)\n >>> odds = db.from_sequence(range(1, 10, 2), partition_size=4)\n >>> pairs = db.zip(evens, odds)\n >>> list(pairs)\n [(0, 1), (2, 3), (4, 5), (6, 7), (8, 9)]\n\n Incorrect usage:\n\n >>> numbers = db.range(20) # doctest: +SKIP\n >>> fizz = numbers.filter(lambda n: n % 3 == 0) # doctest: +SKIP\n >>> buzz = numbers.filter(lambda n: n % 5 == 0) # doctest: +SKIP\n >>> fizzbuzz = db.zip(fizz, buzz) # doctest: +SKIP\n >>> list(fizzbuzz) # doctest: +SKIP\n [(0, 0), (3, 5), (6, 10), (9, 15), (12, 20), (15, 25), (18, 30)]\n\n When what you really wanted was more along the lines of the following:\n\n >>> list(fizzbuzz) # doctest: +SKIP\n [(0, 0), (3, None), (None, 5), (6, None), (None, 10), (9, None),\n (12, None), (15, 15), (18, None), (None, 20), (None, 25), (None, 30)]\n \"\"\"\n npartitions = bags[0].npartitions\n assert all(bag.npartitions == npartitions for bag in bags)\n # TODO: do more checks\n\n name = \"zip-\" + tokenize(*bags)\n dsk = dict(\n ((name, i), (reify, (zip,) + tuple((bag.name, i) for bag in bags)))\n for i in range(npartitions)\n )\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=bags)\n return Bag(graph, name, npartitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_map_chunk_map_chunk.return._MapChunk_f_iters_kwarg": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_map_chunk_map_chunk.return._MapChunk_f_iters_kwarg", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1925, "end_line": 1943, "span_ids": ["map_chunk"], "tokens": 178}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def map_chunk(f, iters, iter_kwarg_keys=None, kwargs=None):\n \"\"\"Map ``f`` across one or more iterables, maybe with keyword arguments.\n\n Low-level function used in ``bag_map``, not user facing.\n\n Arguments\n ---------\n f : callable\n iters : List[Iterable]\n iter_kwarg_keys : List[str] or None\n Keyword names to pair with the tail end of ``iters``, allowing\n keyword arguments to be passed in from iterators.\n kwargs : dict or None\n Additional constant keyword arguments to use on every call to ``f``.\n \"\"\"\n if kwargs:\n f = partial(f, **kwargs)\n iters = [iter(a) for a in iters]\n return _MapChunk(f, iters, kwarg_keys=iter_kwarg_keys)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py__MapChunk__MapChunk.check_all_iterators_consumed.if_len_self_iters_1_.for_i_in_self_iters_.try_.else_.raise_ValueError_msg_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py__MapChunk__MapChunk.check_all_iterators_consumed.if_len_self_iters_1_.for_i_in_self_iters_.try_.else_.raise_ValueError_msg_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1946, "end_line": 1981, "span_ids": ["_MapChunk.__next__", "_MapChunk.__init__", "_MapChunk.check_all_iterators_consumed", "_MapChunk"], "tokens": 260}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _MapChunk(Iterator):\n def __init__(self, f, iters, kwarg_keys=None):\n self.f = f\n self.iters = iters\n self.kwarg_keys = kwarg_keys or ()\n self.nkws = len(self.kwarg_keys)\n\n def __next__(self):\n try:\n vals = [next(i) for i in self.iters]\n except StopIteration:\n self.check_all_iterators_consumed()\n raise\n\n if self.nkws:\n args = vals[: -self.nkws]\n kwargs = dict(zip(self.kwarg_keys, vals[-self.nkws :]))\n return self.f(*args, **kwargs)\n return self.f(*vals)\n\n def check_all_iterators_consumed(self):\n if len(self.iters) > 1:\n for i in self.iters:\n if isinstance(i, itertools.repeat):\n continue\n try:\n next(i)\n except StopIteration:\n pass\n else:\n msg = (\n \"map called with multiple bags that aren't identically \"\n \"partitioned. Please ensure that all bag arguments \"\n \"have the same partition lengths\"\n )\n raise ValueError(msg)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_starmap_chunk_unpack_scalar_dask_kwargs.return.kwargs2_dependencies": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_starmap_chunk_unpack_scalar_dask_kwargs.return.kwargs2_dependencies", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1984, "end_line": 2008, "span_ids": ["starmap_chunk", "unpack_scalar_dask_kwargs"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def starmap_chunk(f, x, kwargs):\n if kwargs:\n f = partial(f, **kwargs)\n return itertools.starmap(f, x)\n\n\ndef unpack_scalar_dask_kwargs(kwargs):\n \"\"\"Extracts dask values from kwargs.\n\n Currently only ``dask.bag.Item`` and ``dask.delayed.Delayed`` are\n supported. 
Returns a merged dask graph and a task resulting in a keyword\n dict.\n \"\"\"\n kwargs2 = {}\n dependencies = []\n for k, v in kwargs.items():\n vv, collections = unpack_collections(v)\n if not collections:\n kwargs2[k] = v\n else:\n kwargs2[k] = vv\n dependencies.extend(collections)\n if dependencies:\n kwargs2 = (dict, (zip, list(kwargs2), list(kwargs2.values())))\n return kwargs2, dependencies", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_bag_map_bag_map.npartitions_8.npartitions_pop_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_bag_map_bag_map.npartitions_8.npartitions_pop_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2011, "end_line": 2101, "span_ids": ["bag_map"], "tokens": 751}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def bag_map(func, *args, **kwargs):\n \"\"\"Apply a function elementwise across one or more bags.\n\n Note that all ``Bag`` arguments must be partitioned identically.\n\n Parameters\n ----------\n func : callable\n *args, **kwargs : Bag, Item, Delayed, or object\n Arguments and keyword arguments to pass to ``func``. Non-Bag args/kwargs\n are broadcasted across all calls to ``func``.\n\n Notes\n -----\n For calls with multiple `Bag` arguments, corresponding partitions should\n have the same length; if they do not, the call will error at compute time.\n\n Examples\n --------\n >>> import dask.bag as db\n >>> b = db.from_sequence(range(5), npartitions=2)\n >>> b2 = db.from_sequence(range(5, 10), npartitions=2)\n\n Apply a function to all elements in a bag:\n\n >>> db.map(lambda x: x + 1, b).compute()\n [1, 2, 3, 4, 5]\n\n Apply a function with arguments from multiple bags:\n\n >>> from operator import add\n >>> db.map(add, b, b2).compute()\n [5, 7, 9, 11, 13]\n\n Non-bag arguments are broadcast across all calls to the mapped function:\n\n >>> db.map(add, b, 1).compute()\n [1, 2, 3, 4, 5]\n\n Keyword arguments are also supported, and have the same semantics as\n regular arguments:\n\n >>> def myadd(x, y=0):\n ... return x + y\n >>> db.map(myadd, b, y=b2).compute()\n [5, 7, 9, 11, 13]\n >>> db.map(myadd, b, y=1).compute()\n [1, 2, 3, 4, 5]\n\n Both arguments and keyword arguments can also be instances of\n ``dask.bag.Item`` or ``dask.delayed.Delayed``. 
Here we'll add the max value\n in the bag to each element:\n\n >>> db.map(myadd, b, b.max()).compute()\n [4, 5, 6, 7, 8]\n \"\"\"\n name = \"%s-%s\" % (funcname(func), tokenize(func, \"map\", *args, **kwargs))\n dsk = {}\n dependencies = []\n\n bags = []\n args2 = []\n for a in args:\n if isinstance(a, Bag):\n bags.append(a)\n args2.append(a)\n elif isinstance(a, (Item, Delayed)):\n dependencies.append(a)\n args2.append((itertools.repeat, a.key))\n else:\n args2.append((itertools.repeat, a))\n\n bag_kwargs = {}\n other_kwargs = {}\n for k, v in kwargs.items():\n if isinstance(v, Bag):\n bag_kwargs[k] = v\n bags.append(v)\n else:\n other_kwargs[k] = v\n\n other_kwargs, collections = unpack_scalar_dask_kwargs(other_kwargs)\n dependencies.extend(collections)\n\n if not bags:\n raise ValueError(\"At least one argument must be a Bag.\")\n\n npartitions = {b.npartitions for b in bags}\n if len(npartitions) > 1:\n raise ValueError(\"All bags must have the same number of partitions.\")\n npartitions = npartitions.pop()\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_bag_map.build_iters_bag_map.return.return_type_graph_name_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_bag_map.build_iters_bag_map.return.return_type_graph_name_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2103, "end_line": 2128, "span_ids": ["bag_map"], "tokens": 225}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def bag_map(func, *args, **kwargs):\n # ... 
other code\n\n def build_iters(n):\n args = [(a.name, n) if isinstance(a, Bag) else a for a in args2]\n if bag_kwargs:\n args.extend((b.name, n) for b in bag_kwargs.values())\n return args\n\n if bag_kwargs:\n iter_kwarg_keys = list(bag_kwargs)\n else:\n iter_kwarg_keys = None\n\n dsk = {\n (name, n): (\n reify,\n (map_chunk, func, build_iters(n), iter_kwarg_keys, other_kwargs),\n )\n for n in range(npartitions)\n }\n\n # If all bags are the same type, use that type, otherwise fallback to Bag\n return_type = set(map(type, bags))\n return_type = return_type.pop() if len(return_type) == 1 else Bag\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=bags + dependencies)\n\n return return_type(graph, name, npartitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_map_partitions_map_partitions.return.return_type_graph_name_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_map_partitions_map_partitions.return.return_type_graph_name_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2131, "end_line": 2232, "span_ids": ["map_partitions"], "tokens": 764}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def map_partitions(func, *args, **kwargs):\n \"\"\"Apply a function to every partition across one or more bags.\n\n Note that all ``Bag`` arguments must be partitioned identically.\n\n Parameters\n ----------\n func : callable\n *args, **kwargs : Bag, Item, Delayed, or object\n Arguments and keyword arguments to pass to ``func``.\n\n Examples\n --------\n >>> import dask.bag as db\n >>> b = db.from_sequence(range(1, 101), npartitions=10)\n >>> def div(nums, den=1):\n ... 
return [num / den for num in nums]\n\n Using a python object:\n\n >>> hi = b.max().compute()\n >>> hi\n 100\n >>> b.map_partitions(div, den=hi).take(5)\n (0.01, 0.02, 0.03, 0.04, 0.05)\n\n Using an ``Item``:\n\n >>> b.map_partitions(div, den=b.max()).take(5)\n (0.01, 0.02, 0.03, 0.04, 0.05)\n\n Note that while both versions give the same output, the second forms a\n single graph, and then computes everything at once, and in some cases\n may be more efficient.\n \"\"\"\n name = \"%s-%s\" % (funcname(func), tokenize(func, \"map-partitions\", *args, **kwargs))\n dsk = {}\n dependencies = []\n\n bags = []\n args2 = []\n for a in args:\n if isinstance(a, Bag):\n bags.append(a)\n args2.append(a)\n elif isinstance(a, (Item, Delayed)):\n args2.append(a.key)\n dependencies.append(a)\n else:\n args2.append(a)\n\n bag_kwargs = {}\n other_kwargs = {}\n for k, v in kwargs.items():\n if isinstance(v, Bag):\n bag_kwargs[k] = v\n bags.append(v)\n else:\n other_kwargs[k] = v\n\n other_kwargs, collections = unpack_scalar_dask_kwargs(other_kwargs)\n dependencies.extend(collections)\n\n if not bags:\n raise ValueError(\"At least one argument must be a Bag.\")\n\n npartitions = {b.npartitions for b in bags}\n if len(npartitions) > 1:\n raise ValueError(\"All bags must have the same number of partitions.\")\n npartitions = npartitions.pop()\n\n def build_args(n):\n return [(a.name, n) if isinstance(a, Bag) else a for a in args2]\n\n def build_bag_kwargs(n):\n if not bag_kwargs:\n return {}\n return (\n dict,\n (zip, list(bag_kwargs), [(b.name, n) for b in bag_kwargs.values()]),\n )\n\n if kwargs:\n dsk = {\n (name, n): (\n apply,\n func,\n build_args(n),\n (merge, build_bag_kwargs(n), other_kwargs),\n )\n for n in range(npartitions)\n }\n else:\n dsk = {(name, n): (func,) + tuple(build_args(n)) for n in range(npartitions)}\n\n # If all bags are the same type, use that type, otherwise fallback to Bag\n return_type = set(map(type, bags))\n return_type = return_type.pop() if len(return_type) == 1 else Bag\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=bags + dependencies)\n\n return return_type(graph, name, npartitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py__reduce_groupby_tasks.return.type_b_graph_name_len_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py__reduce_groupby_tasks.return.type_b_graph_name_len_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2235, "end_line": 2342, "span_ids": ["_reduce", "make_group", "groupby_tasks"], "tokens": 666}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _reduce(binop, sequence, initial=no_default):\n if initial is not no_default:\n return reduce(binop, sequence, initial)\n else:\n return reduce(binop, sequence)\n\n\ndef make_group(k, stage):\n def h(x):\n return x[0] // k ** stage % k\n\n return h\n\n\ndef 
groupby_tasks(b, grouper, hash=hash, max_branch=32):\n max_branch = max_branch or 32\n n = b.npartitions\n\n stages = int(math.ceil(math.log(n) / math.log(max_branch))) or 1\n if stages > 1:\n k = int(math.ceil(n ** (1 / stages)))\n else:\n k = n\n\n groups = []\n splits = []\n joins = []\n\n inputs = [tuple(digit(i, j, k) for j in range(stages)) for i in range(k ** stages)]\n\n b2 = b.map(partial(chunk.groupby_tasks_group_hash, hash=hash, grouper=grouper))\n\n token = tokenize(b, grouper, hash, max_branch)\n\n shuffle_join_name = \"shuffle-join-\" + token\n shuffle_group_name = \"shuffle-group-\" + token\n shuffle_split_name = \"shuffle-split-\" + token\n\n start = {}\n\n for idx, inp in enumerate(inputs):\n group = {}\n split = {}\n if idx < b.npartitions:\n start[(shuffle_join_name, 0, inp)] = (b2.name, idx)\n else:\n start[(shuffle_join_name, 0, inp)] = []\n\n for stage in range(1, stages + 1):\n _key_tuple = (shuffle_group_name, stage, inp)\n group[_key_tuple] = (\n groupby,\n (make_group, k, stage - 1),\n (shuffle_join_name, stage - 1, inp),\n )\n\n for i in range(k):\n split[(shuffle_split_name, stage, i, inp)] = (\n dict.get,\n _key_tuple,\n i,\n {},\n )\n\n groups.append(group)\n splits.append(split)\n\n for stage in range(1, stages + 1):\n join = dict(\n (\n (shuffle_join_name, stage, inp),\n (\n list,\n (\n toolz.concat,\n [\n (\n shuffle_split_name,\n stage,\n inp[stage - 1],\n insert(inp, stage - 1, j),\n )\n for j in range(k)\n ],\n ),\n ),\n )\n for inp in inputs\n )\n\n joins.append(join)\n\n name = \"shuffle-\" + token\n\n end = dict(\n (\n (name, i),\n (list, (dict.items, (groupby, grouper, (pluck, 1, j)))),\n )\n for i, j in enumerate(join)\n )\n\n groups.extend(splits)\n groups.extend(joins)\n\n dsk = merge(start, end, *(groups))\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[b2])\n return type(b)(graph, name, len(inputs))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_groupby_disk_groupby_disk.return.type_b_graph_name_npar": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_groupby_disk_groupby_disk.return.type_b_graph_name_npar", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2345, "end_line": 2383, "span_ids": ["groupby_disk"], "tokens": 374}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def groupby_disk(b, grouper, npartitions=None, blocksize=2 ** 20):\n if npartitions is None:\n npartitions = b.npartitions\n token = tokenize(b, grouper, npartitions, blocksize)\n\n import partd\n\n p = (\"partd-\" + token,)\n dirname = config.get(\"temporary_directory\", None)\n if dirname:\n file = (apply, partd.File, (), {\"dir\": dirname})\n else:\n file = (partd.File,)\n try:\n dsk1 = {p: (partd.Python, (partd.Snappy, file))}\n except AttributeError:\n dsk1 = {p: (partd.Python, file)}\n\n # Partition data on disk\n name = 
\"groupby-part-{0}-{1}\".format(funcname(grouper), token)\n dsk2 = dict(\n ((name, i), (partition, grouper, (b.name, i), npartitions, p, blocksize))\n for i in range(b.npartitions)\n )\n\n # Barrier\n barrier_token = \"groupby-barrier-\" + token\n\n dsk3 = {barrier_token: (chunk.barrier,) + tuple(dsk2)}\n\n # Collect groups\n name = \"groupby-collect-\" + token\n dsk4 = dict(\n ((name, i), (collect, grouper, i, p, barrier_token)) for i in range(npartitions)\n )\n\n dsk = merge(dsk1, dsk2, dsk3, dsk4)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[b])\n return type(b)(graph, name, npartitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_empty_safe_apply_safe_take.return.r": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_empty_safe_apply_safe_take.return.r", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2386, "end_line": 2413, "span_ids": ["safe_take", "empty_safe_aggregate", "empty_safe_apply"], "tokens": 204}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def empty_safe_apply(func, part, is_last):\n if isinstance(part, Iterator):\n try:\n _, part = peek(part)\n except StopIteration:\n if not is_last:\n return no_result\n return func(part)\n elif not is_last and len(part) == 0:\n return no_result\n else:\n return func(part)\n\n\ndef empty_safe_aggregate(func, parts, is_last):\n parts2 = (p for p in parts if p is not no_result)\n return empty_safe_apply(func, parts2, is_last)\n\n\ndef safe_take(n, b, warn=True):\n r = list(take(n, b))\n if len(r) != n and warn:\n warnings.warn(\n \"Insufficient elements for `take`. {0} elements \"\n \"requested, only {1} elements available. 
Try passing \"\n \"larger `npartitions` to `take`.\".format(n, len(r))\n )\n return r", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_random_sample_random_sample.for_i_in_x_.if_random_state_random_.yield_i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_random_sample_random_sample.for_i_in_x_.if_random_state_random_.yield_i", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2416, "end_line": 2432, "span_ids": ["random_sample"], "tokens": 116}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def random_sample(x, state_data, prob):\n \"\"\"Filter elements of `x` by a probability `prob`.\n\n Parameters\n ----------\n x : iterable\n state_data : tuple\n A tuple that can be passed to ``random.Random.setstate``.\n prob : float\n A float between 0 and 1, representing the probability that each\n element will be yielded.\n \"\"\"\n random_state = Random()\n random_state.setstate(state_data)\n for i in x:\n if random_state.random() < prob:\n yield i", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_random_state_data_python_random_state_data_python.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_random_state_data_python_random_state_data_python.return._", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2435, "end_line": 2457, "span_ids": ["random_state_data_python"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def random_state_data_python(n, random_state=None):\n \"\"\"Return a list of tuples that can be passed to\n ``random.Random.setstate``.\n\n Parameters\n ----------\n n : int\n Number of tuples to return.\n random_state : int or ``random.Random``, optional\n If an int, is used to seed a new ``random.Random``.\n \"\"\"\n if not isinstance(random_state, Random):\n random_state = Random(random_state)\n\n maxuint32 = 1 << 32\n return [\n (\n 3,\n tuple(random_state.randint(0, maxuint32) for i in range(624)) + (624,),\n None,\n )\n for i in range(n)\n ]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, 
"__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_split_to_dataframe.return.res_astype_dtypes_copy_F": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_split_to_dataframe.return.res_astype_dtypes_copy_F", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2460, "end_line": 2483, "span_ids": ["split", "to_dataframe"], "tokens": 204}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def split(seq, n):\n \"\"\"Split apart a sequence into n equal pieces.\n\n >>> split(range(10), 3)\n [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]\n \"\"\"\n if not isinstance(seq, (list, tuple)):\n seq = list(seq)\n\n part = len(seq) / n\n L = [seq[int(part * i) : int(part * (i + 1))] for i in range(n - 1)]\n L.append(seq[int(part * (n - 1)) :])\n return L\n\n\ndef to_dataframe(seq, columns, dtypes):\n import pandas as pd\n\n seq = reify(seq)\n # pd.DataFrame expects lists, only copy if necessary\n if not isinstance(seq, list):\n seq = list(seq)\n res = pd.DataFrame(seq, columns=list(columns))\n return res.astype(dtypes, copy=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_repartition_npartitions_total_mem_usage.return.sizeof_partition_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_repartition_npartitions_total_mem_usage.return.sizeof_partition_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2486, "end_line": 2518, "span_ids": ["total_mem_usage", "repartition_npartitions"], "tokens": 305}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def repartition_npartitions(bag, npartitions):\n \"\"\"Changes the number of partitions of the bag.\n\n This can be used to reduce or increase the number of partitions\n of the bag.\n \"\"\"\n if npartitions == bag.npartitions:\n return bag\n\n new_name = \"repartition-%d-%s\" % (npartitions, tokenize(bag, npartitions))\n if bag.npartitions > npartitions:\n ratio = bag.npartitions / npartitions\n new_partitions_boundaries = [\n int(old_partition_index * ratio)\n for old_partition_index in range(npartitions + 1)\n ]\n return _repartition_from_boundaries(bag, new_partitions_boundaries, new_name)\n else: # npartitions > bag.npartitions\n div, mod = divmod(npartitions, bag.npartitions)\n nsplits = [div] * bag.npartitions\n nsplits[-1] += mod\n return _split_partitions(bag, nsplits, new_name)\n\n\ndef total_mem_usage(partition):\n from copy 
import deepcopy\n from itertools import chain\n\n # if repartition is called multiple times prior to calling compute(), the partitions\n # will be itertools.chain objects. Copy the object to avoid consuming the iterable.\n if isinstance(partition, chain):\n partition = reify(deepcopy(partition))\n return sizeof(partition)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_repartition_size_repartition_size.return._repartition_from_boundar": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_repartition_size_repartition_size.return._repartition_from_boundar", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2521, "end_line": 2546, "span_ids": ["repartition_size"], "tokens": 312}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def repartition_size(bag, size):\n \"\"\"\n Repartition bag so that new partitions have approximately `size` memory usage each\n \"\"\"\n if isinstance(size, str):\n size = parse_bytes(size)\n size = int(size)\n mem_usages = bag.map_partitions(total_mem_usage).compute()\n\n # 1. split each partition that is larger than partition size\n nsplits = [1 + mem_usage // size for mem_usage in mem_usages]\n if any((nsplit > 1 for nsplit in nsplits)):\n split_name = \"repartition-split-{}\".format(tokenize(bag, size))\n bag = _split_partitions(bag, nsplits, split_name)\n # update mem_usages to account for the split partitions\n split_mem_usages = []\n for n, usage in zip(nsplits, mem_usages):\n split_mem_usages.extend([usage / n] * n)\n mem_usages = split_mem_usages\n\n # 2. 
now that all partitions are less than size, concat them up to size\n assert all((mem_usage <= size for mem_usage in mem_usages))\n new_npartitions = list(map(len, iter_chunks(mem_usages, size)))\n new_partitions_boundaries = accumulate(operator.add, new_npartitions)\n new_name = \"repartition-{}\".format(tokenize(bag, size))\n return _repartition_from_boundaries(bag, new_partitions_boundaries, new_name)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py__split_partitions__split_partitions.return.Bag_graph_name_new_name_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py__split_partitions__split_partitions.return.Bag_graph_name_new_name_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2549, "end_line": 2581, "span_ids": ["_split_partitions"], "tokens": 285}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _split_partitions(bag, nsplits, new_name):\n \"\"\"Split a Dask bag into new partitions\n\n Parameters\n ----------\n bag: Dask bag\n nsplits: List[int]\n Number of target bags for each partition\n The length of nsplits should be the same as bag.npartitions\n new_name: str\n\n See Also\n --------\n repartition_npartitions\n repartition_size\n \"\"\"\n if len(nsplits) != bag.npartitions:\n raise ValueError(\"nsplits should have len={}\".format(bag.npartitions))\n dsk = {}\n split_name = \"split-{}\".format(tokenize(bag, nsplits))\n j = 0\n for i, k in enumerate(nsplits):\n if k == 1:\n dsk[new_name, j] = (bag.name, i)\n j += 1\n else:\n dsk[split_name, i] = (split, (bag.name, i), k)\n for jj in range(k):\n dsk[new_name, j] = (operator.getitem, (split_name, i), jj)\n j += 1\n\n graph = HighLevelGraph.from_collections(new_name, dsk, dependencies=[bag])\n return Bag(graph, name=new_name, npartitions=sum(nsplits))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py__repartition_from_boundaries_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py__repartition_from_boundaries_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2584, "end_line": 2610, "span_ids": ["_repartition_from_boundaries"], "tokens": 233}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def 
_repartition_from_boundaries(bag, new_partitions_boundaries, new_name):\n if not isinstance(new_partitions_boundaries, list):\n new_partitions_boundaries = list(new_partitions_boundaries)\n if new_partitions_boundaries[0] > 0:\n new_partitions_boundaries.insert(0, 0)\n if new_partitions_boundaries[-1] < bag.npartitions:\n new_partitions_boundaries.append(bag.npartitions)\n num_new_partitions = len(new_partitions_boundaries) - 1\n dsk = {}\n for new_partition_index in range(num_new_partitions):\n value = (\n list,\n (\n toolz.concat,\n [\n (bag.name, old_partition_index)\n for old_partition_index in range(\n new_partitions_boundaries[new_partition_index],\n new_partitions_boundaries[new_partition_index + 1],\n )\n ],\n ),\n )\n dsk[new_name, new_partition_index] = value\n graph = HighLevelGraph.from_collections(new_name, dsk, dependencies=[bag])\n return Bag(graph, name=new_name, npartitions=num_new_partitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/random.py_heapq_sample.return._sample_population_popula": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/random.py_heapq_sample.return._sample_population_popula", "embedding": null, "metadata": {"file_path": "dask/bag/random.py", "file_name": "random.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 30, "span_ids": ["imports", "sample"], "tokens": 172}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import heapq\nimport math\n\nimport random as rnd\nfrom functools import partial\n\n\ndef sample(population, k):\n \"\"\"Chooses k unique random elements from a bag.\n\n Returns a new bag containing elements from the population while\n leaving the original population unchanged.\n\n Parameters\n ----------\n population: Bag\n Elements to sample.\n k: integer, optional\n Number of elements to sample.\n\n Examples\n --------\n >>> import dask.bag as db # doctest: +SKIP\n ... from dask.bag import random\n ...\n ... b = db.from_sequence(range(5), npartitions=2)\n ... 
list(random.sample(b, 3).compute())\n [1, 3, 4]\n \"\"\"\n return _sample(population=population, k=k, replace=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/random.py_choices__sample.return.population_reduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/random.py_choices__sample.return.population_reduction_", "embedding": null, "metadata": {"file_path": "dask/bag/random.py", "file_name": "random.py", "file_type": "text/x-python", "category": "implementation", "start_line": 33, "end_line": 60, "span_ids": ["_sample", "choices"], "tokens": 187}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def choices(population, k=1):\n \"\"\"\n Return a k sized list of elements chosen with replacement.\n\n Parameters\n ----------\n population: Bag\n Elements to sample.\n k: integer, optional\n Number of elements to sample.\n\n Examples\n --------\n >>> import dask.bag as db # doctest: +SKIP\n ... from dask.bag import random\n ...\n ... b = db.from_sequence(range(5), npartitions=2)\n ... list(random.choices(b, 3).compute())\n [1, 1, 4]\n \"\"\"\n return _sample(population=population, k=k, replace=True)\n\n\ndef _sample(population, k, replace=False):\n return population.reduction(\n partial(_sample_map_partitions, k=k, replace=replace),\n partial(_sample_reduce, k=k, replace=replace),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/random.py__sample_map_partitions__sample_map_partitions.return.sampled_lx": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/random.py__sample_map_partitions__sample_map_partitions.return.sampled_lx", "embedding": null, "metadata": {"file_path": "dask/bag/random.py", "file_name": "random.py", "file_type": "text/x-python", "category": "implementation", "start_line": 63, "end_line": 87, "span_ids": ["_sample_map_partitions"], "tokens": 174}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _sample_map_partitions(population, k, replace):\n \"\"\"\n Map function used by the sample and choices functions.\n\n Parameters\n ----------\n population : list\n List of elements to sample.\n k : int\n Number of elements to sample.
\n\n Returns\n -------\n sample: list\n List of sampled elements from the partition.\n lx: int\n Number of elements on the partition.\n \"\"\"\n lx = len(population)\n real_k = k if k <= lx else lx\n sample_func = rnd.choices if replace else rnd.sample\n # because otherwise it raises IndexError:\n sampled = [] if real_k == 0 else sample_func(population=population, k=real_k)\n return sampled, lx", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/random.py__sample_reduce_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/random.py__sample_reduce_", "embedding": null, "metadata": {"file_path": "dask/bag/random.py", "file_name": "random.py", "file_type": "text/x-python", "category": "implementation", "start_line": 90, "end_line": 133, "span_ids": ["_sample_reduce", "_weighted_sampling_without_replacement"], "tokens": 334}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _sample_reduce(reduce_iter, k, replace):\n \"\"\"\n Reduce function used by the sample and choices functions.\n\n Parameters\n ----------\n reduce_iter : iterable\n Each element is a tuple generated by the _sample_map_partitions function.\n\n Returns a sequence of uniformly distributed samples.\n \"\"\"\n ns_ks = []\n s = []\n n = 0\n # unfolding reduce outputs\n for i in reduce_iter:\n (s_i, n_i) = i\n s.extend(s_i)\n n += n_i\n k_i = len(s_i)\n ns_ks.append((n_i, k_i))\n\n if k < 0 or (k > n and not replace):\n raise ValueError(\"Sample larger than population or is negative\")\n\n # creating the probability array\n p = []\n for n_i, k_i in ns_ks:\n if k_i > 0:\n p_i = n_i / (k_i * n)\n p += [p_i] * k_i\n\n sample_func = rnd.choices if replace else _weighted_sampling_without_replacement\n return sample_func(population=s, weights=p, k=k)\n\n\ndef _weighted_sampling_without_replacement(population, weights, k):\n \"\"\"\n Source:\n Weighted random sampling with a reservoir, Pavlos S. Efraimidis, Paul G. 
Spirakis\n \"\"\"\n elt = [(math.log(rnd.random()) / weights[i], i) for i in range(len(weights))]\n return [population[x[1]] for x in heapq.nlargest(k, elt)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_avro.py_os_test_onefile_oneblock.assert_b_compute_exp": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_avro.py_os_test_onefile_oneblock.assert_b_compute_exp", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_avro.py", "file_name": "test_avro.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 31, "span_ids": ["imports", "test_onefile_oneblock"], "tokens": 229}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import os\nimport pytest\nimport random\nimport dask.bag as db\n\nfastavro = pytest.importorskip(\"fastavro\")\n\nexpected = [\n {\n \"name\": random.choice([\"fred\", \"wilma\", \"barney\", \"betty\"]),\n \"number\": random.randint(0, 100),\n }\n for _ in range(1000)\n]\nschema = {\n \"doc\": \"Descr\",\n \"name\": \"Random\",\n \"namespace\": \"test\",\n \"type\": \"record\",\n \"fields\": [{\"name\": \"name\", \"type\": \"string\"}, {\"name\": \"number\", \"type\": \"int\"}],\n}\n\n\ndef test_onefile_oneblock(tmpdir):\n tmpdir = str(tmpdir)\n fn = os.path.join(tmpdir, \"one.avro\")\n with open(fn, \"wb\") as f:\n fastavro.writer(f, records=expected, schema=schema)\n b = db.read_avro(fn, blocksize=None)\n assert b.npartitions == 1\n assert b.compute() == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_avro.py_test_twofile_oneblock_test_twofile_oneblock.assert_b_compute_exp": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_avro.py_test_twofile_oneblock_test_twofile_oneblock.assert_b_compute_exp", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_avro.py", "file_name": "test_avro.py", "file_type": "text/x-python", "category": "test", "start_line": 34, "end_line": 44, "span_ids": ["test_twofile_oneblock"], "tokens": 143}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_twofile_oneblock(tmpdir):\n tmpdir = str(tmpdir)\n fn1 = os.path.join(tmpdir, \"one.avro\")\n fn2 = os.path.join(tmpdir, \"two.avro\")\n with open(fn1, \"wb\") as f:\n fastavro.writer(f, records=expected[:500], schema=schema)\n with open(fn2, \"wb\") as f:\n fastavro.writer(f, records=expected[500:], schema=schema)\n b = 
db.read_avro(os.path.join(tmpdir, \"*.avro\"), blocksize=None)\n assert b.npartitions == 2\n assert b.compute() == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_avro.py_test_twofile_multiblock_test_twofile_multiblock.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_avro.py_test_twofile_multiblock_test_twofile_multiblock.None_3", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_avro.py", "file_name": "test_avro.py", "file_type": "text/x-python", "category": "test", "start_line": 47, "end_line": 61, "span_ids": ["test_twofile_multiblock"], "tokens": 195}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_twofile_multiblock(tmpdir):\n tmpdir = str(tmpdir)\n fn1 = os.path.join(tmpdir, \"one.avro\")\n fn2 = os.path.join(tmpdir, \"two.avro\")\n with open(fn1, \"wb\") as f:\n fastavro.writer(f, records=expected[:500], schema=schema, sync_interval=100)\n with open(fn2, \"wb\") as f:\n fastavro.writer(f, records=expected[500:], schema=schema, sync_interval=100)\n b = db.read_avro(os.path.join(tmpdir, \"*.avro\"), blocksize=None)\n assert b.npartitions == 2\n assert b.compute() == expected\n\n b = db.read_avro(os.path.join(tmpdir, \"*.avro\"), blocksize=1000)\n assert b.npartitions > 2\n assert b.compute() == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_avro.py_test_roundtrip_simple_test_roundtrip_simple.assert_b_compute_b2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_avro.py_test_roundtrip_simple_test_roundtrip_simple.assert_b_compute_b2_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_avro.py", "file_name": "test_avro.py", "file_type": "text/x-python", "category": "test", "start_line": 64, "end_line": 80, "span_ids": ["test_roundtrip_simple"], "tokens": 178}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_roundtrip_simple(tmpdir):\n from dask.delayed import Delayed\n\n tmpdir = str(tmpdir)\n fn = os.path.join(tmpdir, \"out*.avro\")\n b = db.from_sequence([{\"a\": i} for i in [1, 2, 3, 4, 5]], npartitions=2)\n schema = {\n \"name\": \"Test\",\n \"type\": \"record\",\n \"fields\": [{\"name\": \"a\", \"type\": \"int\"}],\n }\n out = b.to_avro(fn, schema, compute=False)\n assert isinstance(out[0], Delayed)\n out = b.to_avro(fn, schema)\n assert len(out) == 2\n b2 = db.read_avro(fn)\n assert 
b.compute() == b2.compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_avro.py_test_roundtrip_test_roundtrip.assert_b_compute_b2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_avro.py_test_roundtrip_test_roundtrip.assert_b_compute_b2_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_avro.py", "file_name": "test_avro.py", "file_type": "text/x-python", "category": "test", "start_line": 83, "end_line": 92, "span_ids": ["test_roundtrip"], "tokens": 115}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"codec\", [\"null\", \"deflate\", \"snappy\"])\ndef test_roundtrip(tmpdir, codec):\n tmpdir = str(tmpdir)\n if codec == \"snappy\":\n pytest.importorskip(\"snappy\")\n fn = os.path.join(tmpdir, \"out*.avro\")\n b = db.from_sequence(expected, npartitions=3)\n b.to_avro(fn, schema=schema, codec=codec)\n b2 = db.read_avro(fn)\n assert b.compute() == b2.compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_avro.py_test_invalid_schema_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_avro.py_test_invalid_schema_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_avro.py", "file_name": "test_avro.py", "file_type": "text/x-python", "category": "test", "start_line": 95, "end_line": 117, "span_ids": ["test_invalid_schema"], "tokens": 266}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_invalid_schema(tmpdir):\n tmpdir = str(tmpdir)\n b = db.from_sequence(expected, npartitions=3)\n fn = os.path.join(tmpdir, \"out*.avro\")\n with pytest.raises(AssertionError):\n b.to_avro(fn, schema=[])\n with pytest.raises(AssertionError):\n b.to_avro(fn, schema={})\n with pytest.raises(AssertionError):\n b.to_avro(fn, schema={\"doc\": \"unknown\"})\n with pytest.raises(AssertionError):\n b.to_avro(fn, schema={\"name\": \"test\"})\n with pytest.raises(AssertionError):\n b.to_avro(fn, schema={\"name\": \"test\", \"type\": \"wrong\"})\n with pytest.raises(AssertionError):\n b.to_avro(fn, schema={\"name\": \"test\", \"type\": \"record\"})\n with pytest.raises(AssertionError):\n b.to_avro(fn, schema={\"name\": \"test\", \"type\": \"record\"})\n with pytest.raises(AssertionError):\n b.to_avro(\n fn, schema={\"name\": \"test\", \"type\": \"record\", \"fields\": [{\"name\": \"a\"}]}\n )", "start_char_idx": null, "end_char_idx": 
null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_bag_map_test_bag_map.None_3.db_map_myadd_b_b_unequa": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_bag_map_test_bag_map.None_3.db_map_myadd_b_b_unequa", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 64, "end_line": 113, "span_ids": ["test_bag_map"], "tokens": 618}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_bag_map():\n b = db.from_sequence(range(100), npartitions=10)\n b2 = db.from_sequence(range(100, 200), npartitions=10)\n x = b.compute()\n x2 = b2.compute()\n\n def myadd(a=1, b=2, c=3):\n return a + b + c\n\n assert_eq(db.map(myadd, b), list(map(myadd, x)))\n assert_eq(db.map(myadd, a=b), list(map(myadd, x)))\n assert_eq(db.map(myadd, b, b2), list(map(myadd, x, x2)))\n assert_eq(db.map(myadd, b, 10), [myadd(i, 10) for i in x])\n assert_eq(db.map(myadd, 10, b=b), [myadd(10, b=i) for i in x])\n\n sol = [myadd(i, b=j, c=100) for (i, j) in zip(x, x2)]\n assert_eq(db.map(myadd, b, b=b2, c=100), sol)\n\n sol = [myadd(i, c=100) for (i, j) in zip(x, x2)]\n assert_eq(db.map(myadd, b, c=100), sol)\n\n x_sum = sum(x)\n sol = [myadd(x_sum, b=i, c=100) for i in x2]\n assert_eq(db.map(myadd, b.sum(), b=b2, c=100), sol)\n\n sol = [myadd(i, b=x_sum, c=100) for i in x2]\n assert_eq(db.map(myadd, b2, b.sum(), c=100), sol)\n\n sol = [myadd(a=100, b=x_sum, c=i) for i in x2]\n assert_eq(db.map(myadd, a=100, b=b.sum(), c=b2), sol)\n\n a = dask.delayed(10)\n assert_eq(db.map(myadd, b, a), [myadd(i, 10) for i in x])\n assert_eq(db.map(myadd, b, b=a), [myadd(i, b=10) for i in x])\n\n # Mismatched npartitions\n fewer_parts = db.from_sequence(range(100), npartitions=5)\n with pytest.raises(ValueError):\n db.map(myadd, b, fewer_parts)\n\n # No bags\n with pytest.raises(ValueError):\n db.map(myadd, b.sum(), 1, 2)\n\n # Unequal partitioning\n unequal = db.from_sequence(range(110), npartitions=10)\n with pytest.raises(ValueError):\n db.map(myadd, b, unequal, c=b2).compute()\n with pytest.raises(ValueError):\n db.map(myadd, b, b=unequal, c=b2).compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_map_method_test_map_method.assert_b_map_myadd_b_sum": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_map_method_test_map_method.assert_b_map_myadd_b_sum", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 116, "end_line": 133, "span_ids": ["test_map_method"], "tokens": 254}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_method():\n b = db.from_sequence(range(100), npartitions=10)\n b2 = db.from_sequence(range(100, 200), npartitions=10)\n x = b.compute()\n x2 = b2.compute()\n\n def myadd(a, b=2, c=3):\n return a + b + c\n\n assert b.map(myadd).compute() == list(map(myadd, x))\n assert b.map(myadd, b2).compute() == list(map(myadd, x, x2))\n assert b.map(myadd, 10).compute() == [myadd(i, 10) for i in x]\n assert b.map(myadd, b=10).compute() == [myadd(i, b=10) for i in x]\n assert b.map(myadd, b2, c=10).compute() == [\n myadd(i, j, 10) for (i, j) in zip(x, x2)\n ]\n x_sum = sum(x)\n assert b.map(myadd, b.sum(), c=10).compute() == [myadd(i, x_sum, 10) for i in x]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_starmap_test_starmap.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_starmap_test_starmap.None_3", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 136, "end_line": 150, "span_ids": ["test_starmap"], "tokens": 214}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_starmap():\n data = [(1, 2), (3, 4), (5, 6), (7, 8), (9, 10)]\n b = db.from_sequence(data, npartitions=2)\n\n def myadd(a, b, c=0):\n return a + b + c\n\n assert b.starmap(myadd).compute() == [myadd(*a) for a in data]\n assert b.starmap(myadd, c=10).compute() == [myadd(*a, c=10) for a in data]\n max_second = b.pluck(1).max()\n assert b.starmap(myadd, c=max_second).compute() == [\n myadd(*a, c=max_second.compute()) for a in data\n ]\n c = dask.delayed(10)\n assert b.starmap(myadd, c=c).compute() == [myadd(*a, c=10) for a in data]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_filter_test_repr.assert_from_sequence_in": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_filter_test_repr.assert_from_sequence_in", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 153, "end_line": 183, "span_ids": ["test_repr", "test_iter", "test_filter", "test_remove"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_filter():\n c = b.filter(iseven)\n expected = merge(\n dsk,\n dict(\n ((c.name, i), (reify, (filter, iseven, (b.name, i))))\n for i in range(b.npartitions)\n ),\n )\n assert c.dask == expected\n assert c.name == b.filter(iseven).name\n\n\ndef test_remove():\n f = lambda x: x % 2 == 0\n c = b.remove(f)\n assert list(c) == [1, 3] * 3\n assert c.name == b.remove(f).name\n\n\ndef test_iter():\n assert sorted(list(b)) == sorted(L)\n assert sorted(list(b.map(inc))) == sorted(list(range(1, 6)) * 3)\n\n\n@pytest.mark.parametrize(\"func\", [str, repr])\ndef test_repr(func):\n assert str(b.npartitions) in func(b)\n assert b.name[:5] in func(b)\n\n assert \"from_sequence\" in func(db.from_sequence(range(5)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_pluck_test_pluck.assert_b_pluck_1_0_na": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_pluck_test_pluck.assert_b_pluck_1_0_na", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 186, "end_line": 192, "span_ids": ["test_pluck"], "tokens": 162}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_pluck():\n d = {(\"x\", 0): [(1, 10), (2, 20)], (\"x\", 1): [(3, 30), (4, 40)]}\n b = Bag(d, \"x\", 2)\n assert set(b.pluck(0)) == {1, 2, 3, 4}\n assert set(b.pluck(1)) == {10, 20, 30, 40}\n assert set(b.pluck([1, 0])) == {(10, 1), (20, 2), (30, 3), (40, 4)}\n assert b.pluck([1, 0]).name == b.pluck([1, 0]).name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_pluck_with_default_test_unzip.assert_one_name_two_na": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_pluck_with_default_test_unzip.assert_one_name_two_na", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 195, "end_line": 209, "span_ids": ["test_pluck_with_default", "test_unzip"], "tokens": 194}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_pluck_with_default():\n b = 
db.from_sequence([\"Hello\", \"\", \"World\"])\n pytest.raises(IndexError, lambda: list(b.pluck(0)))\n assert list(b.pluck(0, None)) == [\"H\", None, \"W\"]\n assert b.pluck(0, None).name == b.pluck(0, None).name\n assert b.pluck(0).name != b.pluck(0, None).name\n\n\ndef test_unzip():\n b = db.from_sequence(range(100)).map(lambda x: (x, x + 1, x + 2))\n one, two, three = b.unzip(3)\n assert list(one) == list(range(100))\n assert list(three) == [i + 2 for i in range(100)]\n assert one.name == b.unzip(3)[0].name\n assert one.name != two.name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_fold_test_fold.assert_set_e_fold_add_in": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_fold_test_fold.assert_set_e_fold_add_in", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 212, "end_line": 239, "span_ids": ["test_fold"], "tokens": 254}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fold():\n c = b.fold(add)\n assert c.compute() == sum(L)\n assert c.key == b.fold(add).key\n\n c2 = b.fold(add, initial=10)\n assert c2.key != c.key\n assert c2.compute() == sum(L) + 10 * b.npartitions\n assert c2.key == b.fold(add, initial=10).key\n\n c = db.from_sequence(range(5), npartitions=3)\n\n def binop(acc, x):\n acc = acc.copy()\n acc.add(x)\n return acc\n\n d = c.fold(binop, set.union, initial=set())\n assert d.compute() == set(c)\n assert d.key == c.fold(binop, set.union, initial=set()).key\n\n d = db.from_sequence(\"hello\")\n assert set(d.fold(lambda a, b: \"\".join([a, b]), initial=\"\").compute()) == set(\n \"hello\"\n )\n\n e = db.from_sequence([[1], [2], [3]], npartitions=2)\n assert set(e.fold(add, initial=[]).compute(scheduler=\"sync\")) == {1, 2, 3}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_fold_bag_test_distinct.assert_bag_filter_None_d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_fold_bag_test_distinct.assert_bag_filter_None_d", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 242, "end_line": 258, "span_ids": ["test_fold_bag", "test_distinct"], "tokens": 161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": "def test_fold_bag():\n def binop(tot, x):\n tot.add(x)\n return tot\n\n c = b.fold(binop, combine=set.union, initial=set(), out_type=Bag)\n assert isinstance(c, Bag)\n assert_eq(c, list(set(range(5))))\n\n\ndef test_distinct():\n assert sorted(b.distinct()) == [0, 1, 2, 3, 4]\n assert b.distinct().name == b.distinct().name\n assert \"distinct\" in b.distinct().name\n assert b.distinct().count().compute() == 5\n bag = db.from_sequence([0] * 50, npartitions=50)\n assert bag.filter(None).distinct().compute() == []", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_distinct_with_key_test_distinct_with_key.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_distinct_with_key_test_distinct_with_key.None_1", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 261, "end_line": 266, "span_ids": ["test_distinct_with_key"], "tokens": 107}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_distinct_with_key():\n seq = [{\"a\": i} for i in [0, 1, 2, 1, 2, 3, 2, 3, 4, 5]]\n bag = db.from_sequence(seq, npartitions=3)\n expected = list(unique(seq, key=lambda x: x[\"a\"]))\n assert_eq(bag.distinct(key=\"a\"), expected)\n assert_eq(bag.distinct(key=lambda x: x[\"a\"]), expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_frequencies_test_frequencies.assert_eq_bag2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_frequencies_test_frequencies.assert_eq_bag2_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 269, "end_line": 284, "span_ids": ["test_frequencies"], "tokens": 246}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_frequencies():\n c = b.frequencies()\n assert dict(c) == {0: 3, 1: 3, 2: 3, 3: 3, 4: 3}\n c2 = b.frequencies(split_every=2)\n assert dict(c2) == {0: 3, 1: 3, 2: 3, 3: 3, 4: 3}\n assert c.name == b.frequencies().name\n assert c.name != c2.name\n assert c2.name == b.frequencies(split_every=2).name\n # test bag with empty partitions\n b2 = db.from_sequence(range(20), partition_size=2)\n b2 = b2.filter(lambda x: x < 10)\n d = b2.frequencies()\n 
assert dict(d) == dict(zip(range(10), [1] * 10))\n bag = db.from_sequence([0, 0, 0, 0], npartitions=4)\n bag2 = bag.filter(None).frequencies(split_every=2)\n assert_eq(bag2, [])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_frequencies_sorted_test_topk.assert_b_topk_4_name_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_frequencies_sorted_test_topk.assert_b_topk_4_name_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 287, "end_line": 299, "span_ids": ["test_topk", "test_frequencies_sorted"], "tokens": 189}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_frequencies_sorted():\n b = db.from_sequence([\"a\", \"b\", \"b\", \"b\", \"c\", \"c\"])\n assert list(b.frequencies(sort=True).compute()) == [(\"b\", 3), (\"c\", 2), (\"a\", 1)]\n\n\ndef test_topk():\n assert list(b.topk(4)) == [4, 4, 4, 3]\n c = b.topk(4, key=lambda x: -x)\n assert list(c) == [0, 0, 0, 1]\n c2 = b.topk(4, key=lambda x: -x, split_every=2)\n assert list(c2) == [0, 0, 0, 1]\n assert c.name != c2.name\n assert b.topk(4).name == b.topk(4).name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_topk_with_non_callable_key_test_topk_with_non_callable_key.assert_b_topk_2_key_1_n": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_topk_with_non_callable_key_test_topk_with_non_callable_key.assert_b_topk_2_key_1_n", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 302, "end_line": 307, "span_ids": ["test_topk_with_non_callable_key"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"npartitions\", [1, 2])\ndef test_topk_with_non_callable_key(npartitions):\n b = db.from_sequence([(1, 10), (2, 9), (3, 8)], npartitions=npartitions)\n assert list(b.topk(2, key=1)) == [(1, 10), (2, 9)]\n assert list(b.topk(2, key=0)) == [(3, 8), (2, 9)]\n assert b.topk(2, key=1).name == b.topk(2, key=1).name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": 
"1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_topk_with_multiarg_lambda_test_reduction_names.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_topk_with_multiarg_lambda_test_reduction_names.None_3", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 310, "end_line": 336, "span_ids": ["test_topk_with_multiarg_lambda", "test_reductions", "test_reduction_names", "test_lambdas"], "tokens": 264}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_topk_with_multiarg_lambda():\n b = db.from_sequence([(1, 10), (2, 9), (3, 8)], npartitions=2)\n assert list(b.topk(2, key=lambda a, b: b)) == [(1, 10), (2, 9)]\n\n\ndef test_lambdas():\n assert list(b.map(lambda x: x + 1)) == list(b.map(inc))\n\n\ndef test_reductions():\n assert int(b.count()) == 15\n assert int(b.sum()) == 30\n assert int(b.max()) == 4\n assert int(b.min()) == 0\n assert b.any().compute() is True\n assert b.all().compute() is False\n assert b.all().key == b.all().key\n assert b.all().key != b.any().key\n\n\ndef test_reduction_names():\n assert b.sum().name.startswith(\"sum\")\n assert b.reduction(sum, sum).name.startswith(\"sum\")\n assert any(\n isinstance(k, str) and k.startswith(\"max\") for k in b.reduction(sum, max).dask\n )\n assert b.reduction(sum, sum, name=\"foo\").name.startswith(\"foo\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_tree_reductions_test_tree_reductions.assert_c_key_b_sum_k": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_tree_reductions_test_tree_reductions.assert_c_key_b_sum_k", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 339, "end_line": 357, "span_ids": ["test_tree_reductions"], "tokens": 161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tree_reductions():\n b = db.from_sequence(range(12))\n c = b.reduction(sum, sum, split_every=2)\n d = b.reduction(sum, sum, split_every=6)\n e = b.reduction(sum, sum, split_every=5)\n\n assert c.compute() == d.compute() == e.compute()\n\n assert len(c.dask) > len(d.dask)\n\n c = b.sum(split_every=2)\n d = b.sum(split_every=5)\n\n assert c.compute() == d.compute()\n assert len(c.dask) > len(d.dask)\n\n assert c.key != d.key\n assert c.key == b.sum(split_every=2).key\n assert c.key != b.sum().key", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_aggregation_test_var.assert_float_b_var_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_aggregation_test_var.assert_float_b_var_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 360, "end_line": 386, "span_ids": ["test_non_splittable_reductions", "test_aggregation", "test_var", "test_std"], "tokens": 217}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"npartitions\", [1, 3, 4])\ndef test_aggregation(npartitions):\n L = list(range(15))\n b = db.range(15, npartitions=npartitions)\n assert_eq(b.mean(), sum(L) / len(L))\n assert_eq(b.sum(), sum(L))\n assert_eq(b.count(), len(L))\n\n\n@pytest.mark.parametrize(\"npartitions\", [1, 10])\ndef test_non_splittable_reductions(npartitions):\n np = pytest.importorskip(\"numpy\")\n data = list(range(100))\n c = db.from_sequence(data, npartitions=npartitions)\n\n assert_eq(c.mean(), np.mean(data))\n assert_eq(c.std(), np.std(data))\n\n\ndef test_std():\n assert_eq(b.std(), math.sqrt(2.0))\n assert float(b.std()) == math.sqrt(2.0)\n\n\ndef test_var():\n assert_eq(b.var(), 2.0)\n assert float(b.var()) == 2.0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_join_test_join.assert_c_name_b_join_o": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_join_test_join.assert_c_name_b_join_o", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 389, "end_line": 397, "span_ids": ["test_join"], "tokens": 144}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"transform\", [identity, dask.delayed, lambda x: db.from_sequence(x, npartitions=1)]\n)\ndef test_join(transform):\n other = transform([1, 2, 3])\n c = b.join(other, on_self=isodd, on_other=iseven)\n assert_eq(c, list(join(iseven, [1, 2, 3], isodd, list(b))))\n assert_eq(b.join(other, isodd), list(join(isodd, [1, 2, 3], isodd, list(b))))\n assert c.name == b.join(other, on_self=isodd, on_other=iseven).name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", 
"class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_foldby_test_foldby.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_foldby_test_foldby.None_3", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 400, "end_line": 407, "span_ids": ["test_foldby"], "tokens": 156}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_foldby():\n c = b.foldby(iseven, add, 0, add, 0)\n assert (reduceby, iseven, add, (b.name, 0), 0) in list(c.dask.values())\n assert set(c) == set(reduceby(iseven, lambda acc, x: acc + x, L, 0).items())\n assert c.name == b.foldby(iseven, add, 0, add, 0).name\n\n c = b.foldby(iseven, lambda acc, x: acc + x)\n assert set(c) == set(reduceby(iseven, lambda acc, x: acc + x, L, 0).items())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_foldby_tree_reduction_test_map_partitions.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_foldby_tree_reduction_test_map_partitions.None_2", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 410, "end_line": 430, "span_ids": ["test_foldby_tree_reduction", "test_map_partitions"], "tokens": 258}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_foldby_tree_reduction():\n dsk = list()\n for n in [1, 7, 32]:\n b = db.from_sequence(range(100), npartitions=n)\n c = b.foldby(iseven, add)\n dsk.extend([c])\n for m in [False, None, 2, 3]:\n d = b.foldby(iseven, add, split_every=m)\n e = b.foldby(iseven, add, 0, split_every=m)\n f = b.foldby(iseven, add, 0, add, split_every=m)\n g = b.foldby(iseven, add, 0, add, 0, split_every=m)\n dsk.extend([d, e, f, g])\n results = dask.compute(dsk)\n first = results[0]\n assert all([r == first for r in results])\n\n\ndef test_map_partitions():\n assert list(b.map_partitions(len)) == [5, 5, 5]\n assert b.map_partitions(len).name == b.map_partitions(len).name\n assert b.map_partitions(lambda a: len(a) + 1).name != b.map_partitions(len).name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_map_partitions_args_kwargs_test_map_partitions_args_kwargs.None_9": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_map_partitions_args_kwargs_test_map_partitions_args_kwargs.None_9", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 433, "end_line": 461, "span_ids": ["test_map_partitions_args_kwargs"], "tokens": 319}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_partitions_args_kwargs():\n x = [random.randint(-100, 100) for i in range(100)]\n y = [random.randint(-100, 100) for i in range(100)]\n\n dx = db.from_sequence(x, npartitions=10)\n dy = db.from_sequence(y, npartitions=10)\n\n def maximum(x, y=0):\n y = repeat(y) if isinstance(y, int) else y\n return [max(a, b) for (a, b) in zip(x, y)]\n\n sol = maximum(x, y=10)\n assert_eq(db.map_partitions(maximum, dx, y=10), sol)\n assert_eq(dx.map_partitions(maximum, y=10), sol)\n assert_eq(dx.map_partitions(maximum, 10), sol)\n\n sol = maximum(x, y)\n assert_eq(db.map_partitions(maximum, dx, dy), sol)\n assert_eq(dx.map_partitions(maximum, y=dy), sol)\n assert_eq(dx.map_partitions(maximum, dy), sol)\n\n dy_mean = dy.mean().apply(int)\n sol = maximum(x, int(sum(y) / len(y)))\n assert_eq(dx.map_partitions(maximum, y=dy_mean), sol)\n assert_eq(dx.map_partitions(maximum, dy_mean), sol)\n\n dy_mean = dask.delayed(dy_mean)\n assert_eq(dx.map_partitions(maximum, y=dy_mean), sol)\n assert_eq(dx.map_partitions(maximum, dy_mean), sol)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_random_sample_size_test_random_sample_random_state.assert_list_b_list_c_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_random_sample_size_test_random_sample_random_state.assert_list_b_list_c_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 464, "end_line": 512, "span_ids": ["test_random_sample_different_definitions", "test_random_sample_random_state", "test_random_sample_repeated_computation", "test_random_sample_size", "test_random_sample_prob_range"], "tokens": 396}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_random_sample_size():\n \"\"\"\n Number of randomly sampled elements are in the expected range.\n \"\"\"\n a = db.from_sequence(range(1000), npartitions=5)\n # we expect a size of approx. 
100, but leave large margins to avoid\n # random failures\n assert 10 < len(list(a.random_sample(0.1, 42))) < 300\n\n\ndef test_random_sample_prob_range():\n \"\"\"\n Specifying probabilities outside the range [0, 1] raises ValueError.\n \"\"\"\n a = db.from_sequence(range(50), npartitions=5)\n with pytest.raises(ValueError):\n a.random_sample(-1)\n with pytest.raises(ValueError):\n a.random_sample(1.1)\n\n\ndef test_random_sample_repeated_computation():\n \"\"\"\n Repeated computation of a defined random sampling operation\n generates identical results.\n \"\"\"\n a = db.from_sequence(range(50), npartitions=5)\n b = a.random_sample(0.2)\n assert list(b) == list(b) # computation happens here\n\n\ndef test_random_sample_different_definitions():\n \"\"\"\n Repeatedly defining a random sampling operation yields different results\n upon computation if no random seed is specified.\n \"\"\"\n a = db.from_sequence(range(50), npartitions=5)\n assert list(a.random_sample(0.5)) != list(a.random_sample(0.5))\n assert a.random_sample(0.5).name != a.random_sample(0.5).name\n\n\ndef test_random_sample_random_state():\n \"\"\"\n Sampling with fixed random seed generates identical results.\n \"\"\"\n a = db.from_sequence(range(50), npartitions=5)\n b = a.random_sample(0.5, 1234)\n c = a.random_sample(0.5, 1234)\n assert list(b) == list(c)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_lazify_task_test_lazify_task.assert_lazify_task_a_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_lazify_task_test_lazify_task.assert_lazify_task_a_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 515, "end_line": 524, "span_ids": ["test_lazify_task"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_lazify_task():\n task = (sum, (reify, (map, inc, [1, 2, 3])))\n assert lazify_task(task) == (sum, (map, inc, [1, 2, 3]))\n\n task = (reify, (map, inc, [1, 2, 3]))\n assert lazify_task(task) == task\n\n a = (reify, (map, inc, (reify, (filter, iseven, \"y\"))))\n b = (reify, (map, inc, (filter, iseven, \"y\")))\n assert lazify_task(a) == b", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_f_test_lazify.assert_lazify_a_b": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_f_test_lazify.assert_lazify_a_b", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 527, "end_line": 537, "span_ids": 
["test_lazify", "impl:8"], "tokens": 117}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "f = lambda x: x\n\n\ndef test_lazify():\n a = {\n \"x\": (reify, (map, inc, (reify, (filter, iseven, \"y\")))),\n \"a\": (f, \"x\"),\n \"b\": (f, \"x\"),\n }\n b = {\"x\": (reify, (map, inc, (filter, iseven, \"y\"))), \"a\": (f, \"x\"), \"b\": (f, \"x\")}\n assert lazify(a) == b", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_inline_singleton_lists_test_inline_singleton_lists.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_inline_singleton_lists_test_inline_singleton_lists.None_5", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 540, "end_line": 557, "span_ids": ["test_inline_singleton_lists"], "tokens": 235}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_inline_singleton_lists():\n inp = {\"b\": (list, \"a\"), \"c\": (f, \"b\", 1)}\n out = {\"c\": (f, (list, \"a\"), 1)}\n assert inline_singleton_lists(inp, [\"c\"]) == out\n\n out = {\"c\": (f, \"a\", 1)}\n assert optimize(inp, [\"c\"], rename_fused_keys=False) == out\n\n # If list is an output key, don't fuse it\n assert inline_singleton_lists(inp, [\"b\", \"c\"]) == inp\n assert optimize(inp, [\"b\", \"c\"], rename_fused_keys=False) == inp\n\n inp = {\"b\": (list, \"a\"), \"c\": (f, \"b\", 1), \"d\": (f, \"b\", 2)}\n assert inline_singleton_lists(inp, [\"c\", \"d\"]) == inp\n\n # Doesn't inline constants\n inp = {\"b\": (4, 5), \"c\": (f, \"b\")}\n assert inline_singleton_lists(inp, [\"c\"]) == inp", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_rename_fused_keys_bag_test_rename_fused_keys_bag.assert_optimize_inp_c_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_rename_fused_keys_bag_test_rename_fused_keys_bag.assert_optimize_inp_c_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 560, "end_line": 571, "span_ids": ["test_rename_fused_keys_bag"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rename_fused_keys_bag():\n inp = {\"b\": (list, \"a\"), \"c\": (f, \"b\", 1)}\n\n outp = optimize(inp, [\"c\"], rename_fused_keys=False)\n assert outp.keys() == {\"c\"}\n assert outp[\"c\"][1:] == (\"a\", 1)\n\n with dask.config.set({\"optimization.fuse.rename-keys\": False}):\n assert optimize(inp, [\"c\"]) == outp\n\n # By default, fused keys are renamed\n assert optimize(inp, [\"c\"]) != outp", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_take_test_take_npartitions.with_pytest_raises_ValueE.b_take_1_npartitions_5_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_take_test_take_npartitions.with_pytest_raises_ValueE.b_take_1_npartitions_5_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 574, "end_line": 585, "span_ids": ["test_take_npartitions", "test_take"], "tokens": 159}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_take():\n assert list(b.take(2)) == [0, 1]\n assert b.take(2) == (0, 1)\n assert isinstance(b.take(2, compute=False), Bag)\n\n\ndef test_take_npartitions():\n assert list(b.take(6, npartitions=2)) == [0, 1, 2, 3, 4, 0]\n assert b.take(6, npartitions=-1) == (0, 1, 2, 3, 4, 0)\n assert b.take(3, npartitions=-1) == (0, 1, 2)\n with pytest.raises(ValueError):\n b.take(1, npartitions=5)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_take_npartitions_warn_test_take_npartitions_warn.with_dask_config_set_sche.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_take_npartitions_warn_test_take_npartitions_warn.with_dask_config_set_sche.None_1", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 588, "end_line": 604, "span_ids": ["test_take_npartitions_warn"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_take_npartitions_warn():\n # Use single-threaded scheduler so warnings are properly captured in the\n # same process\n with dask.config.set(scheduler=\"sync\"):\n with 
pytest.warns(UserWarning):\n b.take(100)\n\n with pytest.warns(UserWarning):\n b.take(7)\n\n with pytest.warns(None) as rec:\n b.take(7, npartitions=2)\n assert len(rec) == 0\n\n with pytest.warns(None) as rec:\n b.take(7, warn=False)\n assert len(rec) == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_map_is_lazy_test_read_text.pytest_raises_ValueError_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_map_is_lazy_test_read_text.pytest_raises_ValueError_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 607, "end_line": 632, "span_ids": ["test_from_url", "test_can_use_dict_to_make_concrete", "test_read_text", "test_map_is_lazy"], "tokens": 234}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_is_lazy():\n assert isinstance(map(lambda x: x, [1, 2, 3]), Iterator)\n\n\ndef test_can_use_dict_to_make_concrete():\n assert isinstance(dict(b.frequencies()), dict)\n\n\n@pytest.mark.slow\n@pytest.mark.network\n@pytest.mark.skip(reason=\"Hangs\")\ndef test_from_url():\n a = db.from_url([\"http://google.com\", \"http://github.com\"])\n assert a.npartitions == 2\n\n b = db.from_url(\"http://raw.githubusercontent.com/dask/dask/master/README.rst\")\n assert b.npartitions == 1\n assert b\"Dask\\n\" in b.take(10)\n\n\ndef test_read_text():\n with filetexts({\"a1.log\": \"A\\nB\", \"a2.log\": \"C\\nD\"}) as fns:\n assert set(line.strip() for line in db.read_text(fns)) == set(\"ABCD\")\n assert set(line.strip() for line in db.read_text(\"a*.log\")) == set(\"ABCD\")\n\n pytest.raises(ValueError, lambda: db.read_text(\"non-existent-*-path\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_read_text_large_test_read_text_large.with_tmpfile_as_fn_.assert_list_b_list_d_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_read_text_large_test_read_text_large.with_tmpfile_as_fn_.assert_list_b_list_d_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 635, "end_line": 645, "span_ids": ["test_read_text_large"], "tokens": 118}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_text_large():\n with 
tmpfile() as fn:\n with open(fn, \"wb\") as f:\n f.write((\"Hello, world!\" + os.linesep).encode() * 100)\n b = db.read_text(fn, blocksize=100)\n c = db.read_text(fn)\n assert len(b.dask) > 5\n assert list(map(str, b.str.strip())) == list(map(str, c.str.strip()))\n\n d = db.read_text([fn], blocksize=100)\n assert list(b) == list(d)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_read_text_encoding_test_read_text_encoding.with_tmpfile_as_fn_.assert_list_b_list_d_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_read_text_encoding_test_read_text_encoding.with_tmpfile_as_fn_.assert_list_b_list_d_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 648, "end_line": 660, "span_ids": ["test_read_text_encoding"], "tokens": 170}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_text_encoding():\n with tmpfile() as fn:\n with open(fn, \"wb\") as f:\n f.write((\"\u4f60\u597d\uff01\" + os.linesep).encode(\"gb18030\") * 100)\n b = db.read_text(fn, blocksize=100, encoding=\"gb18030\")\n c = db.read_text(fn, encoding=\"gb18030\")\n assert len(b.dask) > 5\n b_enc = b.str.strip().map(lambda x: x.encode(\"utf-8\"))\n c_enc = c.str.strip().map(lambda x: x.encode(\"utf-8\"))\n assert list(b_enc) == list(c_enc)\n\n d = db.read_text([fn], blocksize=100, encoding=\"gb18030\")\n assert list(b) == list(d)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_read_text_large_gzip_test_read_text_large_gzip.with_tmpfile_gz_as_fn_.assert_join_c_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_read_text_large_gzip_test_read_text_large_gzip.with_tmpfile_gz_as_fn_.assert_join_c_compute_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 663, "end_line": 676, "span_ids": ["test_read_text_large_gzip"], "tokens": 117}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_text_large_gzip():\n with tmpfile(\"gz\") as fn:\n data = b\"Hello, world!\\n\" * 100\n f = GzipFile(fn, \"wb\")\n f.write(data)\n f.close()\n\n with pytest.raises(ValueError):\n # blocksize is not allowed when the file is compressed\n db.read_text(fn, 
blocksize=50, linedelimiter=\"\\n\")\n\n c = db.read_text(fn, blocksize=None)\n assert c.npartitions == 1\n assert \"\".join(c.compute()) == data.decode()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_from_s3_test_from_s3.assert_c_npartitions_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_from_s3_test_from_s3.assert_c_npartitions_3", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 679, "end_line": 703, "span_ids": ["test_from_s3"], "tokens": 265}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.slow\n@pytest.mark.network\ndef test_from_s3():\n # note we don't test connection modes with aws_access_key and\n # aws_secret_key because these are not on travis-ci\n pytest.importorskip(\"s3fs\")\n\n five_tips = (\n \"total_bill,tip,sex,smoker,day,time,size\\n\",\n \"16.99,1.01,Female,No,Sun,Dinner,2\\n\",\n \"10.34,1.66,Male,No,Sun,Dinner,3\\n\",\n \"21.01,3.5,Male,No,Sun,Dinner,3\\n\",\n \"23.68,3.31,Male,No,Sun,Dinner,2\\n\",\n )\n\n # test compressed data\n e = db.read_text(\"s3://tip-data/t*.gz\", storage_options=dict(anon=True))\n assert e.take(5) == five_tips\n\n # test multiple keys in bucket\n c = db.read_text(\n [\"s3://tip-data/tips.gz\", \"s3://tip-data/tips.json\", \"s3://tip-data/tips.csv\"],\n storage_options=dict(anon=True),\n )\n assert c.npartitions == 3", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_from_sequence_test_from_empty_sequence.assert_df_empty_DataFra": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_from_sequence_test_from_empty_sequence.assert_df_empty_DataFra", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 706, "end_line": 722, "span_ids": ["test_from_sequence", "test_from_long_sequence", "test_from_empty_sequence"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_sequence():\n b = db.from_sequence([1, 2, 3, 4, 5], npartitions=3)\n assert len(b.dask) == 3\n assert set(b) == {1, 2, 3, 4, 5}\n\n\ndef test_from_long_sequence():\n L = list(range(1001))\n b = db.from_sequence(L)\n assert set(b) == set(L)\n\n\ndef 
test_from_empty_sequence():\n b = db.from_sequence([])\n assert b.npartitions == 1\n df = b.to_dataframe(meta={\"a\": \"int\"}).compute()\n assert df.empty, \"DataFrame is not empty\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_product_test_product.assert_z_name_x_produc": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_product_test_product.assert_z_name_x_produc", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 725, "end_line": 736, "span_ids": ["test_product"], "tokens": 147}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_product():\n b2 = b.product(b)\n assert b2.npartitions == b.npartitions ** 2\n assert set(b2) == {(i, j) for i in L for j in L}\n\n x = db.from_sequence([1, 2, 3, 4])\n y = db.from_sequence([10, 20, 30])\n z = x.product(y)\n assert set(z) == {(i, j) for i in [1, 2, 3, 4] for j in [10, 20, 30]}\n\n assert z.name != b2.name\n assert z.name == x.product(y).name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_partition_collect_test_groupby.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_partition_collect_test_groupby.None_3", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 739, "end_line": 761, "span_ids": ["test_groupby", "test_partition_collect"], "tokens": 230}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_partition_collect():\n with partd.Pickle() as p:\n partition(identity, range(6), 3, p)\n assert set(p.get(0)) == {0, 3}\n assert set(p.get(1)) == {1, 4}\n assert set(p.get(2)) == {2, 5}\n\n assert sorted(collect(identity, 0, p, \"\")) == [(0, [0]), (3, [3])]\n\n\ndef test_groupby():\n c = b.groupby(identity)\n result = dict(c)\n assert result == {\n 0: [0, 0, 0],\n 1: [1, 1, 1],\n 2: [2, 2, 2],\n 3: [3, 3, 3],\n 4: [4, 4, 4],\n }\n assert c.npartitions == b.npartitions\n assert c.name == b.groupby(identity).name\n assert c.name != b.groupby(lambda x: x + 1).name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": 
"TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_groupby_with_indexer_test_groupby_with_npartitions_changed.assert_result_npartitions": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_groupby_with_indexer_test_groupby_with_npartitions_changed.assert_result_npartitions", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 764, "end_line": 781, "span_ids": ["test_groupby_with_indexer", "test_groupby_with_npartitions_changed"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_with_indexer():\n b = db.from_sequence([[1, 2, 3], [1, 4, 9], [2, 3, 4]])\n result = dict(b.groupby(0))\n assert valmap(sorted, result) == {1: [[1, 2, 3], [1, 4, 9]], 2: [[2, 3, 4]]}\n\n\ndef test_groupby_with_npartitions_changed():\n result = b.groupby(lambda x: x, npartitions=1)\n result2 = dict(result)\n assert result2 == {\n 0: [0, 0, 0],\n 1: [1, 1, 1],\n 2: [2, 2, 2],\n 3: [3, 3, 3],\n 4: [4, 4, 4],\n }\n\n assert result.npartitions == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_concat_test_args.assert_c_npartitions_d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_concat_test_args.assert_c_npartitions_d", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 784, "end_line": 810, "span_ids": ["test_concat", "test_concat_after_map", "test_args", "test_flatten"], "tokens": 237}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_concat():\n a = db.from_sequence([1, 2, 3])\n b = db.from_sequence([4, 5, 6])\n c = db.concat([a, b])\n assert list(c) == [1, 2, 3, 4, 5, 6]\n assert c.name == db.concat([a, b]).name\n\n\ndef test_flatten():\n b = db.from_sequence([[1], [2, 3]])\n assert list(b.flatten()) == [1, 2, 3]\n assert b.flatten().name == b.flatten().name\n\n\ndef test_concat_after_map():\n a = db.from_sequence([1, 2])\n b = db.from_sequence([4, 5])\n result = db.concat([a.map(inc), b])\n assert list(result) == [2, 3, 4, 5]\n\n\ndef test_args():\n c = b.map(lambda x: x + 1)\n d = Bag(*c._args)\n\n assert list(c) == list(d)\n assert c.npartitions == d.npartitions", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": 
"1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_dataframe_test_to_dataframe.for_f_in_iter_tuple_.check_parts_df_sol_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_dataframe_test_to_dataframe.for_f_in_iter_tuple_.check_parts_df_sol_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 813, "end_line": 871, "span_ids": ["test_to_dataframe"], "tokens": 575}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_dataframe():\n dd = pytest.importorskip(\"dask.dataframe\")\n pd = pytest.importorskip(\"pandas\")\n\n def check_parts(df, sol):\n assert all(\n (p.dtypes == sol.dtypes).all() for p in dask.compute(*df.to_delayed())\n )\n\n dsk = {(\"test\", 0): [(1, 2)], (\"test\", 1): [], (\"test\", 2): [(10, 20), (100, 200)]}\n b = Bag(dsk, \"test\", 3)\n sol = pd.DataFrame(b.compute(), columns=[\"a\", \"b\"])\n\n # Elements are tuples\n df = b.to_dataframe()\n dd.utils.assert_eq(df, sol.rename(columns={\"a\": 0, \"b\": 1}), check_index=False)\n df = b.to_dataframe(columns=[\"a\", \"b\"])\n dd.utils.assert_eq(df, sol, check_index=False)\n check_parts(df, sol)\n df = b.to_dataframe(meta=[(\"a\", \"i8\"), (\"b\", \"i8\")])\n dd.utils.assert_eq(df, sol, check_index=False)\n check_parts(df, sol)\n\n # Elements are dictionaries\n b = b.map(lambda x: dict(zip([\"a\", \"b\"], x)))\n df = b.to_dataframe()\n dd.utils.assert_eq(df, sol, check_index=False)\n check_parts(df, sol)\n assert df._name == b.to_dataframe()._name\n\n # With metadata specified\n for meta in [sol, [(\"a\", \"i8\"), (\"b\", \"i8\")]]:\n df = b.to_dataframe(meta=meta)\n dd.utils.assert_eq(df, sol, check_index=False)\n check_parts(df, sol)\n\n # Error to specify both columns and meta\n with pytest.raises(ValueError):\n b.to_dataframe(columns=[\"a\", \"b\"], meta=sol)\n\n # Inference fails if empty first partition\n b2 = b.filter(lambda x: x[\"a\"] > 200)\n with pytest.raises(ValueError):\n b2.to_dataframe()\n\n # Single column\n b = b.pluck(\"a\")\n sol = sol[[\"a\"]]\n df = b.to_dataframe(meta=sol)\n dd.utils.assert_eq(df, sol, check_index=False)\n check_parts(df, sol)\n\n # Works with iterators and tuples\n sol = pd.DataFrame({\"a\": range(100)})\n b = db.from_sequence(range(100), npartitions=5)\n for f in [iter, tuple]:\n df = b.map_partitions(f).to_dataframe(meta=sol)\n dd.utils.assert_eq(df, sol, check_index=False)\n check_parts(df, sol)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_ext_open_test_to_textfiles.with_tmpdir_as_dir_.f_close_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_ext_open_test_to_textfiles.with_tmpdir_as_dir_.f_close_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", 
"file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 874, "end_line": 890, "span_ids": ["test_to_textfiles", "impl:10"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "ext_open = [(\"gz\", GzipFile), (\"bz2\", BZ2File), (\"\", open)]\n\n\n@pytest.mark.parametrize(\"ext,myopen\", ext_open)\ndef test_to_textfiles(ext, myopen):\n b = db.from_sequence([\"abc\", \"123\", \"xyz\"], npartitions=2)\n with tmpdir() as dir:\n c = b.to_textfiles(os.path.join(dir, \"*.\" + ext), compute=False)\n dask.compute(*c, scheduler=\"sync\")\n assert os.path.exists(os.path.join(dir, \"1.\" + ext))\n\n f = myopen(os.path.join(dir, \"1.\" + ext), \"rb\")\n text = f.read()\n if hasattr(text, \"decode\"):\n text = text.decode()\n assert \"xyz\" in text\n f.close()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_textfiles_name_function_preserves_order_test_to_textfiles_name_function_preserves_order.with_tmpdir_as_dn_.assert_seq_out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_textfiles_name_function_preserves_order_test_to_textfiles_name_function_preserves_order.with_tmpdir_as_dn_.assert_seq_out", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 893, "end_line": 922, "span_ids": ["test_to_textfiles_name_function_preserves_order"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_textfiles_name_function_preserves_order():\n seq = [\n \"a\",\n \"b\",\n \"c\",\n \"d\",\n \"e\",\n \"f\",\n \"g\",\n \"h\",\n \"i\",\n \"j\",\n \"k\",\n \"l\",\n \"m\",\n \"n\",\n \"o\",\n \"p\",\n ]\n b = db.from_sequence(seq, npartitions=16)\n with tmpdir() as dn:\n b.to_textfiles(dn)\n\n out = (\n db.read_text(os.path.join(dn, \"*\"), encoding=\"ascii\")\n .map(str)\n .map(str.strip)\n .compute()\n )\n assert seq == out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_textfiles_name_function_warn_test_to_textfiles_name_function_warn.with_tmpdir_as_dn_.with_pytest_warns_None_.a_to_textfiles_dn_name_f": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_textfiles_name_function_warn_test_to_textfiles_name_function_warn.with_tmpdir_as_dn_.with_pytest_warns_None_.a_to_textfiles_dn_name_f", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 925, "end_line": 947, "span_ids": ["test_to_textfiles_name_function_warn"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_textfiles_name_function_warn():\n seq = [\n \"a\",\n \"b\",\n \"c\",\n \"d\",\n \"e\",\n \"f\",\n \"g\",\n \"h\",\n \"i\",\n \"j\",\n \"k\",\n \"l\",\n \"m\",\n \"n\",\n \"o\",\n \"p\",\n ]\n a = db.from_sequence(seq, npartitions=16)\n with tmpdir() as dn:\n with pytest.warns(None):\n a.to_textfiles(dn, name_function=str)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_textfiles_encoding_test_to_textfiles_encoding.for_ext_myopen_in_ext_op.with_tmpdir_as_dir_.f_close_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_textfiles_encoding_test_to_textfiles_encoding.for_ext_myopen_in_ext_op.with_tmpdir_as_dir_.f_close_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 950, "end_line": 965, "span_ids": ["test_to_textfiles_encoding"], "tokens": 161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_textfiles_encoding():\n b = db.from_sequence([\"\u6c7d\u8f66\", \"\u82f9\u679c\", \"\u5929\u6c14\"], npartitions=2)\n for ext, myopen in ext_open:\n with tmpdir() as dir:\n c = b.to_textfiles(\n os.path.join(dir, \"*.\" + ext), encoding=\"gb18030\", compute=False\n )\n dask.compute(*c)\n assert os.path.exists(os.path.join(dir, \"1.\" + ext))\n\n f = myopen(os.path.join(dir, \"1.\" + ext), \"rb\")\n text = f.read()\n if hasattr(text, \"decode\"):\n text = text.decode(\"gb18030\")\n assert \"\u5929\u6c14\" in text\n f.close()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_textfiles_inputs_test_to_textfiles_endlines.with_tmpfile_as_fn_.for_last_endline_in_False.assert_result_a_n_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_textfiles_inputs_test_to_textfiles_endlines.with_tmpfile_as_fn_.for_last_endline_in_False.assert_result_a_n_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 968, "end_line": 992, "span_ids": ["test_to_textfiles_endlines", "test_to_textfiles_inputs"], "tokens": 223}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_textfiles_inputs():\n B = db.from_sequence([\"abc\", \"123\", \"xyz\"], npartitions=2)\n with tmpfile() as a:\n with tmpfile() as b:\n B.to_textfiles([a, b])\n assert os.path.exists(a)\n assert os.path.exists(b)\n\n with tmpdir() as dirname:\n B.to_textfiles(dirname)\n assert os.path.exists(dirname)\n assert os.path.exists(os.path.join(dirname, \"0.part\"))\n\n with pytest.raises(TypeError):\n B.to_textfiles(5)\n\n\ndef test_to_textfiles_endlines():\n b = db.from_sequence([\"a\", \"b\", \"c\"], npartitions=1)\n with tmpfile() as fn:\n for last_endline in False, True:\n b.to_textfiles([fn], last_endline=last_endline)\n with open(fn, \"r\") as f:\n result = f.readlines()\n assert result == [\"a\\n\", \"b\\n\", \"c\\n\" if last_endline else \"c\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_string_namespace_test_string_namespace.None_6": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_string_namespace_test_string_namespace.None_6", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 995, "end_line": 1011, "span_ids": ["test_string_namespace"], "tokens": 174}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_string_namespace():\n b = db.from_sequence([\"Alice Smith\", \"Bob Jones\", \"Charlie Smith\"], npartitions=2)\n\n assert \"split\" in dir(b.str)\n assert \"match\" in dir(b.str)\n\n assert list(b.str.lower()) == [\"alice smith\", \"bob jones\", \"charlie smith\"]\n assert list(b.str.split(\" \")) == [\n [\"Alice\", \"Smith\"],\n [\"Bob\", \"Jones\"],\n [\"Charlie\", \"Smith\"],\n ]\n assert list(b.str.match(\"*Smith\")) == [\"Alice Smith\", \"Charlie Smith\"]\n\n pytest.raises(AttributeError, lambda: b.str.sfohsofhf)\n assert b.str.match(\"*Smith\").name == b.str.match(\"*Smith\").name\n assert b.str.match(\"*Smith\").name != b.str.match(\"*John\").name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", 
"metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_string_namespace_with_unicode_BagOfDicts.set.return.self_map_setter_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_string_namespace_with_unicode_BagOfDicts.set.return.self_map_setter_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1014, "end_line": 1055, "span_ids": ["test_string_namespace_with_unicode", "test_ensure_compute_output_is_concrete", "test_str_empty_split", "BagOfDicts.get", "test_map_with_iterator_function", "BagOfDicts", "BagOfDicts.set"], "tokens": 315}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_string_namespace_with_unicode():\n b = db.from_sequence([\"Alice Smith\", \"Bob Jones\", \"Charlie Smith\"], npartitions=2)\n assert list(b.str.lower()) == [\"alice smith\", \"bob jones\", \"charlie smith\"]\n\n\ndef test_str_empty_split():\n b = db.from_sequence([\"Alice Smith\", \"Bob Jones\", \"Charlie Smith\"], npartitions=2)\n assert list(b.str.split()) == [\n [\"Alice\", \"Smith\"],\n [\"Bob\", \"Jones\"],\n [\"Charlie\", \"Smith\"],\n ]\n\n\ndef test_map_with_iterator_function():\n b = db.from_sequence([[1, 2, 3], [4, 5, 6]], npartitions=2)\n\n def f(L):\n for x in L:\n yield x + 1\n\n c = b.map(f)\n\n assert list(c) == [[2, 3, 4], [5, 6, 7]]\n\n\ndef test_ensure_compute_output_is_concrete():\n b = db.from_sequence([1, 2, 3])\n result = b.map(lambda x: x + 1).compute()\n assert not isinstance(result, Iterator)\n\n\nclass BagOfDicts(db.Bag):\n def get(self, key, default=None):\n return self.map(lambda d: d.get(key, default))\n\n def set(self, key, value):\n def setter(d):\n d[key] = value\n return d\n\n return self.map(setter)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_bag_class_extend_test_bag_class_extend.assert_isinstance_dictbag": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_bag_class_extend_test_bag_class_extend.assert_isinstance_dictbag", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1058, "end_line": 1065, "span_ids": ["test_bag_class_extend"], "tokens": 113}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_bag_class_extend():\n dictbag = BagOfDicts(*db.from_sequence([{\"a\": {\"b\": \"c\"}}])._args)\n assert 
dictbag.get(\"a\").get(\"b\").compute()[0] == \"c\"\n assert dictbag.get(\"a\").set(\"d\", \"EXTENSIBILITY!!!\").compute()[0] == {\n \"b\": \"c\",\n \"d\": \"EXTENSIBILITY!!!\",\n }\n assert isinstance(dictbag.get(\"a\").get(\"b\"), BagOfDicts)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_gh715_test_bag_compute_forward_kwargs.x_compute_bogus_keyword_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_gh715_test_bag_compute_forward_kwargs.x_compute_bogus_keyword_1", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1068, "end_line": 1079, "span_ids": ["test_bag_compute_forward_kwargs", "test_gh715"], "tokens": 111}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_gh715():\n bin_data = \"\\u20ac\".encode(\"utf-8\")\n with tmpfile() as fn:\n with open(fn, \"wb\") as f:\n f.write(bin_data)\n a = db.read_text(fn)\n assert a.compute()[0] == bin_data.decode(\"utf-8\")\n\n\ndef test_bag_compute_forward_kwargs():\n x = db.from_sequence([1, 2, 3]).map(lambda a: a + 1)\n x.compute(bogus_keyword=10)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_delayed_test_to_delayed.assert_t_compute_21": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_delayed_test_to_delayed.assert_t_compute_21", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1082, "end_line": 1091, "span_ids": ["test_to_delayed"], "tokens": 139}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_delayed():\n b = db.from_sequence([1, 2, 3, 4, 5, 6], npartitions=3)\n a, b, c = b.map(inc).to_delayed()\n assert all(isinstance(x, Delayed) for x in [a, b, c])\n assert b.compute() == [4, 5]\n\n b = db.from_sequence([1, 2, 3, 4, 5, 6], npartitions=3)\n t = b.sum().to_delayed()\n assert isinstance(t, Delayed)\n assert t.compute() == 21", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_delayed_optimize_graph_test_to_delayed_optimize_graph.None_9": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_delayed_optimize_graph_test_to_delayed_optimize_graph.None_9", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1094, "end_line": 1115, "span_ids": ["test_to_delayed_optimize_graph"], "tokens": 239}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_delayed_optimize_graph():\n b = db.from_sequence([1, 2, 3, 4, 5, 6], npartitions=1)\n b2 = b.map(inc).map(inc).map(inc)\n\n [d] = b2.to_delayed()\n text = str(dict(d.dask))\n assert text.count(\"reify\") == 1\n [d2] = b2.to_delayed(optimize_graph=False)\n assert dict(d2.dask) == dict(b2.dask)\n assert d.compute() == d2.compute()\n\n x = b2.sum()\n d = x.to_delayed()\n text = str(dict(d.dask))\n assert text.count(\"reify\") == 0\n d2 = x.to_delayed(optimize_graph=False)\n assert dict(d2.dask) == dict(x.dask)\n assert d.compute() == d2.compute()\n\n [d] = b2.to_textfiles(\"foo.txt\", compute=False)\n text = str(dict(d.dask))\n assert text.count(\"reify\") <= 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_from_delayed_test_from_delayed.assert_asum_value_compute": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_from_delayed_test_from_delayed.assert_asum_value_compute", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1118, "end_line": 1130, "span_ids": ["test_from_delayed"], "tokens": 162}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_delayed():\n from dask.delayed import delayed\n\n a, b, c = delayed([1, 2, 3]), delayed([4, 5, 6]), delayed([7, 8, 9])\n bb = from_delayed([a, b, c])\n assert bb.name == from_delayed([a, b, c]).name\n\n assert isinstance(bb, Bag)\n assert list(bb) == [1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n asum_value = delayed(sum)(a)\n asum_item = db.Item.from_delayed(asum_value)\n assert asum_value.compute() == asum_item.compute() == 6", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_from_delayed_iterator_test_range.for_npartitions_in_1_7_.assert_list_b_list_ra": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_from_delayed_iterator_test_range.for_npartitions_in_1_7_.assert_list_b_list_ra", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1133, "end_line": 1157, "span_ids": ["test_from_delayed_iterator", "test_range"], "tokens": 195}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_delayed_iterator():\n from dask.delayed import delayed\n\n def lazy_records(n):\n return ({\"operations\": [1, 2]} for _ in range(n))\n\n delayed_records = delayed(lazy_records, pure=False)\n bag = db.from_delayed([delayed_records(5) for _ in range(5)])\n assert (\n db.compute(\n bag.count(),\n bag.pluck(\"operations\").count(),\n bag.pluck(\"operations\").flatten().count(),\n scheduler=\"sync\",\n )\n == (25, 25, 50)\n )\n\n\ndef test_range():\n for npartitions in [1, 7, 10, 28]:\n b = db.range(100, npartitions=npartitions)\n assert len(b.dask) == npartitions\n assert b.npartitions == npartitions\n assert list(b) == list(range(100))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_zip_test_zip.assert_list_pairs_lis": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_zip_test_zip.assert_list_pairs_lis", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1160, "end_line": 1166, "span_ids": ["test_zip"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"npartitions\", [1, 7, 10, 28])\ndef test_zip(npartitions, hi=1000):\n evens = db.from_sequence(range(0, hi, 2), npartitions=npartitions)\n odds = db.from_sequence(range(1, hi, 2), npartitions=npartitions)\n pairs = db.zip(evens, odds)\n assert pairs.npartitions == npartitions\n assert list(pairs) == list(zip(range(0, hi, 2), range(1, hi, 2)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_repartition_npartitions_test_repartition_npartitions.assert_all_results_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_repartition_npartitions_test_repartition_npartitions.assert_all_results_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1169, "end_line": 1177, "span_ids": ["test_repartition_npartitions"], "tokens": 123}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"nin\", [1, 2, 7, 11, 23])\n@pytest.mark.parametrize(\"nout\", [1, 2, 5, 12, 23])\ndef test_repartition_npartitions(nin, nout):\n b = db.from_sequence(range(100), npartitions=nin)\n c = b.repartition(npartitions=nout)\n assert c.npartitions == nout\n assert_eq(b, c)\n results = dask.get(c.dask, c.__dask_keys__())\n assert all(results)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_repartition_partition_size_test_repartition_partition_size.assert_eq_b_c_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_repartition_partition_size_test_repartition_partition_size.assert_eq_b_c_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1180, "end_line": 1199, "span_ids": ["test_repartition_partition_size"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"nin, nout\",\n [\n (1, 1),\n (2, 1),\n (5, 1),\n (1, 2),\n (2, 2),\n (5, 2),\n (1, 5),\n (2, 5),\n (5, 5),\n ],\n)\ndef test_repartition_partition_size(nin, nout):\n b = db.from_sequence(range(1, 100), npartitions=nin)\n total_mem = sum(b.map_partitions(total_mem_usage).compute())\n c = b.repartition(partition_size=(total_mem // nout))\n assert c.npartitions >= nout\n assert_eq(b, c)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_multiple_repartition_partition_size_test_repartition_input_errors.with_pytest_raises_ValueE.bag_repartition_npartitio": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_multiple_repartition_partition_size_test_repartition_input_errors.with_pytest_raises_ValueE.bag_repartition_npartitio", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", 
"file_type": "text/x-python", "category": "test", "start_line": 1202, "end_line": 1243, "span_ids": ["test_multiple_repartition_partition_size", "test_repartition_names", "test_repartition_input_errors", "test_repartition_partition_size_complex_dtypes"], "tokens": 327}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_multiple_repartition_partition_size():\n b = db.from_sequence(range(1, 100), npartitions=1)\n total_mem = sum(b.map_partitions(total_mem_usage).compute())\n\n c = b.repartition(partition_size=(total_mem // 2))\n assert c.npartitions >= 2\n assert_eq(b, c)\n\n d = c.repartition(partition_size=(total_mem // 5))\n assert d.npartitions >= 5\n assert_eq(c, d)\n\n\ndef test_repartition_partition_size_complex_dtypes():\n np = pytest.importorskip(\"numpy\")\n\n b = db.from_sequence([np.array(range(100)) for _ in range(4)], npartitions=1)\n total_mem = sum(b.map_partitions(total_mem_usage).compute())\n\n new_partition_size = total_mem // 4\n c = b.repartition(partition_size=new_partition_size)\n assert c.npartitions >= 4\n assert_eq(b, c)\n\n\ndef test_repartition_names():\n b = db.from_sequence(range(100), npartitions=5)\n c = b.repartition(2)\n assert b.name != c.name\n\n d = b.repartition(20)\n assert b.name != c.name\n assert c.name != d.name\n\n c = b.repartition(5)\n assert b is c\n\n\ndef test_repartition_input_errors():\n with pytest.raises(ValueError):\n bag = db.from_sequence(range(10))\n bag.repartition(npartitions=5, partition_size=\"5MiB\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_accumulate_test_accumulate.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_accumulate_test_accumulate.None_5", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1246, "end_line": 1258, "span_ids": ["test_accumulate"], "tokens": 242}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_accumulate():\n parts = [[1, 2, 3], [4, 5], [], [6, 7]]\n dsk = dict(((\"test\", i), p) for (i, p) in enumerate(parts))\n b = db.Bag(dsk, \"test\", len(parts))\n r = b.accumulate(add)\n assert r.name == b.accumulate(add).name\n assert r.name != b.accumulate(add, -1).name\n assert r.compute() == [1, 3, 6, 10, 15, 21, 28]\n assert b.accumulate(add, -1).compute() == [-1, 0, 2, 5, 9, 14, 20, 27]\n assert b.accumulate(add).map(inc).compute() == [2, 4, 7, 11, 16, 22, 29]\n\n b = db.from_sequence([1, 2, 3], npartitions=1)\n assert b.accumulate(add).compute() == [1, 3, 6]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_groupby_tasks_test_groupby_tasks.None_2.for_b_in_partitions_.if_a_is_not_b_.assert_not_set_pluck_0_a": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_groupby_tasks_test_groupby_tasks.None_2.for_b_in_partitions_.if_a_is_not_b_.assert_not_set_pluck_0_a", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1261, "end_line": 1288, "span_ids": ["test_groupby_tasks"], "tokens": 300}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_tasks():\n b = db.from_sequence(range(160), npartitions=4)\n out = b.groupby(lambda x: x % 10, max_branch=4, shuffle=\"tasks\")\n partitions = dask.get(out.dask, out.__dask_keys__())\n\n for a in partitions:\n for b in partitions:\n if a is not b:\n assert not set(pluck(0, a)) & set(pluck(0, b))\n\n b = db.from_sequence(range(1000), npartitions=100)\n out = b.groupby(lambda x: x % 123, shuffle=\"tasks\")\n assert len(out.dask) < 100 ** 2\n partitions = dask.get(out.dask, out.__dask_keys__())\n\n for a in partitions:\n for b in partitions:\n if a is not b:\n assert not set(pluck(0, a)) & set(pluck(0, b))\n\n b = db.from_sequence(range(10000), npartitions=345)\n out = b.groupby(lambda x: x % 2834, max_branch=24, shuffle=\"tasks\")\n partitions = dask.get(out.dask, out.__dask_keys__())\n\n for a in partitions:\n for b in partitions:\n if a is not b:\n assert not set(pluck(0, a)) & set(pluck(0, b))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_groupby_tasks_names_test_groupby_tasks_names.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_groupby_tasks_names_test_groupby_tasks_names.None_2", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1291, "end_line": 1303, "span_ids": ["test_groupby_tasks_names"], "tokens": 170}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_tasks_names():\n b = db.from_sequence(range(160), npartitions=4)\n func = lambda x: x % 10\n func2 = lambda x: x % 20\n assert set(b.groupby(func, max_branch=4, shuffle=\"tasks\").dask) == set(\n b.groupby(func, max_branch=4, shuffle=\"tasks\").dask\n )\n assert set(b.groupby(func, max_branch=4, shuffle=\"tasks\").dask) != 
set(\n b.groupby(func, max_branch=2, shuffle=\"tasks\").dask\n )\n assert set(b.groupby(func, max_branch=4, shuffle=\"tasks\").dask) != set(\n b.groupby(func2, max_branch=4, shuffle=\"tasks\").dask\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_groupby_tasks_2_test_groupby_tasks_2.assert_dict_result_gr": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_groupby_tasks_2_test_groupby_tasks_2.assert_dict_result_gr", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1306, "end_line": 1313, "span_ids": ["test_groupby_tasks_2"], "tokens": 114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"size,npartitions,groups\", [(1000, 20, 100), (12345, 234, 1042), (100, 1, 50)]\n)\ndef test_groupby_tasks_2(size, npartitions, groups):\n func = lambda x: x % groups\n b = db.range(size, npartitions=npartitions).groupby(func, shuffle=\"tasks\")\n result = b.compute(scheduler=\"sync\")\n assert dict(result) == groupby(func, range(size))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_groupby_tasks_3_test_reduction_empty.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_groupby_tasks_3_test_reduction_empty.None_1", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1316, "end_line": 1334, "span_ids": ["test_groupby_tasks_3", "test_to_textfiles_empty_partitions", "test_reduction_empty"], "tokens": 210}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_tasks_3():\n func = lambda x: x % 10\n b = db.range(20, npartitions=5).groupby(func, shuffle=\"tasks\", max_branch=2)\n result = b.compute(scheduler=\"sync\")\n assert dict(result) == groupby(func, range(20))\n # assert b.npartitions == 5\n\n\ndef test_to_textfiles_empty_partitions():\n with tmpdir() as d:\n b = db.range(5, npartitions=5).filter(lambda x: x == 1).map(str)\n b.to_textfiles(os.path.join(d, \"*.txt\"))\n assert len(os.listdir(d)) == 5\n\n\ndef test_reduction_empty():\n b = db.from_sequence(range(10), npartitions=100)\n assert_eq(b.filter(lambda x: x % 2 == 0).max(), 8)\n assert_eq(b.filter(lambda x: x % 2 
== 0).min(), 0)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_reduction_empty_aggregate_test_reduction_empty_aggregate.with_pytest_raises_ValueE.b_filter_None_min_split_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_reduction_empty_aggregate_test_reduction_empty_aggregate.with_pytest_raises_ValueE.b_filter_None_min_split_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1337, "end_line": 1345, "span_ids": ["test_reduction_empty_aggregate"], "tokens": 149}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"npartitions\", [1, 2, 4])\ndef test_reduction_empty_aggregate(npartitions):\n b = db.from_sequence([0, 0, 0, 1], npartitions=npartitions).filter(None)\n assert_eq(b.min(split_every=2), 1)\n vals = db.compute(b.min(split_every=2), b.max(split_every=2), scheduler=\"sync\")\n assert vals == (1, 1)\n with pytest.raises(ValueError):\n b = db.from_sequence([0, 0, 0, 0], npartitions=npartitions)\n b.filter(None).min(split_every=2).compute(scheduler=\"sync\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_StrictReal_test_bag_with_single_callable.assert_eq_b_f_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_StrictReal_test_bag_with_single_callable.assert_eq_b_f_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1348, "end_line": 1398, "span_ids": ["test_empty", "test_msgpack_unicode", "test_reduction_with_sparse_matrices", "test_reduction_with_non_comparable_objects", "test_bag_picklable", "test_bag_with_single_callable", "StrictReal.__ne__", "StrictReal.__eq__", "StrictReal"], "tokens": 351}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class StrictReal(int):\n def __eq__(self, other):\n assert isinstance(other, StrictReal)\n return self.real == other.real\n\n def __ne__(self, other):\n assert isinstance(other, StrictReal)\n return self.real != other.real\n\n\ndef test_reduction_with_non_comparable_objects():\n b = db.from_sequence([StrictReal(x) for x in range(10)], partition_size=2)\n assert_eq(b.fold(max, max), StrictReal(9))\n\n\ndef 
test_reduction_with_sparse_matrices():\n sp = pytest.importorskip(\"scipy.sparse\")\n b = db.from_sequence([sp.csr_matrix([0]) for x in range(4)], partition_size=2)\n\n def sp_reduce(a, b):\n return sp.vstack([a, b])\n\n assert b.fold(sp_reduce, sp_reduce).compute(scheduler=\"sync\").shape == (4, 1)\n\n\ndef test_empty():\n list(db.from_sequence([])) == []\n\n\ndef test_bag_picklable():\n from pickle import loads, dumps\n\n b = db.from_sequence(range(100))\n b2 = loads(dumps(b))\n assert b.compute() == b2.compute()\n\n s = b.sum()\n s2 = loads(dumps(s))\n assert s.compute() == s2.compute()\n\n\ndef test_msgpack_unicode():\n b = db.from_sequence([{\"a\": 1}]).groupby(\"a\")\n result = b.compute(scheduler=\"sync\")\n assert dict(result) == {1: [{\"a\": 1}]}\n\n\ndef test_bag_with_single_callable():\n f = lambda: None\n b = db.from_sequence([f])\n assert_eq(b, [f])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_optimize_fuse_keys_test_optimize_fuse_keys.assert_all_k_in_dsk_for_k": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_optimize_fuse_keys_test_optimize_fuse_keys.assert_all_k_in_dsk_for_k", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1401, "end_line": 1410, "span_ids": ["test_optimize_fuse_keys"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_optimize_fuse_keys():\n x = db.range(10, npartitions=2)\n y = x.map(inc)\n z = y.map(inc)\n\n dsk = z.__dask_optimize__(z.dask, z.__dask_keys__())\n assert not y.dask.keys() & dsk.keys()\n\n dsk = z.__dask_optimize__(z.dask, z.__dask_keys__(), fuse_keys=y.__dask_keys__())\n assert all(k in dsk for k in y.__dask_keys__())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_reductions_are_lazy_test_repeated_groupby.assert_valmap_len_dict_c": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_reductions_are_lazy_test_repeated_groupby.assert_valmap_len_dict_c", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1413, "end_line": 1435, "span_ids": ["test_reductions_are_lazy", "test_repeated_groupby"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": "def test_reductions_are_lazy():\n current = [None]\n\n def part():\n for i in range(10):\n current[0] = i\n yield i\n\n def func(part):\n assert current[0] == 0\n return sum(part)\n\n b = Bag({(\"foo\", 0): part()}, \"foo\", 1)\n\n res = b.reduction(func, sum)\n\n assert_eq(res, sum(range(10)))\n\n\ndef test_repeated_groupby():\n b = db.range(10, npartitions=4)\n c = b.groupby(lambda x: x % 3)\n assert valmap(len, dict(c)) == valmap(len, dict(c))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_temporary_directory_test_temporary_directory.with_pool_.with_dask_config_set_temp.assert_any_fn_endswith_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_temporary_directory_test_temporary_directory.with_pool_.with_dask_config_set_temp.assert_any_fn_endswith_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1438, "end_line": 1449, "span_ids": ["test_temporary_directory"], "tokens": 115}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_temporary_directory(tmpdir):\n b = db.range(10, npartitions=4)\n\n # We use a pool to avoid a race condition between the pool close\n # cleaning up files, and the assert below.\n pool = multiprocessing.Pool(4)\n\n with pool:\n with dask.config.set(temporary_directory=str(tmpdir), pool=pool):\n b2 = b.groupby(lambda x: x % 2)\n b2.compute()\n assert any(fn.endswith(\".partd\") for fn in os.listdir(str(tmpdir)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_empty_bag_test_map_keynames.assert_set_b_map_inc___d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_empty_bag_test_map_keynames.assert_set_b_map_inc___d", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1452, "end_line": 1489, "span_ids": ["test_map_partitions_arg", "test_bag_paths", "test_empty_bag", "test_map_keynames"], "tokens": 316}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_empty_bag():\n b = db.from_sequence([])\n assert_eq(b.map(inc).all(), True)\n assert_eq(b.map(inc).any(), False)\n assert_eq(b.map(inc).sum(), False)\n assert_eq(b.map(inc).count(), 
False)\n\n\ndef test_bag_paths():\n b = db.from_sequence([\"abc\", \"123\", \"xyz\"], npartitions=2)\n paths = b.to_textfiles(\"foo*\")\n assert paths[0].endswith(\"foo0\")\n assert paths[1].endswith(\"foo1\")\n\n os.remove(\"foo0\")\n os.remove(\"foo1\")\n\n\ndef test_map_partitions_arg():\n def append_str(partition, s):\n return [x + s for x in partition]\n\n mybag = db.from_sequence([\"a\", \"b\", \"c\"])\n\n assert_eq(mybag.map_partitions(append_str, \"foo\"), [\"afoo\", \"bfoo\", \"cfoo\"])\n assert_eq(\n mybag.map_partitions(append_str, dask.delayed(\"foo\")), [\"afoo\", \"bfoo\", \"cfoo\"]\n )\n\n\ndef test_map_keynames():\n b = db.from_sequence([1, 2, 3])\n d = dict(b.map(inc).__dask_graph__())\n assert \"inc\" in map(dask.utils.key_split, d)\n\n assert set(b.map(inc).__dask_graph__()) != set(\n b.map_partitions(inc).__dask_graph__()\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_map_releases_element_references_as_soon_as_possible_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_map_releases_element_references_as_soon_as_possible_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1492, "end_line": 1551, "span_ids": ["test_bagged_array_delayed", "test_map_releases_element_references_as_soon_as_possible"], "tokens": 477}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_releases_element_references_as_soon_as_possible():\n # Ensure that Bag.map doesn't keep *element* references longer than\n # necessary. Previous map implementations used ``yield``, which would keep\n # a reference to the yielded element until the yielded method resumed (this\n # is just how generator functions work in CPython).\n #\n # See https://github.com/dask/dask/issues/5189\n #\n # We test 2 variant of potential extra references here:\n # 1. Within an element of a partition:\n # At the time of the second `f_create` for each element, the `C` from\n # the first `f_create` should be dropped.\n # 2. 
Within a partition:\n # When the second item within a partition is processed, `C` from the\n # first item should already be dropped.\n class C:\n def __init__(self, i):\n self.i = i\n\n # keep a weakref to all existing instances of `C`\n in_memory = weakref.WeakSet()\n\n def f_create(i):\n # check that there are no instances of `C` left\n assert len(in_memory) == 0\n\n # create new instance\n o = C(i)\n in_memory.add(o)\n\n return o\n\n def f_drop(o):\n # o reference dropped on return, should collect\n return o.i + 100\n\n b = (\n db.from_sequence(range(2), npartitions=1)\n .map(f_create)\n .map(f_drop)\n .map(f_create)\n .map(f_drop)\n .sum()\n )\n try:\n # Disable gc to ensure refcycles don't matter here\n gc.disable()\n b.compute(scheduler=\"sync\")\n finally:\n gc.enable()\n\n\ndef test_bagged_array_delayed():\n da = pytest.importorskip(\"dask.array\")\n\n obj = da.ones(10, chunks=5).to_delayed()[0]\n bag = db.from_delayed(obj)\n b = bag.compute()\n assert_eq(b, [1.0, 1.0, 1.0, 1.0, 1.0])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_random.py_pytest_test_sample_size_k_bigger_than_smallest_partition_size.assert_len_set_li_le": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_random.py_pytest_test_sample_size_k_bigger_than_smallest_partition_size.assert_len_set_li_le", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_random.py", "file_name": "test_random.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 89, "span_ids": ["test_choices_k_bigger_than_bag_size", "test_sample_size_k_bigger_than_smallest_partition_size", "test_sample_size_exactly_k", "imports", "test_sample_k_bigger_than_bag_size", "test_choices_empty_partition", "test_choices_k_equal_bag_size_with_unbalanced_partitions", "test_sample_empty_partition", "test_choices_size_exactly_k", "test_choices_k_bigger_than_smallest_partition_size"], "tokens": 766}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\nimport dask.bag as db\nfrom dask.bag import random\n\n\ndef test_choices_size_exactly_k():\n seq = range(20)\n sut = db.from_sequence(seq, npartitions=3)\n li = list(random.choices(sut, k=2).compute())\n assert len(li) == 2\n assert all(i in seq for i in li)\n\n\ndef test_choices_k_bigger_than_bag_size():\n seq = range(3)\n sut = db.from_sequence(seq, npartitions=3)\n li = list(random.choices(sut, k=4).compute())\n assert len(li) == 4\n assert all(i in seq for i in li)\n\n\ndef test_choices_empty_partition():\n seq = range(10)\n sut = db.from_sequence(seq, partition_size=9)\n sut = sut.repartition(3)\n li = list(random.choices(sut, k=2).compute())\n assert sut.map_partitions(len).compute() == (9, 0, 1)\n assert len(li) == 2\n assert all(i in seq for i in li)\n\n\ndef test_choices_k_bigger_than_smallest_partition_size():\n seq = range(10)\n sut = db.from_sequence(seq, partition_size=9)\n li = list(random.choices(sut, k=2).compute())\n assert 
sut.map_partitions(len).compute() == (9, 1)\n assert len(li) == 2\n assert all(i in seq for i in li)\n\n\ndef test_choices_k_equal_bag_size_with_unbalanced_partitions():\n seq = range(10)\n sut = db.from_sequence(seq, partition_size=9)\n li = list(random.choices(sut, k=10).compute())\n assert sut.map_partitions(len).compute() == (9, 1)\n assert len(li) == 10\n assert all(i in seq for i in li)\n\n\ndef test_sample_size_exactly_k():\n seq = range(20)\n sut = db.from_sequence(seq, npartitions=3)\n li = list(random.sample(sut, k=2).compute())\n assert sut.map_partitions(len).compute() == (7, 7, 6)\n assert len(li) == 2\n assert all(i in seq for i in li)\n assert len(set(li)) == len(li)\n\n\ndef test_sample_k_bigger_than_bag_size():\n seq = range(3)\n sut = db.from_sequence(seq, npartitions=3)\n # should raise: Sample larger than population or is negative\n with pytest.raises(\n ValueError, match=\"Sample larger than population or is negative\"\n ):\n random.sample(sut, k=4).compute()\n\n\ndef test_sample_empty_partition():\n seq = range(10)\n sut = db.from_sequence(seq, partition_size=9)\n sut = sut.repartition(3)\n li = list(random.sample(sut, k=2).compute())\n assert sut.map_partitions(len).compute() == (9, 0, 1)\n assert len(li) == 2\n assert all(i in seq for i in li)\n assert len(set(li)) == len(li)\n\n\ndef test_sample_size_k_bigger_than_smallest_partition_size():\n seq = range(10)\n sut = db.from_sequence(seq, partition_size=9)\n li = list(random.sample(sut, k=2).compute())\n assert sut.map_partitions(len).compute() == (9, 1)\n assert len(li) == 2\n assert all(i in seq for i in li)\n assert len(set(li)) == len(li)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_random.py_test_sample_k_equal_bag_size_with_unbalanced_partitions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_random.py_test_sample_k_equal_bag_size_with_unbalanced_partitions_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_random.py", "file_name": "test_random.py", "file_type": "text/x-python", "category": "test", "start_line": 92, "end_line": 110, "span_ids": ["test_sample_k_equal_bag_size_with_unbalanced_partitions", "test_weighted_sampling_without_replacement"], "tokens": 171}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_sample_k_equal_bag_size_with_unbalanced_partitions():\n seq = range(10)\n sut = db.from_sequence(seq, partition_size=9)\n li = list(random.sample(sut, k=10).compute())\n assert sut.map_partitions(len).compute() == (9, 1)\n assert len(li) == 10\n assert all(i in seq for i in li)\n assert len(set(li)) == len(li)\n\n\ndef test_weighted_sampling_without_replacement():\n population = range(4)\n p = [0.01, 0.33, 0.33, 0.33]\n k = 3\n sampled = random._weighted_sampling_without_replacement(\n population=population, weights=p, k=k\n )\n assert len(set(sampled)) == k", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_text.py_pytest_fmt_bs_enc_path._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_text.py_pytest_fmt_bs_enc_path._", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_text.py", "file_name": "test_text.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 41, "span_ids": ["imports"], "tokens": 334}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\nfrom functools import partial\nfrom tlz import concat\n\nimport dask\nfrom dask import compute\nfrom dask.utils import filetexts\nfrom dask.bytes import utils\nfrom dask.bag.text import read_text\nfrom fsspec.compression import compr\n\ncompute = partial(compute, scheduler=\"sync\")\n\n\nfiles = {\n \".test.accounts.1.json\": (\n '{\"amount\": 100, \"name\": \"Alice\"}\\n'\n '{\"amount\": 200, \"name\": \"Bob\"}\\n'\n '{\"amount\": 300, \"name\": \"Charlie\"}\\n'\n '{\"amount\": 400, \"name\": \"Dennis\"}\\n'\n ),\n \".test.accounts.2.json\": (\n '{\"amount\": 500, \"name\": \"Alice\"}\\n'\n '{\"amount\": 600, \"name\": \"Bob\"}\\n'\n '{\"amount\": 700, \"name\": \"Charlie\"}\\n'\n '{\"amount\": 800, \"name\": \"Dennis\"}\\n'\n ),\n}\n\n\nexpected = \"\".join([files[v] for v in sorted(files)])\n\nfmt_bs = [(fmt, None) for fmt in compr] + [(None, \"10 B\")]\n\nencodings = [\"ascii\", \"utf-8\"] # + ['utf-16', 'utf-16-le', 'utf-16-be']\nfmt_bs_enc_path = [\n (fmt, bs, encoding, include_path)\n for fmt, bs in fmt_bs\n for encoding in encodings\n for include_path in (True, False)\n]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_text.py_test_read_text_test_read_text.with_filetexts_files2_mo.assert_join_line_for_b": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_text.py_test_read_text_test_read_text.with_filetexts_files2_mo.assert_join_line_for_b", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_text.py", "file_name": "test_text.py", "file_type": "text/x-python", "category": "test", "start_line": 44, "end_line": 84, "span_ids": ["test_read_text"], "tokens": 340}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"fmt,bs,encoding,include_path\", fmt_bs_enc_path)\ndef test_read_text(fmt, bs, encoding, include_path):\n if fmt not in utils.compress:\n pytest.skip(\"compress function not provided for %s\" % fmt)\n compress = utils.compress[fmt]\n files2 = dict((k, 
compress(v.encode(encoding))) for k, v in files.items())\n with filetexts(files2, mode=\"b\"):\n b = read_text(\n \".test.accounts.*.json\", compression=fmt, blocksize=bs, encoding=encoding\n )\n (L,) = compute(b)\n assert \"\".join(L) == expected\n\n o = read_text(\n sorted(files),\n compression=fmt,\n blocksize=bs,\n encoding=encoding,\n include_path=include_path,\n )\n b = o.pluck(0) if include_path else o\n (L,) = compute(b)\n assert \"\".join(L) == expected\n if include_path:\n (paths,) = compute(o.pluck(1))\n expected_paths = list(\n concat([[k] * v.count(\"\\n\") for k, v in files.items()])\n )\n assert len(paths) == len(expected_paths)\n for path, expected_path in zip(paths, expected_paths):\n assert path.endswith(expected_path)\n\n blocks = read_text(\n \".test.accounts.*.json\",\n compression=fmt,\n blocksize=bs,\n encoding=encoding,\n collection=False,\n )\n L = compute(*blocks)\n assert \"\".join(line for block in L for line in block) == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_text.py_test_files_per_partition_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_text.py_test_files_per_partition_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_text.py", "file_name": "test_text.py", "file_type": "text/x-python", "category": "test", "start_line": 87, "end_line": 122, "span_ids": ["test_errors", "test_files_per_partition"], "tokens": 343}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_files_per_partition():\n files3 = {\"{:02}.txt\".format(n): \"line from {:02}\" for n in range(20)}\n with filetexts(files3):\n # single-threaded scheduler to ensure the warning happens in the\n # same thread as the pytest.warns\n with dask.config.set({\"scheduler\": \"single-threaded\"}):\n with pytest.warns(UserWarning):\n b = read_text(\"*.txt\", files_per_partition=10)\n l = len(b.take(100, npartitions=1))\n\n assert l == 10, \"10 files should be grouped into one partition\"\n\n assert b.count().compute() == 20, \"All 20 lines should be read\"\n\n with pytest.warns(UserWarning):\n b = read_text(\"*.txt\", files_per_partition=10, include_path=True)\n p = b.take(100, npartitions=1)\n\n p_paths = tuple(zip(*p))[1]\n p_unique_paths = set(p_paths)\n assert len(p_unique_paths) == 10\n\n b_paths = tuple(zip(*b.compute()))[1]\n b_unique_paths = set(b_paths)\n assert len(b_unique_paths) == 20\n\n\ndef test_errors():\n with filetexts({\".test.foo\": b\"Jos\\xe9\\nAlice\"}, mode=\"b\"):\n with pytest.raises(UnicodeDecodeError):\n read_text(\".test.foo\", encoding=\"ascii\").compute()\n\n result = read_text(\".test.foo\", encoding=\"ascii\", errors=\"ignore\")\n result = result.compute(scheduler=\"sync\")\n assert result == [\"Jos\\n\", \"Alice\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/text.py_io_read_text.if_isinstance_blocksize_.blocksize.parse_bytes_blocksize_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/text.py_io_read_text.if_isinstance_blocksize_.blocksize.parse_bytes_blocksize_", "embedding": null, "metadata": {"file_path": "dask/bag/text.py", "file_name": "text.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 90, "span_ids": ["imports", "read_text"], "tokens": 795}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import io\nimport os\nfrom functools import partial\n\nfrom tlz import concat\n\nfrom ..bytes import open_files, read_bytes\nfrom ..delayed import delayed\nfrom ..utils import parse_bytes, system_encoding\nfrom .core import from_delayed\n\ndelayed = delayed(pure=True)\n\n\ndef read_text(\n urlpath,\n blocksize=None,\n compression=\"infer\",\n encoding=system_encoding,\n errors=\"strict\",\n linedelimiter=os.linesep,\n collection=True,\n storage_options=None,\n files_per_partition=None,\n include_path=False,\n):\n \"\"\"Read lines from text files\n\n Parameters\n ----------\n urlpath : string or list\n Absolute or relative filepath(s). Prefix with a protocol like ``s3://``\n to read from alternative filesystems. To read from multiple files you\n can pass a globstring or a list of paths, with the caveat that they\n must all have the same protocol.\n blocksize: None, int, or str\n Size (in bytes) to cut up larger files. Streams by default.\n Can be ``None`` for streaming, an integer number of bytes, or a string\n like \"128MiB\"\n compression: string\n Compression format like 'gzip' or 'xz'. Defaults to 'infer'\n encoding: string\n errors: string\n linedelimiter: string\n collection: bool, optional\n Return dask.bag if True, or list of delayed values if false\n storage_options: dict\n Extra options that make sense to a particular storage connection, e.g.\n host, port, username, password, etc.\n files_per_partition: None or int\n If set, group input files into partitions of the requested size,\n instead of one partition per file. 
Mutually exclusive with blocksize.\n include_path: bool\n Whether or not to include the path in the bag.\n If true, elements are tuples of (line, path).\n Default is False.\n\n Examples\n --------\n >>> b = read_text('myfiles.1.txt') # doctest: +SKIP\n >>> b = read_text('myfiles.*.txt') # doctest: +SKIP\n >>> b = read_text('myfiles.*.txt.gz') # doctest: +SKIP\n >>> b = read_text('s3://bucket/myfiles.*.txt') # doctest: +SKIP\n >>> b = read_text('s3://key:secret@bucket/myfiles.*.txt') # doctest: +SKIP\n >>> b = read_text('hdfs://namenode.example.com/myfiles.*.txt') # doctest: +SKIP\n\n Parallelize a large file by providing the number of uncompressed bytes to\n load into each partition.\n\n >>> b = read_text('largefile.txt', blocksize='10MB') # doctest: +SKIP\n\n Get file paths of the bag by setting include_path=True\n\n >>> b = read_text('myfiles.*.txt', include_path=True) # doctest: +SKIP\n >>> b.take(1) # doctest: +SKIP\n (('first line of the first file', '/home/dask/myfiles.0.txt'),)\n\n Returns\n -------\n dask.bag.Bag or list\n dask.bag.Bag if collection is True or list of Delayed lists otherwise.\n\n See Also\n --------\n from_sequence: Build bag from Python sequence\n \"\"\"\n if blocksize is not None and files_per_partition is not None:\n raise ValueError(\"Only one of blocksize or files_per_partition can be set\")\n if isinstance(blocksize, str):\n blocksize = parse_bytes(blocksize)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/text.py_read_text.files_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/text.py_read_text.files_", "embedding": null, "metadata": {"file_path": "dask/bag/text.py", "file_name": "text.py", "file_type": "text/x-python", "category": "implementation", "start_line": 92, "end_line": 161, "span_ids": ["decode", "attach_path", "read_text", "file_to_blocks"], "tokens": 480}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def read_text(\n urlpath,\n blocksize=None,\n compression=\"infer\",\n encoding=system_encoding,\n errors=\"strict\",\n linedelimiter=os.linesep,\n collection=True,\n storage_options=None,\n files_per_partition=None,\n include_path=False,\n):\n # ... 
other code\n\n files = open_files(\n urlpath,\n mode=\"rt\",\n encoding=encoding,\n errors=errors,\n compression=compression,\n **(storage_options or {})\n )\n if blocksize is None:\n if files_per_partition is None:\n blocks = [\n delayed(list)(delayed(partial(file_to_blocks, include_path))(fil))\n for fil in files\n ]\n else:\n blocks = []\n for start in range(0, len(files), files_per_partition):\n block_files = files[start : (start + files_per_partition)]\n block_lines = delayed(concat)(\n delayed(map)(\n partial(file_to_blocks, include_path),\n block_files,\n )\n )\n blocks.append(block_lines)\n else:\n o = read_bytes(\n urlpath,\n delimiter=linedelimiter.encode(),\n blocksize=blocksize,\n sample=False,\n compression=compression,\n include_path=include_path,\n **(storage_options or {})\n )\n raw_blocks = o[1]\n blocks = [delayed(decode)(b, encoding, errors) for b in concat(raw_blocks)]\n if include_path:\n paths = list(\n concat([[path] * len(raw_blocks[i]) for i, path in enumerate(o[2])])\n )\n blocks = [\n delayed(attach_path)(entry, path) for entry, path in zip(blocks, paths)\n ]\n\n if not blocks:\n raise ValueError(\"No files found\", urlpath)\n\n if collection:\n blocks = from_delayed(blocks)\n\n return blocks\n\n\ndef file_to_blocks(include_path, lazy_file):\n with lazy_file as f:\n for line in f:\n yield (line, lazy_file.path) if include_path else line\n\n\ndef attach_path(block, path):\n for p in block:\n yield (p, path)\n\n\ndef decode(block, encoding, errors):\n text = block.decode(encoding, errors)\n lines = io.StringIO(text)\n return list(lines)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/utils.py__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/utils.py__", "embedding": null, "metadata": {"file_path": "dask/bag/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 8, "span_ids": ["assert_eq"], "tokens": 48}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def assert_eq(a, b):\n if hasattr(a, \"compute\"):\n a = a.compute(scheduler=\"sync\")\n if hasattr(b, \"compute\"):\n b = b.compute(scheduler=\"sync\")\n\n assert a == b", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_from_collections_import_O_is_dask_collection.try_.except_AttributeError_T.return.False": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_from_collections_import_O_is_dask_collection.try_.except_AttributeError_T.return.False", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 41, "span_ids": 
["is_dask_collection", "imports"], "tokens": 238}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from collections import OrderedDict\nfrom collections.abc import Mapping, Iterator\nfrom functools import partial\nfrom hashlib import md5\nfrom operator import getitem\nimport inspect\nimport pickle\nimport os\nimport threading\nimport uuid\nfrom distutils.version import LooseVersion\n\nfrom tlz import merge, groupby, curry, identity\nfrom tlz.functoolz import Compose\n\nfrom .compatibility import is_dataclass, dataclass_fields\nfrom .context import thread_state\nfrom .core import flatten, quote, get as simple_get, literal\nfrom .hashing import hash_buffer_hex\nfrom .utils import Dispatch, ensure_dict, apply\nfrom . import config, local, threaded\n\n\n__all__ = (\n \"DaskMethodsMixin\",\n \"is_dask_collection\",\n \"compute\",\n \"persist\",\n \"optimize\",\n \"visualize\",\n \"tokenize\",\n \"normalize_token\",\n)\n\n\ndef is_dask_collection(x):\n \"\"\"Returns ``True`` if ``x`` is a dask collection\"\"\"\n try:\n return x.__dask_graph__() is not None\n except (AttributeError, TypeError):\n return False", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_DaskMethodsMixin_DaskMethodsMixin.visualize.return.visualize_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_DaskMethodsMixin_DaskMethodsMixin.visualize.return.visualize_", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 44, "end_line": 99, "span_ids": ["DaskMethodsMixin", "DaskMethodsMixin.visualize"], "tokens": 421}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DaskMethodsMixin(object):\n \"\"\"A mixin adding standard dask collection methods\"\"\"\n\n __slots__ = ()\n\n def visualize(self, filename=\"mydask\", format=None, optimize_graph=False, **kwargs):\n \"\"\"Render the computation of this object's task graph using graphviz.\n\n Requires ``graphviz`` to be installed.\n\n Parameters\n ----------\n filename : str or None, optional\n The name of the file to write to disk. If the provided `filename`\n doesn't include an extension, '.png' will be used by default.\n If `filename` is None, no file will be written, and we communicate\n with dot using only pipes.\n format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional\n Format in which to write output file. Default is 'png'.\n optimize_graph : bool, optional\n If True, the graph is optimized before rendering. Otherwise,\n the graph is displayed as is. Default is False.\n color: {None, 'order'}, optional\n Options to color nodes. 
Provide ``cmap=`` keyword for additional\n colormap\n **kwargs\n Additional keyword arguments to forward to ``to_graphviz``.\n\n Examples\n --------\n >>> x.visualize(filename='dask.pdf') # doctest: +SKIP\n >>> x.visualize(filename='dask.pdf', color='order') # doctest: +SKIP\n\n Returns\n -------\n result : IPython.display.Image, IPython.display.SVG, or None\n See dask.dot.dot_graph for more information.\n\n See Also\n --------\n dask.base.visualize\n dask.dot.dot_graph\n\n Notes\n -----\n For more information on optimization see here:\n\n https://docs.dask.org/en/latest/optimize.html\n \"\"\"\n return visualize(\n self,\n filename=filename,\n format=format,\n optimize_graph=optimize_graph,\n **kwargs\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_DaskMethodsMixin.persist_DaskMethodsMixin.persist.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_DaskMethodsMixin.persist_DaskMethodsMixin.persist.return.result", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 101, "end_line": 141, "span_ids": ["DaskMethodsMixin.persist"], "tokens": 349}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DaskMethodsMixin(object):\n\n def persist(self, **kwargs):\n \"\"\"Persist this dask collection into memory\n\n This turns a lazy Dask collection into a Dask collection with the same\n metadata, but now with the results fully computed or actively computing\n in the background.\n\n The action of this function differs significantly depending on the active\n task scheduler. If the task scheduler supports asynchronous computing,\n such as is the case of the dask.distributed scheduler, then persist\n will return *immediately* and the return value's task graph will\n contain Dask Future objects. However if the task scheduler only\n supports blocking computation then the call to persist will *block*\n and the return value's task graph will contain concrete Python results.\n\n This function is particularly useful when using distributed systems,\n because the results will be kept in distributed memory, rather than\n returned to the local process as with compute.\n\n Parameters\n ----------\n scheduler : string, optional\n Which scheduler to use like \"threads\", \"synchronous\" or \"processes\".\n If not provided, the default is to check the global settings first,\n and then fall back to the collection defaults.\n optimize_graph : bool, optional\n If True [default], the graph is optimized before computation.\n Otherwise the graph is run as is. 
This can be useful for debugging.\n **kwargs\n Extra keywords to forward to the scheduler function.\n\n Returns\n -------\n New dask collections backed by in-memory data\n\n See Also\n --------\n dask.base.persist\n \"\"\"\n (result,) = persist(self, traverse=False, **kwargs)\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_DaskMethodsMixin.compute_DaskMethodsMixin.__await__.return.f___await___": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_DaskMethodsMixin.compute_DaskMethodsMixin.__await__.return.f___await___", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 143, "end_line": 185, "span_ids": ["DaskMethodsMixin.__await__", "DaskMethodsMixin.compute"], "tokens": 305}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DaskMethodsMixin(object):\n\n def compute(self, **kwargs):\n \"\"\"Compute this dask collection\n\n This turns a lazy Dask collection into its in-memory equivalent.\n For example a Dask array turns into a NumPy array and a Dask dataframe\n turns into a Pandas dataframe. The entire dataset must fit into memory\n before calling this operation.\n\n Parameters\n ----------\n scheduler : string, optional\n Which scheduler to use like \"threads\", \"synchronous\" or \"processes\".\n If not provided, the default is to check the global settings first,\n and then fall back to the collection defaults.\n optimize_graph : bool, optional\n If True [default], the graph is optimized before computation.\n Otherwise the graph is run as is. 
This can be useful for debugging.\n kwargs\n Extra keywords to forward to the scheduler function.\n\n See Also\n --------\n dask.base.compute\n \"\"\"\n (result,) = compute(self, traverse=False, **kwargs)\n return result\n\n def __await__(self):\n try:\n from distributed import wait, futures_of\n except ImportError as e:\n raise ImportError(\n \"Using async/await with dask requires the `distributed` package\"\n ) from e\n from tornado import gen\n\n @gen.coroutine\n def f():\n if futures_of(self):\n yield wait(self)\n raise gen.Return(self)\n\n return f().__await__()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_compute_as_if_collection_optimization_function.return.getattr_x___dask_optimi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_compute_as_if_collection_optimization_function.return.getattr_x___dask_optimi", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 188, "end_line": 202, "span_ids": ["compute_as_if_collection", "optimization_function", "dont_optimize"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def compute_as_if_collection(cls, dsk, keys, scheduler=None, get=None, **kwargs):\n \"\"\"Compute a graph as if it were of type cls.\n\n Allows for applying the same optimizations and default scheduler.\"\"\"\n schedule = get_scheduler(scheduler=scheduler, cls=cls, get=get)\n dsk2 = optimization_function(cls)(ensure_dict(dsk), keys, **kwargs)\n return schedule(dsk2, keys, **kwargs)\n\n\ndef dont_optimize(dsk, keys, **kwargs):\n return dsk\n\n\ndef optimization_function(x):\n return getattr(x, \"__dask_optimize__\", dont_optimize)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_collections_to_dsk_collections_to_dsk.return.dsk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_collections_to_dsk_collections_to_dsk.return.dsk", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 205, "end_line": 239, "span_ids": ["collections_to_dsk"], "tokens": 239}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def collections_to_dsk(collections, optimize_graph=True, **kwargs):\n \"\"\"\n Convert many collections into a single dask graph, after optimization\n \"\"\"\n 
optimizations = kwargs.pop(\"optimizations\", None) or config.get(\"optimizations\", [])\n\n if optimize_graph:\n groups = groupby(optimization_function, collections)\n\n _opt_list = []\n for opt, val in groups.items():\n dsk, keys = _extract_graph_and_keys(val)\n groups[opt] = (dsk, keys)\n _opt = opt(dsk, keys, **kwargs)\n _opt_list.append(_opt)\n\n for opt in optimizations:\n _opt_list = []\n group = {}\n for k, (dsk, keys) in groups.items():\n _opt = opt(dsk, keys, **kwargs)\n group[k] = (_opt, keys)\n _opt_list.append(_opt)\n groups = group\n\n dsk = merge(\n *map(\n ensure_dict,\n _opt_list,\n )\n )\n else:\n dsk, _ = _extract_graph_and_keys(collections)\n\n return dsk", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py__extract_graph_and_keys__extract_graph_and_keys.return.graph_keys": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py__extract_graph_and_keys__extract_graph_and_keys.return.graph_keys", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 242, "end_line": 257, "span_ids": ["_extract_graph_and_keys"], "tokens": 143}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _extract_graph_and_keys(vals):\n \"\"\"Given a list of dask vals, return a single graph and a list of keys such\n that ``get(dsk, keys)`` is equivalent to ``[v.compute() for v in vals]``.\"\"\"\n from .highlevelgraph import HighLevelGraph\n\n graphs, keys = [], []\n for v in vals:\n graphs.append(v.__dask_graph__())\n keys.append(v.__dask_keys__())\n\n if any(isinstance(graph, HighLevelGraph) for graph in graphs):\n graph = HighLevelGraph.merge(*graphs)\n else:\n graph = merge(*map(ensure_dict, graphs))\n\n return graph, keys", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_unpack_collections_unpack_collections.collections_token.uuid_uuid4_hex": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_unpack_collections_unpack_collections.collections_token.uuid_uuid4_hex", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 260, "end_line": 291, "span_ids": ["unpack_collections"], "tokens": 256}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def unpack_collections(*args, **kwargs):\n \"\"\"Extract collections in preparation 
for compute/persist/etc...\n\n Intended use is to find all collections in a set of (possibly nested)\n python objects, do something to them (compute, etc...), then repackage them\n in equivalent python objects.\n\n Parameters\n ----------\n *args\n Any number of objects. If it is a dask collection, it's extracted and\n added to the list of collections returned. By default, python builtin\n collections are also traversed to look for dask collections (for more\n information see the ``traverse`` keyword).\n traverse : bool, optional\n If True (default), builtin python collections are traversed looking for\n any dask collections they might contain.\n\n Returns\n -------\n collections : list\n A list of all dask collections contained in ``args``\n repack : callable\n A function to call on the transformed collections to repackage them as\n they were in the original ``args``.\n \"\"\"\n traverse = kwargs.pop(\"traverse\", True)\n\n collections = []\n repack_dsk = {}\n\n collections_token = uuid.uuid4().hex\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_unpack_collections._unpack_unpack_collections.return.collections_repack": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_unpack_collections._unpack_unpack_collections.return.collections_repack", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 293, "end_line": 338, "span_ids": ["unpack_collections"], "tokens": 335}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def unpack_collections(*args, **kwargs):\n # ... 
other code\n\n def _unpack(expr):\n if is_dask_collection(expr):\n tok = tokenize(expr)\n if tok not in repack_dsk:\n repack_dsk[tok] = (getitem, collections_token, len(collections))\n collections.append(expr)\n return tok\n\n tok = uuid.uuid4().hex\n if not traverse:\n tsk = quote(expr)\n else:\n # Treat iterators like lists\n typ = list if isinstance(expr, Iterator) else type(expr)\n if typ in (list, tuple, set):\n tsk = (typ, [_unpack(i) for i in expr])\n elif typ in (dict, OrderedDict):\n tsk = (typ, [[_unpack(k), _unpack(v)] for k, v in expr.items()])\n elif is_dataclass(expr) and not isinstance(expr, type):\n tsk = (\n apply,\n typ,\n (),\n (\n dict,\n [\n [f.name, _unpack(getattr(expr, f.name))]\n for f in dataclass_fields(expr)\n ],\n ),\n )\n else:\n return expr\n\n repack_dsk[tok] = tsk\n return tok\n\n out = uuid.uuid4().hex\n repack_dsk[out] = (tuple, [_unpack(i) for i in args])\n\n def repack(results):\n dsk = repack_dsk.copy()\n dsk[collections_token] = quote(results)\n return simple_get(dsk, out)\n\n return collections, repack", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_optimize_optimize.return.repack_postpersists_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_optimize_optimize.return.repack_postpersists_", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 341, "end_line": 392, "span_ids": ["optimize"], "tokens": 413}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def optimize(*args, **kwargs):\n \"\"\"Optimize several dask collections at once.\n\n Returns equivalent dask collections that all share the same merged and\n optimized underlying graph. This can be useful if converting multiple\n collections to delayed objects, or to manually apply the optimizations at\n strategic points.\n\n Note that in most cases you shouldn't need to call this method directly.\n\n Parameters\n ----------\n *args : objects\n Any number of objects. If a dask object, its graph is optimized and\n merged with all those of all other dask objects before returning an\n equivalent dask collection. Non-dask arguments are passed through\n unchanged.\n traverse : bool, optional\n By default dask traverses builtin python collections looking for dask\n objects passed to ``optimize``. For large collections this can be\n expensive. 
If none of the arguments contain any dask objects, set\n ``traverse=False`` to avoid doing this traversal.\n optimizations : list of callables, optional\n Additional optimization passes to perform.\n **kwargs\n Extra keyword arguments to forward to the optimization passes.\n\n Examples\n --------\n >>> import dask as d\n >>> import dask.array as da\n >>> a = da.arange(10, chunks=2).sum()\n >>> b = da.arange(10, chunks=2).mean()\n >>> a2, b2 = d.optimize(a, b)\n\n >>> a2.compute() == a.compute()\n True\n >>> b2.compute() == b.compute()\n True\n \"\"\"\n collections, repack = unpack_collections(*args, **kwargs)\n if not collections:\n return args\n\n dsk = collections_to_dsk(collections, **kwargs)\n\n postpersists = []\n for a in collections:\n r, s = a.__dask_postpersist__()\n postpersists.append(r(dsk, *s))\n\n return repack(postpersists)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_compute_compute.return.repack_f_r_a_for_r_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_compute_compute.return.repack_f_r_a_for_r_", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 395, "end_line": 455, "span_ids": ["compute"], "tokens": 569}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def compute(*args, **kwargs):\n \"\"\"Compute several dask collections at once.\n\n Parameters\n ----------\n args : object\n Any number of objects. If it is a dask object, it's computed and the\n result is returned. By default, python builtin collections are also\n traversed to look for dask objects (for more information see the\n ``traverse`` keyword). Non-dask arguments are passed through unchanged.\n traverse : bool, optional\n By default dask traverses builtin python collections looking for dask\n objects passed to ``compute``. For large collections this can be\n expensive. If none of the arguments contain any dask objects, set\n ``traverse=False`` to avoid doing this traversal.\n scheduler : string, optional\n Which scheduler to use like \"threads\", \"synchronous\" or \"processes\".\n If not provided, the default is to check the global settings first,\n and then fall back to the collection defaults.\n optimize_graph : bool, optional\n If True [default], the optimizations for each collection are applied\n before computation. Otherwise the graph is run as is. 
This can be\n useful for debugging.\n kwargs\n Extra keywords to forward to the scheduler function.\n\n Examples\n --------\n >>> import dask as d\n >>> import dask.array as da\n >>> a = da.arange(10, chunks=2).sum()\n >>> b = da.arange(10, chunks=2).mean()\n >>> d.compute(a, b)\n (45, 4.5)\n\n By default, dask objects inside python collections will also be computed:\n\n >>> d.compute({'a': a, 'b': b, 'c': 1})\n ({'a': 45, 'b': 4.5, 'c': 1},)\n \"\"\"\n traverse = kwargs.pop(\"traverse\", True)\n optimize_graph = kwargs.pop(\"optimize_graph\", True)\n\n collections, repack = unpack_collections(*args, traverse=traverse)\n if not collections:\n return args\n\n schedule = get_scheduler(\n scheduler=kwargs.pop(\"scheduler\", None),\n collections=collections,\n get=kwargs.pop(\"get\", None),\n )\n\n dsk = collections_to_dsk(collections, optimize_graph, **kwargs)\n keys, postcomputes = [], []\n for x in collections:\n keys.append(x.__dask_keys__())\n postcomputes.append(x.__dask_postcompute__())\n\n results = schedule(dsk, keys, **kwargs)\n return repack([f(r, *a) for r, (f, a) in zip(results, postcomputes)])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_visualize_visualize.color.kwargs_get_color_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_visualize_visualize.color.kwargs_get_color_", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 456, "end_line": 533, "span_ids": ["visualize"], "tokens": 621}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def visualize(*args, **kwargs):\n \"\"\"\n Visualize several dask graphs at once.\n\n Requires ``graphviz`` to be installed. All options that are not the dask\n graph(s) should be passed as keyword arguments.\n\n Parameters\n ----------\n dsk : dict(s) or collection(s)\n The dask graph(s) to visualize.\n filename : str or None, optional\n The name of the file to write to disk. If the provided `filename`\n doesn't include an extension, '.png' will be used by default.\n If `filename` is None, no file will be written, and we communicate\n with dot using only pipes.\n format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional\n Format in which to write output file. Default is 'png'.\n optimize_graph : bool, optional\n If True, the graph is optimized before rendering. Otherwise,\n the graph is displayed as is. Default is False.\n color : {None, 'order'}, optional\n Options to color nodes. Provide ``cmap=`` keyword for additional\n colormap\n collapse_outputs : bool, optional\n Whether to collapse output boxes, which often have empty labels.\n Default is False.\n verbose : bool, optional\n Whether to label output and input boxes even if the data aren't chunked.\n Beware: these labels can get very long. 
Default is False.\n **kwargs\n Additional keyword arguments to forward to ``to_graphviz``.\n\n Examples\n --------\n >>> x.visualize(filename='dask.pdf') # doctest: +SKIP\n >>> x.visualize(filename='dask.pdf', color='order') # doctest: +SKIP\n\n Returns\n -------\n result : IPython.display.Image, IPython.display.SVG, or None\n See dask.dot.dot_graph for more information.\n\n See Also\n --------\n dask.dot.dot_graph\n\n Notes\n -----\n For more information on optimization see here:\n\n https://docs.dask.org/en/latest/optimize.html\n \"\"\"\n from dask.dot import dot_graph\n\n filename = kwargs.pop(\"filename\", \"mydask\")\n optimize_graph = kwargs.pop(\"optimize_graph\", False)\n\n dsks = []\n args3 = []\n for arg in args:\n if isinstance(arg, (list, tuple, set)):\n for a in arg:\n if isinstance(a, Mapping):\n dsks.append(a)\n if is_dask_collection(a):\n args3.append(a)\n else:\n if isinstance(arg, Mapping):\n dsks.append(arg)\n if is_dask_collection(arg):\n args3.append(arg)\n\n dsk = dict(collections_to_dsk(args3, optimize_graph=optimize_graph))\n for d in dsks:\n dsk.update(d)\n\n color = kwargs.get(\"color\")\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_visualize.if_color_order__visualize.return.dot_graph_dsk_filename_f": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_visualize.if_color_order__visualize.return.dot_graph_dsk_filename_f", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 535, "end_line": 558, "span_ids": ["visualize"], "tokens": 218}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def visualize(*args, **kwargs):\n # ... 
other code\n\n if color == \"order\":\n from .order import order\n import matplotlib.pyplot as plt\n\n o = order(dsk)\n try:\n cmap = kwargs.pop(\"cmap\")\n except KeyError:\n cmap = plt.cm.RdBu\n if isinstance(cmap, str):\n import matplotlib.pyplot as plt\n\n cmap = getattr(plt.cm, cmap)\n mx = max(o.values()) + 1\n colors = {k: _colorize(cmap(v / mx, bytes=True)) for k, v in o.items()}\n\n kwargs[\"function_attributes\"] = {\n k: {\"color\": v, \"label\": str(o[k])} for k, v in colors.items()\n }\n kwargs[\"data_attributes\"] = {k: {\"color\": v} for k, v in colors.items()}\n elif color:\n raise NotImplementedError(\"Unknown value color=%s\" % color)\n\n return dot_graph(dsk, filename=filename, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_persist_persist.if_inspect_ismethod_sched.try_.else_.try_.else_.if_client_get_schedule.return.repack_results_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_persist_persist.if_inspect_ismethod_sched.try_.else_.try_.else_.if_client_get_schedule.return.repack_results_", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 561, "end_line": 647, "span_ids": ["persist"], "tokens": 763}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def persist(*args, **kwargs):\n \"\"\"Persist multiple Dask collections into memory\n\n This turns lazy Dask collections into Dask collections with the same\n metadata, but now with their results fully computed or actively computing\n in the background.\n\n For example a lazy dask.array built up from many lazy calls will now be a\n dask.array of the same shape, dtype, chunks, etc., but now with all of\n those previously lazy tasks either computed in memory as many small :class:`numpy.array`\n (in the single-machine case) or asynchronously running in the\n background on a cluster (in the distributed case).\n\n This function operates differently if a ``dask.distributed.Client`` exists\n and is connected to a distributed scheduler. In this case this function\n will return as soon as the task graph has been submitted to the cluster,\n but before the computations have completed. Computations will continue\n asynchronously in the background. 
When using this function with the single\n machine scheduler it blocks until the computations have finished.\n\n When using Dask on a single machine you should ensure that the dataset fits\n entirely within memory.\n\n Examples\n --------\n >>> df = dd.read_csv('/path/to/*.csv') # doctest: +SKIP\n >>> df = df[df.name == 'Alice'] # doctest: +SKIP\n >>> df['in-debt'] = df.balance < 0 # doctest: +SKIP\n >>> df = df.persist() # triggers computation # doctest: +SKIP\n\n >>> df.value().min() # future computations are now fast # doctest: +SKIP\n -10\n >>> df.value().max() # doctest: +SKIP\n 100\n\n >>> from dask import persist # use persist function on multiple collections\n >>> a, b = persist(a, b) # doctest: +SKIP\n\n Parameters\n ----------\n *args: Dask collections\n scheduler : string, optional\n Which scheduler to use like \"threads\", \"synchronous\" or \"processes\".\n If not provided, the default is to check the global settings first,\n and then fall back to the collection defaults.\n traverse : bool, optional\n By default dask traverses builtin python collections looking for dask\n objects passed to ``persist``. For large collections this can be\n expensive. If none of the arguments contain any dask objects, set\n ``traverse=False`` to avoid doing this traversal.\n optimize_graph : bool, optional\n If True [default], the graph is optimized before computation.\n Otherwise the graph is run as is. This can be useful for debugging.\n **kwargs\n Extra keywords to forward to the scheduler function.\n\n Returns\n -------\n New dask collections backed by in-memory data\n \"\"\"\n traverse = kwargs.pop(\"traverse\", True)\n optimize_graph = kwargs.pop(\"optimize_graph\", True)\n\n collections, repack = unpack_collections(*args, traverse=traverse)\n if not collections:\n return args\n\n schedule = get_scheduler(\n scheduler=kwargs.pop(\"scheduler\", None), collections=collections\n )\n\n if inspect.ismethod(schedule):\n try:\n from distributed.client import default_client\n except ImportError:\n pass\n else:\n try:\n client = default_client()\n except ValueError:\n pass\n else:\n if client.get == schedule:\n results = client.persist(\n collections, optimize_graph=optimize_graph, **kwargs\n )\n return repack(results)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_persist.dsk_persist.return.repack_results2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_persist.dsk_persist.return.repack_results2_", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 649, "end_line": 660, "span_ids": ["persist"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def persist(*args, **kwargs):\n # ... 
other code\n\n dsk = collections_to_dsk(collections, optimize_graph, **kwargs)\n keys, postpersists = [], []\n for a in collections:\n a_keys = list(flatten(a.__dask_keys__()))\n rebuild, state = a.__dask_postpersist__()\n keys.extend(a_keys)\n postpersists.append((rebuild, a_keys, state))\n\n results = schedule(dsk, keys, **kwargs)\n d = dict(zip(keys, results))\n results2 = [r({k: d[k] for k in ks}, *s) for r, ks, s in postpersists]\n return repack(results2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py___normalize_function.try_.except_TypeError_not_.return._normalize_function_func_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py___normalize_function.try_.except_TypeError_not_.return._normalize_function_func_", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 663, "end_line": 749, "span_ids": ["impl:3", "normalize_seq", "normalize_literal", "persist", "normalize_range", "normalize_function", "normalize_set", "normalize_object", "tokenize", "normalize_dict", "impl:6", "normalize_ordered_dict"], "tokens": 486}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "############\n# Tokenize #\n############\n\n\ndef tokenize(*args, **kwargs):\n \"\"\"Deterministic token\n\n >>> tokenize([1, 2, '3'])\n '7d6a880cd9ec03506eee6973ff551339'\n\n >>> tokenize('Hello') == tokenize('Hello')\n True\n \"\"\"\n if kwargs:\n args = args + (kwargs,)\n return md5(str(tuple(map(normalize_token, args))).encode()).hexdigest()\n\n\nnormalize_token = Dispatch()\nnormalize_token.register(\n (int, float, str, bytes, type(None), type, slice, complex, type(Ellipsis)), identity\n)\n\n\n@normalize_token.register(dict)\ndef normalize_dict(d):\n return normalize_token(sorted(d.items(), key=str))\n\n\n@normalize_token.register(OrderedDict)\ndef normalize_ordered_dict(d):\n return type(d).__name__, normalize_token(list(d.items()))\n\n\n@normalize_token.register(set)\ndef normalize_set(s):\n return normalize_token(sorted(s, key=str))\n\n\n@normalize_token.register((tuple, list))\ndef normalize_seq(seq):\n def func(seq):\n try:\n return list(map(normalize_token, seq))\n except RecursionError:\n return str(uuid.uuid4())\n\n return type(seq).__name__, func(seq)\n\n\n@normalize_token.register(literal)\ndef normalize_literal(lit):\n return \"literal\", normalize_token(lit())\n\n\n@normalize_token.register(range)\ndef normalize_range(r):\n return list(map(normalize_token, [r.start, r.stop, r.step]))\n\n\n@normalize_token.register(object)\ndef normalize_object(o):\n method = getattr(o, \"__dask_tokenize__\", None)\n if method is not None:\n return method()\n return normalize_function(o) if callable(o) else uuid.uuid4().hex\n\n\nfunction_cache = {}\nfunction_cache_lock = threading.Lock()\n\n\ndef normalize_function(func):\n try:\n return function_cache[func]\n except KeyError:\n result = _normalize_function(func)\n if 
len(function_cache) >= 500: # clear half of cache if full\n with function_cache_lock:\n if len(function_cache) >= 500:\n for k in list(function_cache)[::2]:\n del function_cache[k]\n function_cache[func] = result\n return result\n except TypeError: # not hashable\n return _normalize_function(func)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py__normalize_function__normalize_function.if_isinstance_func_Compo.else_.None_1.except_Exception_.return.str_func_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py__normalize_function__normalize_function.if_isinstance_func_Compo.else_.None_1.except_Exception_.return.str_func_", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 752, "end_line": 778, "span_ids": ["_normalize_function"], "tokens": 210}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _normalize_function(func):\n if isinstance(func, Compose):\n first = getattr(func, \"first\", None)\n funcs = reversed((first,) + func.funcs) if first else func.funcs\n return tuple(normalize_function(f) for f in funcs)\n elif isinstance(func, (partial, curry)):\n args = tuple(normalize_token(i) for i in func.args)\n if func.keywords:\n kws = tuple(\n (k, normalize_token(v)) for k, v in sorted(func.keywords.items())\n )\n else:\n kws = None\n return (normalize_function(func.func), args, kws)\n else:\n try:\n result = pickle.dumps(func, protocol=0)\n if b\"__main__\" not in result: # abort on dynamic functions\n return result\n except Exception:\n pass\n try:\n import cloudpickle\n\n return cloudpickle.dumps(func, protocol=0)\n except Exception:\n return str(func)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_register_pandas_register_pandas.normalize_period_dtype.return.normalize_token_dtype_nam": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_register_pandas_register_pandas.normalize_period_dtype.return.normalize_token_dtype_nam", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 781, "end_line": 854, "span_ids": ["register_pandas"], "tokens": 521}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@normalize_token.register_lazy(\"pandas\")\ndef register_pandas():\n import pandas as pd\n\n # 
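# Illustrative sketch: the tokenize()/normalize_token machinery above produces
# deterministic hex tokens, and keyword arguments are folded into the hash.
# (The literal hash is the one quoted in the tokenize docstring above; it is
# specific to this dask version.)
from dask.base import tokenize

assert tokenize("Hello") == tokenize("Hello")
assert tokenize([1, 2, "3"]) == "7d6a880cd9ec03506eee6973ff551339"
assert tokenize(1, x=2) != tokenize(1, x=3)  # kwargs become part of the token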
Intentionally not importing PANDAS_GT_0240 from dask.dataframe._compat\n # to avoid ImportErrors from extra dependencies\n PANDAS_GT_0240 = LooseVersion(pd.__version__) >= LooseVersion(\"0.24.0\")\n\n @normalize_token.register(pd.Index)\n def normalize_index(ind):\n if PANDAS_GT_0240:\n values = ind.array\n else:\n values = ind.values\n return [ind.name, normalize_token(values)]\n\n @normalize_token.register(pd.MultiIndex)\n def normalize_index(ind):\n codes = ind.codes if PANDAS_GT_0240 else ind.levels\n return (\n [ind.name]\n + [normalize_token(x) for x in ind.levels]\n + [normalize_token(x) for x in codes]\n )\n\n @normalize_token.register(pd.Categorical)\n def normalize_categorical(cat):\n return [normalize_token(cat.codes), normalize_token(cat.dtype)]\n\n if PANDAS_GT_0240:\n\n @normalize_token.register(pd.arrays.PeriodArray)\n @normalize_token.register(pd.arrays.DatetimeArray)\n @normalize_token.register(pd.arrays.TimedeltaArray)\n def normalize_period_array(arr):\n return [normalize_token(arr.asi8), normalize_token(arr.dtype)]\n\n @normalize_token.register(pd.arrays.IntervalArray)\n def normalize_interval_array(arr):\n return [\n normalize_token(arr.left),\n normalize_token(arr.right),\n normalize_token(arr.closed),\n ]\n\n @normalize_token.register(pd.Series)\n def normalize_series(s):\n return [\n s.name,\n s.dtype,\n normalize_token(s._data.blocks[0].values),\n normalize_token(s.index),\n ]\n\n @normalize_token.register(pd.DataFrame)\n def normalize_dataframe(df):\n data = [block.values for block in df._data.blocks]\n data.extend([df.columns, df.index])\n return list(map(normalize_token, data))\n\n @normalize_token.register(pd.api.extensions.ExtensionArray)\n def normalize_extension_array(arr):\n import numpy as np\n\n return normalize_token(np.asarray(arr))\n\n # Dtypes\n @normalize_token.register(pd.api.types.CategoricalDtype)\n def normalize_categorical_dtype(dtype):\n return [normalize_token(dtype.categories), normalize_token(dtype.ordered)]\n\n @normalize_token.register(pd.api.extensions.ExtensionDtype)\n def normalize_period_dtype(dtype):\n return normalize_token(dtype.name)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_register_numpy_register_numpy.normalize_ufunc.try_.except_AttributeError_.return.normalize_function_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_register_numpy_register_numpy.normalize_ufunc.try_.except_AttributeError_.return.normalize_function_x_", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 857, "end_line": 924, "span_ids": ["register_numpy"], "tokens": 497}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@normalize_token.register_lazy(\"numpy\")\ndef register_numpy():\n import numpy as np\n\n @normalize_token.register(np.ndarray)\n def normalize_array(x):\n if not x.shape:\n return (x.item(), x.dtype)\n if hasattr(x, \"mode\") and 
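# Illustrative sketch (assumes pandas is installed): the lazily registered
# pandas normalizers above make structurally equal objects tokenize equally.
import pandas as pd
from dask.base import tokenize

df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})
assert tokenize(df) == tokenize(df.copy())  # same blocks, columns, and index
assert tokenize(df) != tokenize(df.rename(columns={"a": "c"}))  # columns count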
getattr(x, \"filename\", None):\n if hasattr(x.base, \"ctypes\"):\n offset = (\n x.ctypes.get_as_parameter().value\n - x.base.ctypes.get_as_parameter().value\n )\n else:\n offset = 0 # root memmap's have mmap object as base\n if hasattr(\n x, \"offset\"\n ): # offset numpy used while opening, and not the offset to the beginning of the file\n offset += getattr(x, \"offset\")\n return (\n x.filename,\n os.path.getmtime(x.filename),\n x.dtype,\n x.shape,\n x.strides,\n offset,\n )\n if x.dtype.hasobject:\n try:\n try:\n # string fast-path\n data = hash_buffer_hex(\n \"-\".join(x.flat).encode(\n encoding=\"utf-8\", errors=\"surrogatepass\"\n )\n )\n except UnicodeDecodeError:\n # bytes fast-path\n data = hash_buffer_hex(b\"-\".join(x.flat))\n except (TypeError, UnicodeDecodeError):\n try:\n data = hash_buffer_hex(pickle.dumps(x, pickle.HIGHEST_PROTOCOL))\n except Exception:\n # pickling not supported, use UUID4-based fallback\n data = uuid.uuid4().hex\n else:\n try:\n data = hash_buffer_hex(x.ravel(order=\"K\").view(\"i1\"))\n except (BufferError, AttributeError, ValueError):\n data = hash_buffer_hex(x.copy().ravel(order=\"K\").view(\"i1\"))\n return (data, x.dtype, x.shape, x.strides)\n\n @normalize_token.register(np.matrix)\n def normalize_matrix(x):\n return type(x).__name__, normalize_array(x.view(type=np.ndarray))\n\n normalize_token.register(np.dtype, repr)\n normalize_token.register(np.generic, repr)\n\n @normalize_token.register(np.ufunc)\n def normalize_ufunc(x):\n try:\n name = x.__name__\n if getattr(np, name) is x:\n return \"np.\" + name\n except AttributeError:\n return normalize_function(x)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_register_scipy_register_scipy.normalize_dok_matrix.return.type_x___name___normali": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_register_scipy_register_scipy.normalize_dok_matrix.return.type_x___name___normali", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 927, "end_line": 949, "span_ids": ["register_scipy"], "tokens": 231}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@normalize_token.register_lazy(\"scipy\")\ndef register_scipy():\n import scipy.sparse as sp\n\n def normalize_sparse_matrix(x, attrs):\n return (\n type(x).__name__,\n normalize_seq((normalize_token(getattr(x, key)) for key in attrs)),\n )\n\n for cls, attrs in [\n (sp.dia_matrix, (\"data\", \"offsets\", \"shape\")),\n (sp.bsr_matrix, (\"data\", \"indices\", \"indptr\", \"blocksize\", \"shape\")),\n (sp.coo_matrix, (\"data\", \"row\", \"col\", \"shape\")),\n (sp.csr_matrix, (\"data\", \"indices\", \"indptr\", \"shape\")),\n (sp.csc_matrix, (\"data\", \"indices\", \"indptr\", \"shape\")),\n (sp.lil_matrix, (\"data\", \"rows\", \"shape\")),\n ]:\n normalize_token.register(cls, partial(normalize_sparse_matrix, attrs=attrs))\n\n @normalize_token.register(sp.dok_matrix)\n 
def normalize_dok_matrix(x):\n return type(x).__name__, normalize_token(sorted(x.items()))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py__colorize__colorize.return._h": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py__colorize__colorize.return._h", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 952, "end_line": 968, "span_ids": ["_colorize"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _colorize(t):\n \"\"\"Convert (r, g, b) triple to \"#RRGGBB\" string\n\n For use with ``visualize(color=...)``\n\n Examples\n --------\n >>> _colorize((255, 255, 255))\n '#FFFFFF'\n >>> _colorize((0, 32, 128))\n '#002080'\n \"\"\"\n t = t[:3]\n i = sum(v * 256 ** (len(t) - i - 1) for i, v in enumerate(t))\n h = hex(int(i))[2:].upper()\n h = \"0\" * (6 - len(h)) + h\n return \"#\" + h", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_named_schedulers_get_err_msg._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_named_schedulers_get_err_msg._", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 971, "end_line": 1009, "span_ids": ["impl:10"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "named_schedulers = {\n \"sync\": local.get_sync,\n \"synchronous\": local.get_sync,\n \"single-threaded\": local.get_sync,\n \"threads\": threaded.get,\n \"threading\": threaded.get,\n}\n\ntry:\n from dask import multiprocessing as dask_multiprocessing\nexcept ImportError:\n pass\nelse:\n named_schedulers.update(\n {\n \"processes\": dask_multiprocessing.get,\n \"multiprocessing\": dask_multiprocessing.get,\n }\n )\n\n\nget_err_msg = \"\"\"\nThe get= keyword has been removed.\n\nPlease use the scheduler= keyword instead with the name of\nthe desired scheduler like 'threads' or 'processes'\n\n x.compute(scheduler='single-threaded')\n x.compute(scheduler='threads')\n x.compute(scheduler='processes')\n\nor with a function that takes the graph and keys\n\n x.compute(scheduler=my_scheduler_function)\n\nor with a Dask client\n\n x.compute(scheduler=client)\n\"\"\".strip()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": 
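# Illustrative sketch: several of the scheduler aliases in named_schedulers
# above resolve to the same callables (internal names, subject to change).
from dask.base import named_schedulers

assert named_schedulers["sync"] is named_schedulers["single-threaded"]
assert named_schedulers["threads"] is named_schedulers["threading"]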
"{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_get_scheduler_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_get_scheduler_", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1012, "end_line": 1089, "span_ids": ["wait", "get_scheduler"], "tokens": 543}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_scheduler(get=None, scheduler=None, collections=None, cls=None):\n \"\"\"Get scheduler function\n\n There are various ways to specify the scheduler to use:\n\n 1. Passing in scheduler= parameters\n 2. Passing these into global configuration\n 3. Using defaults of a dask collection\n\n This function centralizes the logic to determine the right scheduler to use\n from those many options\n \"\"\"\n if get:\n raise TypeError(get_err_msg)\n\n if scheduler is not None:\n if callable(scheduler):\n return scheduler\n elif \"Client\" in type(scheduler).__name__ and hasattr(scheduler, \"get\"):\n return scheduler.get\n elif scheduler.lower() in named_schedulers:\n return named_schedulers[scheduler.lower()]\n elif scheduler.lower() in (\"dask.distributed\", \"distributed\"):\n from distributed.worker import get_client\n\n return get_client().get\n else:\n raise ValueError(\n \"Expected one of [distributed, %s]\"\n % \", \".join(sorted(named_schedulers))\n )\n # else: # try to connect to remote scheduler with this name\n # return get_client(scheduler).get\n\n if config.get(\"scheduler\", None):\n return get_scheduler(scheduler=config.get(\"scheduler\", None))\n\n if config.get(\"get\", None):\n raise ValueError(get_err_msg)\n\n if getattr(thread_state, \"key\", False):\n from distributed.worker import get_worker\n\n return get_worker().client.get\n\n if cls is not None:\n return cls.__dask_scheduler__\n\n if collections:\n collections = [c for c in collections if c is not None]\n if collections:\n get = collections[0].__dask_scheduler__\n if not all(c.__dask_scheduler__ == get for c in collections):\n raise ValueError(\n \"Compute called on multiple collections with \"\n \"differing default schedulers. 
Please specify a \"\n \"scheduler=` parameter explicitly in compute or \"\n \"globally with `dask.config.set`.\"\n )\n return get\n\n return None\n\n\ndef wait(x, timeout=None, return_when=\"ALL_COMPLETED\"):\n \"\"\"Wait until computation has finished\n\n This is a compatibility alias for ``dask.distributed.wait``.\n If it is applied onto Dask collections without Dask Futures or if Dask\n distributed is not installed then it is a no-op\n \"\"\"\n try:\n from distributed import wait\n\n return wait(x, timeout=timeout, return_when=return_when)\n except (ImportError, ValueError):\n return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_blockwise_blockwise.return.subgraph": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_blockwise_blockwise.return.subgraph", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 44, "end_line": 132, "span_ids": ["blockwise"], "tokens": 645}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def blockwise(\n func,\n output,\n output_indices,\n *arrind_pairs,\n numblocks=None,\n concatenate=None,\n new_axes=None,\n dependencies=(),\n **kwargs\n):\n \"\"\"Create a Blockwise symbolic mutable mapping\n\n This is like the ``make_blockwise_graph`` function, but rather than construct a dict, it\n returns a symbolic Blockwise object.\n\n See Also\n --------\n make_blockwise_graph\n Blockwise\n \"\"\"\n new_axes = new_axes or {}\n\n arrind_pairs = list(arrind_pairs)\n\n # Transform indices to canonical elements\n # We use terms like _0, and _1 rather than provided index elements\n unique_indices = {\n i for ii in arrind_pairs[1::2] if ii is not None for i in ii\n } | set(output_indices)\n sub = {k: blockwise_token(i, \".\") for i, k in enumerate(sorted(unique_indices))}\n output_indices = index_subs(tuple(output_indices), sub)\n a_pairs_list = []\n for a in arrind_pairs[1::2]:\n if a is not None:\n val = tuple(a)\n else:\n val = a\n a_pairs_list.append(index_subs(val, sub))\n\n arrind_pairs[1::2] = a_pairs_list\n new_axes = {index_subs((k,), sub)[0]: v for k, v in new_axes.items()}\n\n # Unpack dask values in non-array arguments\n argpairs = toolz.partition(2, arrind_pairs)\n\n # separate argpairs into two separate tuples\n inputs = []\n inputs_indices = []\n for name, index in argpairs:\n inputs.append(name)\n inputs_indices.append(index)\n\n # Unpack delayed objects in kwargs\n new_keys = {n for c in dependencies for n in c.__dask_layers__()}\n if kwargs:\n # replace keys in kwargs with _0 tokens\n new_tokens = tuple(\n blockwise_token(i) for i in range(len(inputs), len(inputs) + len(new_keys))\n )\n sub = dict(zip(new_keys, new_tokens))\n inputs.extend(new_keys)\n inputs_indices.extend((None,) * len(new_keys))\n kwargs = subs(kwargs, sub)\n\n indices = [(k, v) for k, v in zip(inputs, inputs_indices)]\n keys = map(blockwise_token, 
range(len(inputs)))\n\n    # Construct local graph\n    if not kwargs:\n        subgraph = {output: (func,) + tuple(keys)}\n    else:\n        _keys = list(keys)\n        if new_keys:\n            _keys = _keys[: -len(new_keys)]\n        kwargs2 = (dict, list(map(list, kwargs.items())))\n        subgraph = {output: (apply, func, _keys, kwargs2)}\n\n    # Construct final output\n    subgraph = Blockwise(\n        output,\n        output_indices,\n        subgraph,\n        indices,\n        numblocks=numblocks,\n        concatenate=concatenate,\n        new_axes=new_axes,\n    )\n    return subgraph", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise.__getitem___make_blockwise_graph": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise.__getitem___make_blockwise_graph", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 335, "end_line": 613, "span_ids": ["make_blockwise_graph", "Blockwise.get_dependencies", "Blockwise._out_numblocks", "Blockwise.map_tasks", "Blockwise.__getitem__", "Blockwise.__iter__", "Blockwise.cull", "Blockwise.is_materialized", "Blockwise.__len__"], "tokens": 389}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Blockwise(Layer):\n\n    def __getitem__(self, key):\n        return self._dict[key]\n\n    def __iter__(self):\n        return iter(self._dict)\n\n    def __len__(self):\n        return int(np.prod(list(self._out_numblocks().values())))\n\n    def _out_numblocks(self):\n        d = {}\n        out_d = {}\n        indices = {k: v for k, v in self.indices if v is not None}\n        for k, v in self.numblocks.items():\n            for a, b in zip(indices[k], v):\n                d[a] = max(d.get(a, 0), b)\n                if a in self.output_indices:\n                    out_d[a] = d[a]\n\n        return out_d\n\n    def is_materialized(self):\n        return hasattr(self, \"_cached_dict\")\n\n    def get_dependencies(self, key, all_hlg_keys):\n        _ = self._dict  # trigger materialization\n        return self._cached_dict[\"basic_layer\"].get_dependencies(key, all_hlg_keys)\n\n    def cull(self, keys, all_hlg_keys):\n        _ = self._dict  # trigger materialization\n        return self._cached_dict[\"basic_layer\"].cull(keys, all_hlg_keys)\n\n    def map_tasks(self, func):\n        new_indices = []\n        for key, input_indices in self.indices:\n            if input_indices is None:  # A literal\n                new_indices.append((func([key]), None))\n            else:\n                new_indices.append((key, input_indices))\n        ret = copy.copy(self)\n        ret.indices = new_indices\n\n        # This operation invalidates the cache\n        try:\n            del self._cached_dict\n        except AttributeError:\n            pass\n        return ret\n\n\ndef make_blockwise_graph(func, output, out_indices, *arrind_pairs, **kwargs):\n    # ... 
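# Illustrative sketch (assumes dask.array is installed): the symbolic
# blockwise() layer above is what public wrappers such as dask.array.blockwise
# build on. The z_i = func(x_i, literal) pattern, with the literal indexed by
# None as described in make_blockwise_graph below:
import numpy as np
import dask.array as da

x = da.arange(6, chunks=3)
z = da.blockwise(np.add, "i", x, "i", 100, None, dtype=x.dtype)
assert (z.compute() == np.arange(6) + 100).all()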
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_make_blockwise_graph._Tensor_operation_make_blockwise_graph._Tensor_operation": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_make_blockwise_graph._Tensor_operation_make_blockwise_graph._Tensor_operation", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 248, "end_line": 352, "span_ids": ["make_blockwise_graph"], "tokens": 1707}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def make_blockwise_graph(func, output, out_indices, *arrind_pairs, **kwargs):\n \"\"\"Tensor operation\n\n Applies a function, ``func``, across blocks from many different input\n collections. We arrange the pattern with which those blocks interact with\n sets of matching indices. E.g.::\n\n make_blockwise_graph(func, 'z', 'i', 'x', 'i', 'y', 'i')\n\n yield an embarrassingly parallel communication pattern and is read as\n\n $$ z_i = func(x_i, y_i) $$\n\n More complex patterns may emerge, including multiple indices::\n\n make_blockwise_graph(func, 'z', 'ij', 'x', 'ij', 'y', 'ji')\n\n $$ z_{ij} = func(x_{ij}, y_{ji}) $$\n\n Indices missing in the output but present in the inputs results in many\n inputs being sent to one function (see examples).\n\n Examples\n --------\n\n Simple embarrassing map operation\n\n >>> inc = lambda x: x + 1\n >>> make_blockwise_graph(inc, 'z', 'ij', 'x', 'ij', numblocks={'x': (2, 2)}) # doctest: +SKIP\n {('z', 0, 0): (inc, ('x', 0, 0)),\n ('z', 0, 1): (inc, ('x', 0, 1)),\n ('z', 1, 0): (inc, ('x', 1, 0)),\n ('z', 1, 1): (inc, ('x', 1, 1))}\n\n Simple operation on two datasets\n\n >>> add = lambda x, y: x + y\n >>> make_blockwise_graph(add, 'z', 'ij', 'x', 'ij', 'y', 'ij', numblocks={'x': (2, 2),\n ... 'y': (2, 2)}) # doctest: +SKIP\n {('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),\n ('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 1)),\n ('z', 1, 0): (add, ('x', 1, 0), ('y', 1, 0)),\n ('z', 1, 1): (add, ('x', 1, 1), ('y', 1, 1))}\n\n Operation that flips one of the datasets\n\n >>> addT = lambda x, y: x + y.T # Transpose each chunk\n >>> # z_ij ~ x_ij y_ji\n >>> # .. .. .. notice swap\n >>> make_blockwise_graph(addT, 'z', 'ij', 'x', 'ij', 'y', 'ji', numblocks={'x': (2, 2),\n ... 'y': (2, 2)}) # doctest: +SKIP\n {('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),\n ('z', 0, 1): (add, ('x', 0, 1), ('y', 1, 0)),\n ('z', 1, 0): (add, ('x', 1, 0), ('y', 0, 1)),\n ('z', 1, 1): (add, ('x', 1, 1), ('y', 1, 1))}\n\n Dot product with contraction over ``j`` index. Yields list arguments\n\n >>> make_blockwise_graph(dotmany, 'z', 'ik', 'x', 'ij', 'y', 'jk', numblocks={'x': (2, 2),\n ... 
'y': (2, 2)}) # doctest: +SKIP\n {('z', 0, 0): (dotmany, [('x', 0, 0), ('x', 0, 1)],\n [('y', 0, 0), ('y', 1, 0)]),\n ('z', 0, 1): (dotmany, [('x', 0, 0), ('x', 0, 1)],\n [('y', 0, 1), ('y', 1, 1)]),\n ('z', 1, 0): (dotmany, [('x', 1, 0), ('x', 1, 1)],\n [('y', 0, 0), ('y', 1, 0)]),\n ('z', 1, 1): (dotmany, [('x', 1, 0), ('x', 1, 1)],\n [('y', 0, 1), ('y', 1, 1)])}\n\n Pass ``concatenate=True`` to concatenate arrays ahead of time\n\n >>> make_blockwise_graph(f, 'z', 'i', 'x', 'ij', 'y', 'ij', concatenate=True,\n ... numblocks={'x': (2, 2), 'y': (2, 2,)}) # doctest: +SKIP\n {('z', 0): (f, (concatenate_axes, [('x', 0, 0), ('x', 0, 1)], (1,)),\n (concatenate_axes, [('y', 0, 0), ('y', 0, 1)], (1,)))\n ('z', 1): (f, (concatenate_axes, [('x', 1, 0), ('x', 1, 1)], (1,)),\n (concatenate_axes, [('y', 1, 0), ('y', 1, 1)], (1,)))}\n\n Supports Broadcasting rules\n\n >>> make_blockwise_graph(add, 'z', 'ij', 'x', 'ij', 'y', 'ij', numblocks={'x': (1, 2),\n ... 'y': (2, 2)}) # doctest: +SKIP\n {('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),\n ('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 1)),\n ('z', 1, 0): (add, ('x', 0, 0), ('y', 1, 0)),\n ('z', 1, 1): (add, ('x', 0, 1), ('y', 1, 1))}\n\n Support keyword arguments with apply\n\n >>> def f(a, b=0): return a + b\n >>> make_blockwise_graph(f, 'z', 'i', 'x', 'i', numblocks={'x': (2,)}, b=10) # doctest: +SKIP\n {('z', 0): (apply, f, [('x', 0)], {'b': 10}),\n ('z', 1): (apply, f, [('x', 1)], {'b': 10})}\n\n Include literals by indexing with ``None``\n\n >>> make_blockwise_graph(add, 'z', 'i', 'x', 'i', 100, None, numblocks={'x': (2,)}) # doctest: +SKIP\n {('z', 0): (add, ('x', 0), 100),\n ('z', 1): (add, ('x', 1), 100)}\n\n\n See Also\n --------\n dask.array.blockwise\n dask.blockwise.blockwise\n \"\"\"\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_make_blockwise_graph.numblocks_make_blockwise_graph._Create_argument_lists": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_make_blockwise_graph.numblocks_make_blockwise_graph._Create_argument_lists", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 491, "end_line": 579, "span_ids": ["make_blockwise_graph"], "tokens": 790}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def make_blockwise_graph(func, output, out_indices, *arrind_pairs, **kwargs):\n numblocks = kwargs.pop(\"numblocks\")\n concatenate = kwargs.pop(\"concatenate\", None)\n new_axes = kwargs.pop(\"new_axes\", {})\n key_deps = kwargs.pop(\"key_deps\", None)\n non_blockwise_keys = kwargs.pop(\"non_blockwise_keys\", None)\n argpairs = list(toolz.partition(2, arrind_pairs))\n\n if concatenate is True:\n from dask.array.core import concatenate_axes as concatenate\n\n block_names = set()\n all_indices = set()\n for name, ind in argpairs:\n if ind is not None:\n block_names.add(name)\n for x in ind:\n 
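# The first doctest above, restated as executable code (the doctests carry
# doctest: +SKIP because dict ordering used to be unstable across versions):
from dask.blockwise import make_blockwise_graph

inc = lambda x: x + 1
graph = make_blockwise_graph(inc, "z", "i", "x", "i", numblocks={"x": (2,)})
assert graph == {("z", 0): (inc, ("x", 0)), ("z", 1): (inc, ("x", 1))}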
all_indices.add(x)\n assert set(numblocks) == block_names\n\n dummy_indices = all_indices - set(out_indices)\n\n # Dictionary mapping {i: 3, j: 4, ...} for i, j, ... the dimensions\n dims = broadcast_dimensions(argpairs, numblocks)\n for k, v in new_axes.items():\n dims[k] = len(v) if isinstance(v, tuple) else 1\n\n # For each position in the output space, we'll construct a\n # \"coordinate set\" that consists of\n # - the output indices\n # - the dummy indices\n # - the dummy indices, with indices replaced by zeros (for broadcasting), we\n # are careful to only emit a single dummy zero when concatenate=True to not\n # concatenate the same array with itself several times.\n # - a 0 to assist with broadcasting.\n\n index_pos, zero_pos = {}, {}\n for i, ind in enumerate(out_indices):\n index_pos[ind] = i\n zero_pos[ind] = -1\n\n _dummies_list = []\n for i, ind in enumerate(dummy_indices):\n index_pos[ind] = 2 * i + len(out_indices)\n zero_pos[ind] = 2 * i + 1 + len(out_indices)\n reps = 1 if concatenate else dims[ind]\n _dummies_list.append([list(range(dims[ind])), [0] * reps])\n\n # ([0, 1, 2], [0, 0, 0], ...) For a dummy index of dimension 3\n dummies = tuple(itertools.chain.from_iterable(_dummies_list))\n dummies += (0,)\n\n # For each coordinate position in each input, gives the position in\n # the coordinate set.\n coord_maps = []\n\n # Axes along which to concatenate, for each input\n concat_axes = []\n for arg, ind in argpairs:\n if ind is not None:\n coord_maps.append(\n [\n zero_pos[i] if nb == 1 else index_pos[i]\n for i, nb in zip(ind, numblocks[arg])\n ]\n )\n concat_axes.append([n for n, i in enumerate(ind) if i in dummy_indices])\n else:\n coord_maps.append(None)\n concat_axes.append(None)\n\n # Unpack delayed objects in kwargs\n dsk2 = {}\n if kwargs:\n task, dsk2 = unpack_collections(kwargs)\n if dsk2:\n kwargs2 = task\n else:\n kwargs2 = kwargs\n if non_blockwise_keys is not None:\n non_blockwise_keys |= find_all_possible_keys([kwargs2])\n\n # Find all non-blockwise keys in the input arguments\n if non_blockwise_keys is not None:\n for arg, ind in argpairs:\n if ind is None:\n non_blockwise_keys |= find_all_possible_keys([arg])\n\n dsk = {}\n # Create argument lists\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_make_blockwise_graph.for_out_coords_in_itertoo_make_blockwise_graph.return.dsk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_make_blockwise_graph.for_out_coords_in_itertoo_make_blockwise_graph.return.dsk", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 580, "end_line": 613, "span_ids": ["make_blockwise_graph"], "tokens": 272}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def make_blockwise_graph(func, output, out_indices, *arrind_pairs, **kwargs):\n # ... 
other code\n for out_coords in itertools.product(*[range(dims[i]) for i in out_indices]):\n deps = set()\n coords = out_coords + dummies\n args = []\n for cmap, axes, (arg, ind) in zip(coord_maps, concat_axes, argpairs):\n if ind is None:\n args.append(arg)\n else:\n arg_coords = tuple(coords[c] for c in cmap)\n if axes:\n tups = lol_product((arg,), arg_coords)\n deps.update(flatten(tups))\n\n if concatenate:\n tups = (concatenate, tups, axes)\n else:\n tups = (arg,) + arg_coords\n deps.add(tups)\n args.append(tups)\n out_key = (output,) + out_coords\n\n if kwargs:\n val = (apply, func, args, kwargs2)\n else:\n args.insert(0, func)\n val = tuple(args)\n dsk[out_key] = val\n\n if key_deps is not None:\n key_deps[out_key] = deps\n if dsk2:\n dsk.update(ensure_dict(dsk2))\n\n return dsk", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_lol_product_lol_product.if_not_values_.else_.return.lol_product_head_value": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_lol_product_lol_product.if_not_values_.else_.return.lol_product_head_value", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 461, "end_line": 486, "span_ids": ["lol_product"], "tokens": 273}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def lol_product(head, values):\n \"\"\"List of list of tuple keys, similar to `itertools.product`.\n\n Parameters\n ----------\n\n head : tuple\n Prefix prepended to all results.\n values : sequence\n Mix of singletons and lists. 
Each list is substituted with every\n possible value and introduces another level of list in the output.\n Examples\n --------\n\n >>> lol_product(('x',), (1, 2, 3))\n ('x', 1, 2, 3)\n >>> lol_product(('x',), (1, [2, 3], 4, [5, 6])) # doctest: +NORMALIZE_WHITESPACE\n [[('x', 1, 2, 4, 5), ('x', 1, 2, 4, 6)],\n [('x', 1, 3, 4, 5), ('x', 1, 3, 4, 6)]]\n \"\"\"\n if not values:\n return head\n elif isinstance(values[0], list):\n return [lol_product(head + (x,), values[1:]) for x in values[0]]\n else:\n return lol_product(head + (values[0],), values[1:])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_lol_tuples_lol_tuples.if_ind_0_not_in_dummies_.else_.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_lol_tuples_lol_tuples.if_ind_0_not_in_dummies_.else_.return._", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 489, "end_line": 525, "span_ids": ["lol_tuples"], "tokens": 379}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def lol_tuples(head, ind, values, dummies):\n \"\"\"List of list of tuple keys\n\n Parameters\n ----------\n\n head : tuple\n The known tuple so far\n ind : Iterable\n An iterable of indices not yet covered\n values : dict\n Known values for non-dummy indices\n dummies : dict\n Ranges of values for dummy indices\n\n Examples\n --------\n\n >>> lol_tuples(('x',), 'ij', {'i': 1, 'j': 0}, {})\n ('x', 1, 0)\n\n >>> lol_tuples(('x',), 'ij', {'i': 1}, {'j': range(3)})\n [('x', 1, 0), ('x', 1, 1), ('x', 1, 2)]\n\n >>> lol_tuples(('x',), 'ijk', {'i': 1}, {'j': [0, 1, 2], 'k': [0, 1]}) # doctest: +NORMALIZE_WHITESPACE\n [[('x', 1, 0, 0), ('x', 1, 0, 1)],\n [('x', 1, 1, 0), ('x', 1, 1, 1)],\n [('x', 1, 2, 0), ('x', 1, 2, 1)]]\n \"\"\"\n if not ind:\n return head\n if ind[0] not in dummies:\n return lol_tuples(head + (values[ind[0]],), ind[1:], values, dummies)\n else:\n return [\n lol_tuples(head + (v,), ind[1:], values, dummies) for v in dummies[ind[0]]\n ]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_optimize_blockwise_optimize_blockwise.return.out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_optimize_blockwise_optimize_blockwise.return.out", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 528, "end_line": 573, "span_ids": ["optimize_blockwise"], "tokens": 418}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", 
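# The lol_product/lol_tuples doctests above, condensed into assertions:
from dask.blockwise import lol_product, lol_tuples

assert lol_product(("x",), (1, [2, 3])) == [("x", 1, 2), ("x", 1, 3)]
assert lol_tuples(("x",), "ij", {"i": 1}, {"j": range(3)}) == [
    ("x", 1, 0),
    ("x", 1, 1),
    ("x", 1, 2),
]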
"start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def optimize_blockwise(graph, keys=()):\n \"\"\"High level optimization of stacked Blockwise layers\n\n For operations that have multiple Blockwise operations one after the other, like\n ``x.T + 123`` we can fuse these into a single Blockwise operation. This happens\n before any actual tasks are generated, and so can reduce overhead.\n\n This finds groups of Blockwise operations that can be safely fused, and then\n passes them to ``rewrite_blockwise`` for rewriting.\n\n Parameters\n ----------\n full_graph: HighLevelGraph\n keys: Iterable\n The keys of all outputs of all collections.\n Used to make sure that we don't fuse a layer needed by an output\n\n Returns\n -------\n HighLevelGraph\n\n See Also\n --------\n rewrite_blockwise\n \"\"\"\n with warnings.catch_warnings():\n # In some cases, rewrite_blockwise (called internally) will do a bad\n # thing like `string in array[int].\n # See dask/array/tests/test_atop.py::test_blockwise_numpy_arg for\n # an example. NumPy currently raises a warning that 'a' == array([1, 2])\n # will change from returning `False` to `array([False, False])`.\n #\n # Users shouldn't see those warnings, so we filter them.\n # We set the filter here, rather than lower down, to avoid having to\n # create and remove the filter many times inside a tight loop.\n\n # https://github.com/dask/dask/pull/4805#discussion_r286545277 explains\n # why silencing this warning shouldn't cause issues.\n warnings.filterwarnings(\n \"ignore\", \"elementwise comparison failed\", Warning\n ) # FutureWarning or DeprecationWarning\n out = _optimize_blockwise(graph, keys=keys)\n while out.dependencies != graph.dependencies:\n graph = out\n out = _optimize_blockwise(graph, keys=keys)\n return out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py__optimize_blockwise__optimize_blockwise.return.HighLevelGraph_out_depen": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py__optimize_blockwise__optimize_blockwise.return.HighLevelGraph_out_depen", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 731, "end_line": 809, "span_ids": ["_optimize_blockwise"], "tokens": 564}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _optimize_blockwise(full_graph, keys=()):\n keep = {k[0] if type(k) is tuple else k for k in keys}\n layers = full_graph.dicts\n dependents = reverse_dict(full_graph.dependencies)\n roots = {k for k in full_graph.dicts if not dependents.get(k)}\n stack = list(roots)\n\n out = {}\n dependencies = {}\n seen = set()\n io_names = set()\n\n while stack:\n layer = stack.pop()\n if layer in seen or layer not in layers:\n continue\n seen.add(layer)\n\n # Outer loop walks 
through possible output Blockwise layers\n if isinstance(layers[layer], Blockwise):\n blockwise_layers = {layer}\n deps = set(blockwise_layers)\n io_names.add(layers[layer].io_name)\n while deps: # we gather as many sub-layers as we can\n dep = deps.pop()\n if dep not in layers:\n stack.append(dep)\n continue\n if not isinstance(layers[dep], Blockwise):\n stack.append(dep)\n continue\n if dep != layer and dep in keep:\n stack.append(dep)\n continue\n if layers[dep].concatenate != layers[layer].concatenate:\n stack.append(dep)\n continue\n if (\n sum(k == dep for k, ind in layers[layer].indices if ind is not None)\n > 1\n ):\n stack.append(dep)\n continue\n\n # passed everything, proceed\n blockwise_layers.add(dep)\n\n # traverse further to this child's children\n for d in full_graph.dependencies.get(dep, ()):\n # Don't allow reductions to proceed\n output_indices = set(layers[dep].output_indices)\n input_indices = {\n i for _, ind in layers[dep].indices if ind for i in ind\n }\n\n if len(dependents[d]) <= 1 and output_indices.issuperset(\n input_indices\n ):\n deps.add(d)\n else:\n stack.append(d)\n\n # Merge these Blockwise layers into one\n new_layer = rewrite_blockwise([layers[l] for l in blockwise_layers])\n out[layer] = new_layer\n\n new_deps = set()\n for k, v in new_layer.indices:\n if v is None:\n new_deps |= keys_in_tasks(full_graph.dependencies, [k])\n elif k not in io_names:\n new_deps.add(k)\n dependencies[layer] = new_deps\n else:\n out[layer] = layers[layer]\n dependencies[layer] = full_graph.dependencies.get(layer, set())\n stack.extend(full_graph.dependencies.get(layer, ()))\n\n return HighLevelGraph(out, dependencies)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_rewrite_blockwise_rewrite_blockwise.changed.True": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_rewrite_blockwise_rewrite_blockwise.changed.True", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 812, "end_line": 857, "span_ids": ["rewrite_blockwise"], "tokens": 332}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def rewrite_blockwise(inputs):\n \"\"\"Rewrite a stack of Blockwise expressions into a single blockwise expression\n\n Given a set of Blockwise layers, combine them into a single layer. The provided\n layers are expected to fit well together. 
That job is handled by\n ``optimize_blockwise``\n\n Parameters\n ----------\n inputs : List[Blockwise]\n\n Returns\n -------\n blockwise: Blockwise\n\n See Also\n --------\n optimize_blockwise\n \"\"\"\n if len(inputs) == 1:\n # Fast path: if there's only one input we can just use it as-is.\n return inputs[0]\n\n inputs = {inp.output: inp for inp in inputs}\n dependencies = {\n inp.output: {d for d, v in inp.indices if v is not None and d in inputs}\n for inp in inputs.values()\n }\n dependents = reverse_dict(dependencies)\n\n new_index_iter = (\n c + (str(d) if d else \"\") # A, B, ... A1, B1, ...\n for d in itertools.count()\n for c in \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n )\n\n [root] = [k for k, v in dependents.items() if not v]\n\n # Our final results. These will change during fusion below\n indices = list(inputs[root].indices)\n new_axes = inputs[root].new_axes\n concatenate = inputs[root].concatenate\n dsk = dict(inputs[root].dsk)\n\n io_info = None\n changed = True\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_rewrite_blockwise.while_changed__rewrite_blockwise.return.out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_rewrite_blockwise.while_changed__rewrite_blockwise.return.out", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 858, "end_line": 953, "span_ids": ["rewrite_blockwise"], "tokens": 894}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def rewrite_blockwise(inputs):\n # ... other code\n while changed:\n changed = False\n for i, (dep, ind) in enumerate(indices):\n if ind is None:\n continue\n if dep not in inputs:\n continue\n\n changed = True\n\n # Update IO-subgraph information\n if not io_info and inputs[dep].io_name:\n io_info = (inputs[dep].io_name, inputs[dep].io_subgraph)\n\n # Replace _n with dep name in existing tasks\n # (inc, _0) -> (inc, 'b')\n dsk = {k: subs(v, {blockwise_token(i): dep}) for k, v in dsk.items()}\n\n # Remove current input from input indices\n # [('a', 'i'), ('b', 'i')] -> [('a', 'i')]\n _, current_dep_indices = indices.pop(i)\n sub = {\n blockwise_token(i): blockwise_token(i - 1)\n for i in range(i + 1, len(indices) + 1)\n }\n dsk = subs(dsk, sub)\n\n # Change new input_indices to match give index from current computation\n # [('c', j')] -> [('c', 'i')]\n new_indices = inputs[dep].indices\n sub = dict(zip(inputs[dep].output_indices, current_dep_indices))\n contracted = {\n x\n for _, j in new_indices\n if j is not None\n for x in j\n if x not in inputs[dep].output_indices\n }\n extra = dict(zip(contracted, new_index_iter))\n sub.update(extra)\n new_indices = [(x, index_subs(j, sub)) for x, j in new_indices]\n\n # Update new_axes\n for k, v in inputs[dep].new_axes.items():\n new_axes[sub[k]] = v\n\n # Bump new inputs up in list\n sub = {}\n # Map from (id(key), inds or None) -> index in indices. 
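# Hedged end-to-end check of the fusion that optimize_blockwise and
# rewrite_blockwise perform: stacked elementwise layers over one input
# collapse into fewer high-level layers. Exact layer counts vary across dask
# versions, so only the direction is asserted.
import dask.array as da
from dask.core import flatten
from dask.blockwise import optimize_blockwise

x = da.ones((4, 4), chunks=(2, 2))
y = (x + 1) * 2  # two stacked Blockwise layers

keys = list(flatten(y.__dask_keys__()))
fused = optimize_blockwise(y.__dask_graph__(), keys=keys)
assert len(fused.layers) <= len(y.__dask_graph__().layers)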
Used to deduplicate indices.\n index_map = {(id(k), inds): n for n, (k, inds) in enumerate(indices)}\n for i, index in enumerate(new_indices):\n id_key = (id(index[0]), index[1])\n if id_key in index_map: # use old inputs if available\n sub[blockwise_token(i)] = blockwise_token(index_map[id_key])\n else:\n index_map[id_key] = len(indices)\n sub[blockwise_token(i)] = blockwise_token(len(indices))\n indices.append(index)\n new_dsk = subs(inputs[dep].dsk, sub)\n\n # indices.extend(new_indices)\n dsk.update(new_dsk)\n\n # De-duplicate indices like [(a, ij), (b, i), (a, ij)] -> [(a, ij), (b, i)]\n # Make sure that we map everything else appropriately as we remove inputs\n new_indices = []\n seen = {}\n sub = {} # like {_0: _0, _1: _0, _2: _1}\n for i, x in enumerate(indices):\n if x[1] is not None and x in seen:\n sub[i] = seen[x]\n else:\n if x[1] is not None:\n seen[x] = len(new_indices)\n sub[i] = len(new_indices)\n new_indices.append(x)\n\n sub = {blockwise_token(k): blockwise_token(v) for k, v in sub.items()}\n dsk = {k: subs(v, sub) for k, v in dsk.items()}\n\n indices_check = {k for k, v in indices if v is not None}\n numblocks = toolz.merge([inp.numblocks for inp in inputs.values()])\n numblocks = {k: v for k, v in numblocks.items() if v is None or k in indices_check}\n\n out = Blockwise(\n root,\n inputs[root].output_indices,\n dsk,\n new_indices,\n numblocks=numblocks,\n new_axes=new_axes,\n concatenate=concatenate,\n io_subgraph=io_info,\n )\n\n return out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_zero_broadcast_dimensions_zero_broadcast_dimensions.return.homogeneous_deepmap_f_lo": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_zero_broadcast_dimensions_zero_broadcast_dimensions.return.homogeneous_deepmap_f_lo", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 793, "end_line": 812, "span_ids": ["zero_broadcast_dimensions"], "tokens": 307}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def zero_broadcast_dimensions(lol, nblocks):\n \"\"\"\n\n >>> lol = [('x', 1, 0), ('x', 1, 1), ('x', 1, 2)]\n >>> nblocks = (4, 1, 2) # note singleton dimension in second place\n >>> lol = [[('x', 1, 0, 0), ('x', 1, 0, 1)],\n ... [('x', 1, 1, 0), ('x', 1, 1, 1)],\n ... 
[('x', 1, 2, 0), ('x', 1, 2, 1)]]\n\n >>> zero_broadcast_dimensions(lol, nblocks) # doctest: +NORMALIZE_WHITESPACE\n [[('x', 1, 0, 0), ('x', 1, 0, 1)],\n [('x', 1, 0, 0), ('x', 1, 0, 1)],\n [('x', 1, 0, 0), ('x', 1, 0, 1)]]\n\n See Also\n --------\n lol_tuples\n \"\"\"\n f = lambda t: (t[0],) + tuple(0 if d == 1 else i for i, d in zip(t[1:], nblocks))\n return homogeneous_deepmap(f, lol)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_broadcast_dimensions_broadcast_dimensions.return.toolz_valmap_toolz_first_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_broadcast_dimensions_broadcast_dimensions.return.toolz_valmap_toolz_first_", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 815, "end_line": 872, "span_ids": ["broadcast_dimensions"], "tokens": 554}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def broadcast_dimensions(argpairs, numblocks, sentinels=(1, (1,)), consolidate=None):\n \"\"\"Find block dimensions from arguments\n\n Parameters\n ----------\n argpairs: iterable\n name, ijk index pairs\n numblocks: dict\n maps {name: number of blocks}\n sentinels: iterable (optional)\n values for singleton dimensions\n consolidate: func (optional)\n use this to reduce each set of common blocks into a smaller set\n\n Examples\n --------\n >>> argpairs = [('x', 'ij'), ('y', 'ji')]\n >>> numblocks = {'x': (2, 3), 'y': (3, 2)}\n >>> broadcast_dimensions(argpairs, numblocks)\n {'i': 2, 'j': 3}\n\n Supports numpy broadcasting rules\n\n >>> argpairs = [('x', 'ij'), ('y', 'ij')]\n >>> numblocks = {'x': (2, 1), 'y': (1, 3)}\n >>> broadcast_dimensions(argpairs, numblocks)\n {'i': 2, 'j': 3}\n\n Works in other contexts too\n\n >>> argpairs = [('x', 'ij'), ('y', 'ij')]\n >>> d = {'x': ('Hello', 1), 'y': (1, (2, 3))}\n >>> broadcast_dimensions(argpairs, d)\n {'i': 'Hello', 'j': (2, 3)}\n \"\"\"\n # List like [('i', 2), ('j', 1), ('i', 1), ('j', 2)]\n argpairs2 = [(a, ind) for a, ind in argpairs if ind is not None]\n L = toolz.concat(\n [\n zip(inds, dims)\n for (x, inds), (x, dims) in toolz.join(\n toolz.first, argpairs2, toolz.first, numblocks.items()\n )\n ]\n )\n\n g = toolz.groupby(0, L)\n g = dict((k, set([d for i, d in v])) for k, v in g.items())\n\n g2 = dict((k, v - set(sentinels) if len(v) > 1 else v) for k, v in g.items())\n\n if consolidate:\n return toolz.valmap(consolidate, g2)\n\n if g2 and not set(map(len, g2.values())) == set([1]):\n raise ValueError(\"Shapes do not align %s\" % g)\n\n return toolz.valmap(toolz.first, g2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_fuse_roots_": {"__data__": {"id_": 
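# The broadcasting doctest above as executable code: singleton block counts
# broadcast away, mirroring NumPy's size-1 dimension rules.
from dask.blockwise import broadcast_dimensions

argpairs = [("x", "ij"), ("y", "ij")]
numblocks = {"x": (2, 1), "y": (1, 3)}
assert broadcast_dimensions(argpairs, numblocks) == {"i": 2, "j": 3}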
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_fuse_roots_", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 875, "end_line": 930, "span_ids": ["fuse_roots"], "tokens": 396}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def fuse_roots(graph: HighLevelGraph, keys: list):\n \"\"\"\n Fuse nearby layers if they don't have dependencies\n\n Often Blockwise sections of the graph fill out all of the computation\n except for the initial data access or data loading layers::\n\n Large Blockwise Layer\n | | |\n X Y Z\n\n This can be troublesome because X, Y, and Z tasks may be executed on\n different machines, and then require communication to move around.\n\n This optimization identifies this situation, lowers all of the graphs to\n concrete dicts, and then calls ``fuse`` on them, with a width equal to the\n number of layers like X, Y, and Z.\n\n This is currently used within array and dataframe optimizations.\n\n Parameters\n ----------\n graph: HighLevelGraph\n The full graph of the computation\n keys: list\n The output keys of the computation, to be passed on to fuse\n\n See Also\n --------\n Blockwise\n fuse\n \"\"\"\n layers = graph.layers.copy()\n dependencies = graph.dependencies.copy()\n dependents = reverse_dict(dependencies)\n\n for name, layer in graph.layers.items():\n deps = graph.dependencies[name]\n if (\n isinstance(layer, Blockwise)\n and len(deps) > 1\n and not any(dependencies[dep] for dep in deps) # no need to fuse if 0 or 1\n and all(len(dependents[dep]) == 1 for dep in deps)\n ):\n new = toolz.merge(layer, *[layers[dep] for dep in deps])\n new, _ = fuse(new, keys, ave_width=len(deps))\n\n for dep in deps:\n del layers[dep]\n del dependencies[dep]\n\n layers[name] = new\n dependencies[name] = set()\n\n return HighLevelGraph(layers, dependencies)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/__init__.py_from_distutils_version_im_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/__init__.py_from_distutils_version_im_", "embedding": null, "metadata": {"file_path": "dask/bytes/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 18, "span_ids": ["imports"], "tokens": 138}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from distutils.version import LooseVersion\n\ntry:\n import fsspec\nexcept ImportError as e:\n fsspec = None\n\nif fsspec is None or LooseVersion(fsspec.__version__) < LooseVersion(\"0.3.3\"):\n raise ImportError(\n \"fsspec is required to use any 
file-system functionality.\"\n \" Please install using\\n\"\n \"conda install -c conda-forge 'fsspec>=0.3.3'\\n\"\n \"or\\n\"\n \"python -m pip install 'fsspec>=0.3.3'\"\n )\n\nfrom .core import read_bytes, open_file, open_files", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/_compatibility.py__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/_compatibility.py__", "embedding": null, "metadata": {"file_path": "dask/bytes/_compatibility.py", "file_name": "_compatibility.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 6, "span_ids": ["imports"], "tokens": 44}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from distutils.version import LooseVersion\nimport fsspec\n\nFSSPEC_VERSION = LooseVersion(fsspec.__version__)\nFSSPEC_042 = FSSPEC_VERSION > \"0.4.1\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/core.py_os_is_integer": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/core.py_os_is_integer", "embedding": null, "metadata": {"file_path": "dask/bytes/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 25, "span_ids": ["imports"], "tokens": 234}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import os\nimport copy\n\nfrom fsspec.core import ( # noqa: F401\n OpenFile, # noqa: F401\n open_files, # noqa: F401\n get_fs_token_paths, # noqa: F401\n expand_paths_if_needed, # noqa: F401\n _expand_paths, # noqa: F401\n get_compression, # noqa: F401\n)\nfrom fsspec.core import open as open_file # noqa: F401\nfrom fsspec.utils import ( # noqa: F401\n read_block, # noqa: F401\n seek_delimiter, # noqa: F401\n infer_storage_options, # noqa: F401\n stringify_path, # noqa: F401\n infer_compression, # noqa: F401\n)\nfrom fsspec import get_mapper # noqa: F401\nfrom fsspec.compression import compr # noqa: F401\n\nfrom ..base import tokenize\nfrom ..delayed import delayed\nfrom ..utils import is_integer, parse_bytes", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/core.py_read_bytes_read_bytes.if_blocksize_is_not_None_.blocksize.int_blocksize_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/core.py_read_bytes_read_bytes.if_blocksize_is_not_None_.blocksize.int_blocksize_", "embedding": null, "metadata": {"file_path": "dask/bytes/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 28, "end_line": 107, "span_ids": ["read_bytes"], "tokens": 764}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def read_bytes(\n urlpath,\n delimiter=None,\n not_zero=False,\n blocksize=\"128 MiB\",\n sample=\"10 kiB\",\n compression=None,\n include_path=False,\n **kwargs\n):\n \"\"\"Given a path or paths, return delayed objects that read from those paths.\n\n The path may be a filename like ``'2015-01-01.csv'`` or a globstring\n like ``'2015-*-*.csv'``.\n\n The path may be preceded by a protocol, like ``s3://`` or ``hdfs://`` if\n those libraries are installed.\n\n This cleanly breaks data by a delimiter if given, so that block boundaries\n start directly after a delimiter and end on the delimiter.\n\n Parameters\n ----------\n urlpath : string or list\n Absolute or relative filepath(s). Prefix with a protocol like ``s3://``\n to read from alternative filesystems. To read from multiple files you\n can pass a globstring or a list of paths, with the caveat that they\n must all have the same protocol.\n delimiter : bytes\n An optional delimiter, like ``b'\\\\n'`` on which to split blocks of\n bytes.\n not_zero : bool\n Force seek of start-of-file delimiter, discarding header.\n blocksize : int, str\n Chunk size in bytes, defaults to \"128 MiB\"\n compression : string or None\n String like 'gzip' or 'xz'. 
Must support efficient random access.\n sample : int, string, or boolean\n Whether or not to return a header sample.\n Values can be ``False`` for \"no sample requested\"\n Or an integer or string value like ``2**20`` or ``\"1 MiB\"``\n include_path : bool\n Whether or not to include the path with the bytes representing a particular file.\n Default is False.\n **kwargs : dict\n Extra options that make sense to a particular storage connection, e.g.\n host, port, username, password, etc.\n\n Examples\n --------\n >>> sample, blocks = read_bytes('2015-*-*.csv', delimiter=b'\\\\n') # doctest: +SKIP\n >>> sample, blocks = read_bytes('s3://bucket/2015-*-*.csv', delimiter=b'\\\\n') # doctest: +SKIP\n >>> sample, paths, blocks = read_bytes('2015-*-*.csv', include_path=True) # doctest: +SKIP\n\n Returns\n -------\n sample : bytes\n The sample header\n blocks : list of lists of ``dask.Delayed``\n Each list corresponds to a file, and each delayed object computes to a\n block of bytes from that file.\n paths : list of strings, only included if include_path is True\n List of same length as blocks, where each item is the path to the file\n represented in the corresponding block.\n\n \"\"\"\n if not isinstance(urlpath, (str, list, tuple, os.PathLike)):\n raise TypeError(\"Path should be a string, os.PathLike, list or tuple\")\n\n fs, fs_token, paths = get_fs_token_paths(urlpath, mode=\"rb\", storage_options=kwargs)\n\n if len(paths) == 0:\n raise IOError(\"%s resolved to no files\" % urlpath)\n\n if blocksize is not None:\n if isinstance(blocksize, str):\n blocksize = parse_bytes(blocksize)\n if not is_integer(blocksize):\n raise TypeError(\"blocksize must be an integer\")\n blocksize = int(blocksize)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/core.py_read_bytes.if_blocksize_is_None__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/core.py_read_bytes.if_blocksize_is_None__", "embedding": null, "metadata": {"file_path": "dask/bytes/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 109, "end_line": 189, "span_ids": ["read_bytes", "read_block_from_file"], "tokens": 620}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def read_bytes(\n urlpath,\n delimiter=None,\n not_zero=False,\n blocksize=\"128 MiB\",\n sample=\"10 kiB\",\n compression=None,\n include_path=False,\n **kwargs\n):\n # ... other code\n\n if blocksize is None:\n offsets = [[0]] * len(paths)\n lengths = [[None]] * len(paths)\n else:\n offsets = []\n lengths = []\n for path in paths:\n if compression == \"infer\":\n comp = infer_compression(path)\n else:\n comp = compression\n if comp is not None:\n raise ValueError(\n \"Cannot do chunked reads on compressed files. \"\n \"To read, set blocksize=None\"\n )\n size = fs.info(path)[\"size\"]\n if size is None:\n raise ValueError(\n \"Backing filesystem couldn't determine file size, cannot \"\n \"do chunked reads. 
To read, set blocksize=None.\"\n )\n off = list(range(0, size, blocksize))\n length = [blocksize] * len(off)\n if not_zero:\n off[0] = 1\n length[0] -= 1\n offsets.append(off)\n lengths.append(length)\n\n delayed_read = delayed(read_block_from_file)\n\n out = []\n for path, offset, length in zip(paths, offsets, lengths):\n token = tokenize(fs_token, delimiter, path, fs.ukey(path), compression, offset)\n keys = [\"read-block-%s-%s\" % (o, token) for o in offset]\n values = [\n delayed_read(\n OpenFile(fs, path, compression=compression),\n o,\n l,\n delimiter,\n dask_key_name=key,\n )\n for o, key, l in zip(offset, keys, length)\n ]\n out.append(values)\n\n if sample:\n if sample is True:\n sample = \"10 kiB\" # backwards compatibility\n if isinstance(sample, str):\n sample = parse_bytes(sample)\n with OpenFile(fs, paths[0], compression=compression) as f:\n # read block without seek (because we start at zero)\n if delimiter is None:\n sample = f.read(sample)\n else:\n sample_buff = f.read(sample)\n while True:\n new = f.read(sample)\n if not new:\n break\n if delimiter in new:\n sample_buff = (\n sample_buff + new.split(delimiter, 1)[0] + delimiter\n )\n break\n sample_buff = sample_buff + new\n sample = sample_buff\n if include_path:\n return sample, out, paths\n return sample, out\n\n\ndef read_block_from_file(lazy_file, off, bs, delimiter):\n with copy.copy(lazy_file) as f:\n if off == 0 and bs is None:\n return f.read()\n return read_block(f, off, bs, delimiter)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_bytes_utils.py_io_test_read_block.for_ols_in_0_3_3_.assert_b_join_filter_No": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_bytes_utils.py_io_test_read_block.for_ols_in_0_3_3_.assert_b_join_filter_No", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_bytes_utils.py", "file_name": "test_bytes_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 33, "span_ids": ["imports", "test_read_block"], "tokens": 391}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import io\nimport os\nimport pathlib\n\nimport pytest\n\nfrom dask.bytes.core import (\n read_block,\n seek_delimiter,\n infer_storage_options,\n stringify_path,\n)\n\n\ndef test_read_block():\n delimiter = b\"\\n\"\n data = delimiter.join([b\"123\", b\"456\", b\"789\"])\n f = io.BytesIO(data)\n\n assert read_block(f, 1, 2) == b\"23\"\n assert read_block(f, 0, 1, delimiter=b\"\\n\") == b\"123\\n\"\n assert read_block(f, 0, 2, delimiter=b\"\\n\") == b\"123\\n\"\n assert read_block(f, 0, 3, delimiter=b\"\\n\") == b\"123\\n\"\n assert read_block(f, 0, 5, delimiter=b\"\\n\") == b\"123\\n456\\n\"\n assert read_block(f, 0, 8, delimiter=b\"\\n\") == b\"123\\n456\\n789\"\n assert read_block(f, 0, 100, delimiter=b\"\\n\") == b\"123\\n456\\n789\"\n assert read_block(f, 1, 1, delimiter=b\"\\n\") == b\"\"\n assert read_block(f, 1, 5, delimiter=b\"\\n\") == b\"456\\n\"\n assert 
read_block(f, 1, 8, delimiter=b\"\\n\") == b\"456\\n789\"\n\n for ols in [[(0, 3), (3, 3), (6, 3), (9, 2)], [(0, 4), (4, 4), (8, 4)]]:\n out = [read_block(f, o, l, b\"\\n\") for o, l in ols]\n assert b\"\".join(filter(None, out)) == data", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_bytes_utils.py_test_seek_delimiter_endline_test_seek_delimiter_endline.assert_f_tell_7": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_bytes_utils.py_test_seek_delimiter_endline_test_seek_delimiter_endline.assert_f_tell_7", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_bytes_utils.py", "file_name": "test_bytes_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 36, "end_line": 60, "span_ids": ["test_seek_delimiter_endline"], "tokens": 233}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_seek_delimiter_endline():\n f = io.BytesIO(b\"123\\n456\\n789\")\n\n # if at zero, stay at zero\n seek_delimiter(f, b\"\\n\", 5)\n assert f.tell() == 0\n\n # choose the first block\n for bs in [1, 5, 100]:\n f.seek(1)\n seek_delimiter(f, b\"\\n\", blocksize=bs)\n assert f.tell() == 4\n\n # handle long delimiters well, even with short blocksizes\n f = io.BytesIO(b\"123abc456abc789\")\n for bs in [1, 2, 3, 4, 5, 6, 10]:\n f.seek(1)\n seek_delimiter(f, b\"abc\", blocksize=bs)\n assert f.tell() == 6\n\n # End at the end\n f = io.BytesIO(b\"123\\n456\")\n f.seek(5)\n seek_delimiter(f, b\"\\n\", 5)\n assert f.tell() == 7", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_bytes_utils.py_test_infer_storage_options_test_infer_storage_options.None_2.infer_storage_options_hd": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_bytes_utils.py_test_infer_storage_options_test_infer_storage_options.None_2.infer_storage_options_hd", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_bytes_utils.py", "file_name": "test_bytes_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 63, "end_line": 115, "span_ids": ["test_infer_storage_options"], "tokens": 599}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_infer_storage_options():\n so = infer_storage_options(\"/mnt/datasets/test.csv\")\n assert so.pop(\"protocol\") == \"file\"\n assert so.pop(\"path\") == \"/mnt/datasets/test.csv\"\n assert not so\n\n assert 
infer_storage_options(\"./test.csv\")[\"path\"] == \"./test.csv\"\n assert infer_storage_options(\"../test.csv\")[\"path\"] == \"../test.csv\"\n\n so = infer_storage_options(\"C:\\\\test.csv\")\n assert so.pop(\"protocol\") == \"file\"\n assert so.pop(\"path\") == \"C:\\\\test.csv\"\n assert not so\n\n assert infer_storage_options(\"d:\\\\test.csv\")[\"path\"] == \"d:\\\\test.csv\"\n assert infer_storage_options(\"\\\\test.csv\")[\"path\"] == \"\\\\test.csv\"\n assert infer_storage_options(\".\\\\test.csv\")[\"path\"] == \".\\\\test.csv\"\n assert infer_storage_options(\"test.csv\")[\"path\"] == \"test.csv\"\n\n so = infer_storage_options(\n \"hdfs://username:pwd@Node:123/mnt/datasets/test.csv?q=1#fragm\",\n inherit_storage_options={\"extra\": \"value\"},\n )\n assert so.pop(\"protocol\") == \"hdfs\"\n assert so.pop(\"username\") == \"username\"\n assert so.pop(\"password\") == \"pwd\"\n assert so.pop(\"host\") == \"Node\"\n assert so.pop(\"port\") == 123\n assert so.pop(\"path\") == \"/mnt/datasets/test.csv#fragm\"\n assert so.pop(\"url_query\") == \"q=1\"\n assert so.pop(\"url_fragment\") == \"fragm\"\n assert so.pop(\"extra\") == \"value\"\n assert not so\n\n so = infer_storage_options(\"hdfs://User-name@Node-name.com/mnt/datasets/test.csv\")\n assert so.pop(\"username\") == \"User-name\"\n assert so.pop(\"host\") == \"Node-name.com\"\n\n u = \"http://127.0.0.1:8080/test.csv\"\n assert infer_storage_options(u) == {\"protocol\": \"http\", \"path\": u}\n\n # For s3 and gcs the netloc is actually the bucket name, so we want to\n # include it in the path. Test that:\n # - Parsing doesn't lowercase the bucket\n # - The bucket is included in path\n for protocol in [\"s3\", \"gcs\", \"gs\"]:\n options = infer_storage_options(\"%s://Bucket-name.com/test.csv\" % protocol)\n assert options[\"path\"] == \"Bucket-name.com/test.csv\"\n\n with pytest.raises(KeyError):\n infer_storage_options(\"file:///bucket/file.csv\", {\"path\": \"collide\"})\n with pytest.raises(KeyError):\n infer_storage_options(\"hdfs:///bucket/file.csv\", {\"protocol\": \"collide\"})", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_bytes_utils.py_test_infer_storage_options_c_test_infer_storage_options_c.assert_so_path_expe": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_bytes_utils.py_test_infer_storage_options_c_test_infer_storage_options_c.assert_so_path_expe", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_bytes_utils.py", "file_name": "test_bytes_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 118, "end_line": 132, "span_ids": ["test_infer_storage_options_c"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"urlpath, expected_path\",\n (\n (r\"c:\\foo\\bar\", r\"c:\\foo\\bar\"),\n (r\"C:\\\\foo\\bar\", r\"C:\\\\foo\\bar\"),\n (r\"c:/foo/bar\", r\"c:/foo/bar\"),\n (r\"file:///c|\\foo\\bar\", r\"c:\\foo\\bar\"),\n 
(r\"file:///C|/foo/bar\", r\"C:/foo/bar\"),\n (r\"file:///C:/foo/bar\", r\"C:/foo/bar\"),\n ),\n)\ndef test_infer_storage_options_c(urlpath, expected_path):\n so = infer_storage_options(urlpath)\n assert so[\"protocol\"] == \"file\"\n assert so[\"path\"] == expected_path", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_bytes_utils.py_test_stringify_path_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_bytes_utils.py_test_stringify_path_", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_bytes_utils.py", "file_name": "test_bytes_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 135, "end_line": 158, "span_ids": ["test_stringify_path"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_stringify_path():\n test_filepath = os.path.join(\"path\", \"to\", \"file.txt\")\n\n # Pathlib.path\n path = pathlib.Path(test_filepath)\n assert stringify_path(path) == test_filepath\n\n # fspath protocol\n class CustomFSPath:\n \"\"\"For testing fspath on unknown objects\"\"\"\n\n def __init__(self, path):\n self.path = path\n\n def __fspath__(self):\n return self.path\n\n path = CustomFSPath(test_filepath)\n assert stringify_path(path) == test_filepath\n\n # Non path-like input is unaffected\n path = (1, 2, 3)\n assert stringify_path(path) is path", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_compression.py_from_io_import_BytesIO_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_compression.py_from_io_import_BytesIO_", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_compression.py", "file_name": "test_compression.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 23, "span_ids": ["imports", "test_files"], "tokens": 126}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from io import BytesIO\n\nimport pytest\nfrom fsspec.compression import compr\n\nfrom dask.bytes.utils import compress\n\n\n@pytest.mark.parametrize(\"fmt,File\", compr.items())\ndef test_files(fmt, File):\n if fmt not in compress:\n pytest.skip(\"compression function not provided\")\n if fmt is None:\n return\n data = b\"1234\" * 1000\n compressed = compress[fmt](data)\n\n b = BytesIO(compressed)\n g = File(b, mode=\"rb\")\n data2 = g.read()\n g.close()\n assert data == data2", "start_char_idx": 
null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_os_require_pyarrow.pytest_mark_skipif_not_py": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_os_require_pyarrow.pytest_mark_skipif_not_py", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_hdfs.py", "file_name": "test_hdfs.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 46, "span_ids": ["imports", "impl:14", "hdfs"], "tokens": 245}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import os\nimport posixpath\n\nimport pytest\nfrom tlz import concat\n\nimport dask\nfrom dask.bytes.core import read_bytes, open_files, get_fs_token_paths\n\n\ntry:\n import distributed\n from distributed import Client\n from distributed.utils_test import cluster, loop # noqa: F401\nexcept (ImportError, SyntaxError):\n distributed = None\n\ntry:\n import pyarrow\nexcept ImportError:\n pyarrow = None\n\n\nif not os.environ.get(\"DASK_RUN_HDFS_TESTS\", \"\"):\n pytestmark = pytest.mark.skip(reason=\"HDFS tests not configured to run\")\n\n\nbasedir = \"/tmp/test-dask\"\n\n\n@pytest.fixture\ndef hdfs(request):\n hdfs = pyarrow.hdfs.connect(host=\"localhost\", port=8020)\n\n if hdfs.exists(basedir):\n hdfs.rm(basedir, recursive=True)\n hdfs.mkdir(basedir)\n\n yield hdfs\n\n if hdfs.exists(basedir):\n hdfs.rm(basedir, recursive=True)\n\n\n# This mark doesn't check the minimum pyarrow version.\nrequire_pyarrow = pytest.mark.skipif(not pyarrow, reason=\"pyarrow not installed\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_read_bytes_test_read_bytes.assert_b_join_r_for_r": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_read_bytes_test_read_bytes.assert_b_join_r_for_r", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_hdfs.py", "file_name": "test_hdfs.py", "file_type": "text/x-python", "category": "test", "start_line": 49, "end_line": 61, "span_ids": ["test_read_bytes"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_bytes(hdfs):\n nfiles = 10\n\n data = b\"a\" * int(1e3)\n\n for fn in [\"%s/file.%d\" % (basedir, i) for i in range(nfiles)]:\n with hdfs.open(fn, \"wb\", replication=1) as f:\n f.write(data)\n\n sample, values = read_bytes(\"hdfs://%s/file.*\" % basedir)\n\n (results,) = dask.compute(values)\n assert 
[b\"\".join(r) for r in results] == nfiles * [data]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_read_bytes_URL_test_read_bytes_URL.assert_b_join_r_for_r": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_read_bytes_URL_test_read_bytes_URL.assert_b_join_r_for_r", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_hdfs.py", "file_name": "test_hdfs.py", "file_type": "text/x-python", "category": "test", "start_line": 64, "end_line": 76, "span_ids": ["test_read_bytes_URL"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_bytes_URL(hdfs):\n nfiles = 10\n data = b\"a\" * int(1e3)\n\n for fn in [\"%s/file.%d\" % (basedir, i) for i in range(nfiles)]:\n with hdfs.open(fn, \"wb\", replication=1) as f:\n f.write(data)\n\n path = \"hdfs://localhost:8020%s/file.*\" % basedir\n sample, values = read_bytes(path)\n\n (results,) = dask.compute(values)\n assert [b\"\".join(r) for r in results] == nfiles * [data]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_read_bytes_big_file_test_read_bytes_big_file.for_r_in_results_.assert_set_r_decode_utf_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_read_bytes_big_file_test_read_bytes_big_file.for_r_in_results_.assert_set_r_decode_utf_", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_hdfs.py", "file_name": "test_hdfs.py", "file_type": "text/x-python", "category": "test", "start_line": 79, "end_line": 98, "span_ids": ["test_read_bytes_big_file"], "tokens": 178}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_bytes_big_file(hdfs):\n fn = \"%s/file\" % basedir\n\n # Write 100 MB file\n nblocks = int(1e3)\n blocksize = int(1e5)\n data = b\"a\" * blocksize\n with hdfs.open(fn, \"wb\", replication=1) as f:\n for i in range(nblocks):\n f.write(data)\n\n sample, values = read_bytes(\"hdfs://\" + fn, blocksize=blocksize)\n\n assert sample[:5] == b\"aaaaa\"\n assert len(values[0]) == nblocks\n\n (results,) = dask.compute(values[0])\n assert sum(map(len, results)) == nblocks * blocksize\n for r in results:\n assert set(r.decode(\"utf-8\")) == {\"a\"}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", 
"class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_deterministic_key_names_test_deterministic_key_names.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_deterministic_key_names_test_deterministic_key_names.None_4", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_hdfs.py", "file_name": "test_hdfs.py", "file_type": "text/x-python", "category": "test", "start_line": 101, "end_line": 113, "span_ids": ["test_deterministic_key_names"], "tokens": 174}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_deterministic_key_names(hdfs):\n data = b\"abc\\n\" * int(1e3)\n fn = \"%s/file\" % basedir\n\n with hdfs.open(fn, \"wb\", replication=1) as fil:\n fil.write(data)\n\n _, x = read_bytes(\"hdfs://%s/*\" % basedir, delimiter=b\"\\n\", sample=False)\n _, y = read_bytes(\"hdfs://%s/*\" % basedir, delimiter=b\"\\n\", sample=False)\n _, z = read_bytes(\"hdfs://%s/*\" % basedir, delimiter=b\"c\", sample=False)\n\n assert [f.key for f in concat(x)] == [f.key for f in concat(y)]\n assert [f.key for f in concat(x)] != [f.key for f in concat(z)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_open_files_write_test_open_files_write.assert_data_results": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_open_files_write_test_open_files_write.assert_data_results", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_hdfs.py", "file_name": "test_hdfs.py", "file_type": "text/x-python", "category": "test", "start_line": 116, "end_line": 128, "span_ids": ["test_open_files_write"], "tokens": 114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_open_files_write(hdfs):\n path = \"hdfs://%s/\" % basedir\n data = [b\"test data %i\" % i for i in range(5)]\n\n files = open_files(path, num=len(data), mode=\"wb\")\n for fil, b in zip(files, data):\n with fil as f:\n f.write(b)\n\n sample, vals = read_bytes(\"hdfs://%s/*.part\" % basedir)\n\n (results,) = dask.compute(list(concat(vals)))\n assert data == results", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_read_csv_test_read_csv.assert_df_id_sum_comput": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_read_csv_test_read_csv.assert_df_id_sum_comput", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_hdfs.py", "file_name": "test_hdfs.py", "file_type": "text/x-python", "category": "test", "start_line": 131, "end_line": 143, "span_ids": ["test_read_csv"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_csv(hdfs):\n dd = pytest.importorskip(\"dask.dataframe\")\n\n with hdfs.open(\"%s/1.csv\" % basedir, \"wb\") as f:\n f.write(b\"name,amount,id\\nAlice,100,1\\nBob,200,2\")\n\n with hdfs.open(\"%s/2.csv\" % basedir, \"wb\") as f:\n f.write(b\"name,amount,id\\nCharlie,300,3\\nDennis,400,4\")\n\n df = dd.read_csv(\"hdfs://%s/*.csv\" % basedir)\n\n assert isinstance(df, dd.DataFrame)\n assert df.id.sum().compute() == 1 + 2 + 3 + 4", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_read_text_test_read_text.with_pool_.assert_result_a_b": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_read_text_test_read_text.with_pool_.assert_result_a_b", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_hdfs.py", "file_name": "test_hdfs.py", "file_type": "text/x-python", "category": "test", "start_line": 146, "end_line": 172, "span_ids": ["test_read_text"], "tokens": 280}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_text(hdfs):\n db = pytest.importorskip(\"dask.bag\")\n import multiprocessing as mp\n\n pool = mp.get_context(\"spawn\").Pool(2)\n\n with pool:\n with hdfs.open(\"%s/text.1.txt\" % basedir, \"wb\") as f:\n f.write(\"Alice 100\\nBob 200\\nCharlie 300\".encode())\n\n with hdfs.open(\"%s/text.2.txt\" % basedir, \"wb\") as f:\n f.write(\"Dan 400\\nEdith 500\\nFrank 600\".encode())\n\n with hdfs.open(\"%s/other.txt\" % basedir, \"wb\") as f:\n f.write(\"a b\\nc d\".encode())\n\n b = db.read_text(\"hdfs://%s/text.*.txt\" % basedir)\n with dask.config.set(pool=pool):\n result = b.str.strip().str.split().map(len).compute()\n\n assert result == [2, 2, 2, 2, 2, 2]\n\n b = db.read_text(\"hdfs://%s/other.txt\" % basedir)\n with dask.config.set(pool=pool):\n result = b.str.split().flatten().compute()\n\n assert result == [\"a\", \"b\", \"c\", \"d\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_read_text_unicode_test_read_text_unicode.assert_len_result_0_stri": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_read_text_unicode_test_read_text_unicode.assert_len_result_0_stri", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_hdfs.py", "file_name": "test_hdfs.py", "file_type": "text/x-python", "category": "test", "start_line": 175, "end_line": 188, "span_ids": ["test_read_text_unicode"], "tokens": 138}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_text_unicode(hdfs):\n db = pytest.importorskip(\"dask.bag\")\n\n data = b\"abcd\\xc3\\xa9\"\n fn = \"%s/data.txt\" % basedir\n with hdfs.open(fn, \"wb\") as f:\n f.write(b\"\\n\".join([data, data]))\n\n f = db.read_text(\"hdfs://\" + fn, collection=False)\n\n result = f[0].compute()\n assert len(result) == 2\n assert list(map(str.strip, result)) == [data.decode(\"utf-8\")] * 2\n assert len(result[0].strip()) == 5", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_parquet_pyarrow_test_parquet_pyarrow._smoke_test_on_read": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_parquet_pyarrow_test_parquet_pyarrow._smoke_test_on_read", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_hdfs.py", "file_name": "test_hdfs.py", "file_type": "text/x-python", "category": "test", "start_line": 191, "end_line": 208, "span_ids": ["test_parquet_pyarrow"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@require_pyarrow\ndef test_parquet_pyarrow(hdfs):\n dd = pytest.importorskip(\"dask.dataframe\")\n import pandas as pd\n import numpy as np\n\n fn = \"%s/test.parquet\" % basedir\n hdfs_fn = \"hdfs://%s\" % fn\n df = pd.DataFrame(np.random.normal(size=(1000, 4)), columns=list(\"abcd\"))\n ddf = dd.from_pandas(df, npartitions=4)\n\n ddf.to_parquet(hdfs_fn, engine=\"pyarrow\")\n\n assert len(hdfs.ls(fn)) # Files are written\n\n ddf2 = dd.read_parquet(hdfs_fn, engine=\"pyarrow\")\n\n assert len(ddf2) == 1000 # smoke test on read", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_glob_test_glob.None_10": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_glob_test_glob.None_10", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_hdfs.py", "file_name": "test_hdfs.py", "file_type": "text/x-python", "category": "test", "start_line": 211, "end_line": 254, "span_ids": ["test_glob"], "tokens": 493}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_glob(hdfs):\n\n tree = {\n basedir: ([\"c\", \"c2\"], [\"a\", \"a1\", \"a2\", \"a3\", \"b1\"]),\n basedir + \"/c\": ([\"d\"], [\"x1\", \"x2\"]),\n basedir + \"/c2\": ([\"d\"], [\"x1\", \"x2\"]),\n basedir + \"/c/d\": ([], [\"x3\"]),\n }\n\n hdfs, _, _ = get_fs_token_paths(\"hdfs:///\")\n hdfs.makedirs(basedir + \"/c/d\")\n hdfs.makedirs(basedir + \"/c2/d/\")\n for fn in (\n posixpath.join(dirname, f)\n for (dirname, (_, fils)) in tree.items()\n for f in fils\n ):\n with hdfs.open(fn, mode=\"wb\") as f2:\n f2.write(b\"000\")\n\n assert set(hdfs.glob(basedir + \"/a*\")) == {\n basedir + p for p in [\"/a\", \"/a1\", \"/a2\", \"/a3\"]\n }\n\n assert set(hdfs.glob(basedir + \"/c/*\")) == {\n basedir + p for p in [\"/c/x1\", \"/c/x2\", \"/c/d\"]\n }\n\n assert set(hdfs.glob(basedir + \"/*/x*\")) == {\n basedir + p for p in [\"/c/x1\", \"/c/x2\", \"/c2/x1\", \"/c2/x2\"]\n }\n assert set(hdfs.glob(basedir + \"/*/x1\")) == {\n basedir + p for p in [\"/c/x1\", \"/c2/x1\"]\n }\n\n assert hdfs.find(\"/this-path-doesnt-exist\") == []\n assert hdfs.find(basedir + \"/missing/\") == []\n assert hdfs.find(basedir + \"/missing/x1\") == []\n assert hdfs.glob(basedir + \"/missing/*\") == []\n assert hdfs.glob(basedir + \"/*/missing\") == []\n\n assert set(hdfs.glob(basedir + \"/*\")) == {\n basedir + p for p in [\"/a\", \"/a1\", \"/a2\", \"/a3\", \"/b1\", \"/c\", \"/c2\"]\n }", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_distributed_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_distributed_", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_hdfs.py", "file_name": "test_hdfs.py", "file_type": "text/x-python", "category": "test", "start_line": 257, "end_line": 273, "span_ids": ["test_distributed"], "tokens": 217}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n not distributed, reason=\"Skipped as distributed is not installed.\" # noqa: F811\n) # noqa: F811\ndef test_distributed(hdfs, loop): # noqa: F811\n dd = pytest.importorskip(\"dask.dataframe\")\n\n with hdfs.open(\"%s/1.csv\" % basedir, \"wb\") as f:\n f.write(b\"name,amount,id\\nAlice,100,1\\nBob,200,2\")\n\n with hdfs.open(\"%s/2.csv\" % basedir, \"wb\") as f:\n 
f.write(b\"name,amount,id\\nCharlie,300,3\\nDennis,400,4\")\n\n with cluster() as (s, [a, b]):\n with Client(s[\"address\"], loop=loop): # noqa: F811\n df = dd.read_csv(\"hdfs://%s/*.csv\" % basedir)\n assert df.id.sum().compute() == 1 + 2 + 3 + 4", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_os_if_LooseVersion_fsspec___.errs.errs_aiohttp_client_ex": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_os_if_LooseVersion_fsspec___.errs.errs_aiohttp_client_ex", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_http.py", "file_name": "test_http.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 18, "span_ids": ["imports"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import os\nimport pytest\nimport subprocess\nimport sys\nimport time\nimport fsspec\nfrom distutils.version import LooseVersion\n\nfrom dask.bytes.core import open_files\nfrom dask.bytes._compatibility import FSSPEC_042\nfrom dask.utils import tmpdir\n\nfiles = [\"a\", \"b\"]\nrequests = pytest.importorskip(\"requests\")\nerrs = (requests.exceptions.RequestException,)\nif LooseVersion(fsspec.__version__) > \"0.7.4\":\n aiohttp = pytest.importorskip(\"aiohttp\")\n errs = errs + (aiohttp.client_exceptions.ClientResponseError,)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_dir_server_dir_server.with_tmpdir_as_d_.p_terminate_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_dir_server_dir_server.with_tmpdir_as_d_.p_terminate_", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_http.py", "file_name": "test_http.py", "file_type": "text/x-python", "category": "test", "start_line": 21, "end_line": 41, "span_ids": ["dir_server"], "tokens": 159}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.fixture(scope=\"module\")\ndef dir_server():\n with tmpdir() as d:\n for fn in files:\n with open(os.path.join(d, fn), \"wb\") as f:\n f.write(b\"a\" * 10000)\n\n cmd = [sys.executable, \"-m\", \"http.server\", \"8999\"]\n p = subprocess.Popen(cmd, cwd=d)\n timeout = 10\n while True:\n try:\n requests.get(\"http://localhost:8999\")\n break\n except requests.exceptions.ConnectionError as e:\n time.sleep(0.1)\n timeout -= 0.1\n if timeout < 0:\n raise RuntimeError(\"Server did not appear\") from e\n 
yield d\n p.terminate()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_simple_test_loc.with_f_as_f_.assert_f_loc_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_simple_test_loc.with_f_as_f_.assert_f_loc_4", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_http.py", "file_name": "test_http.py", "file_type": "text/x-python", "category": "test", "start_line": 44, "end_line": 66, "span_ids": ["test_loc", "test_simple"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_simple(dir_server):\n root = \"http://localhost:8999/\"\n fn = files[0]\n f = open_files(root + fn)[0]\n with f as f:\n data = f.read()\n assert data == open(os.path.join(dir_server, fn), \"rb\").read()\n\n\ndef test_loc(dir_server):\n root = \"http://localhost:8999/\"\n fn = files[0]\n f = open_files(root + fn)[0]\n expected = open(os.path.join(dir_server, fn), \"rb\").read()\n with f as f:\n data = f.read(2)\n assert data == expected[:2]\n assert f.loc == 2\n f.seek(0)\n data = f.read(3)\n assert data == expected[:3]\n f.seek(1, 1)\n assert f.loc == 4", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_fetch_range_with_headers_test_fetch_range_with_headers.assert_data_open_os_pa": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_fetch_range_with_headers_test_fetch_range_with_headers.assert_data_open_os_pa", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_http.py", "file_name": "test_http.py", "file_type": "text/x-python", "category": "test", "start_line": 69, "end_line": 77, "span_ids": ["test_fetch_range_with_headers"], "tokens": 119}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fetch_range_with_headers(dir_server):\n # https://github.com/dask/dask/issues/4479\n root = \"http://localhost:8999/\"\n fn = files[0]\n headers = {\"Date\": \"Wed, 21 Oct 2015 07:28:00 GMT\"}\n f = open_files(root + fn, headers=headers)[0]\n with f as f:\n data = f.read(length=1) + f.read(length=-1)\n assert data == open(os.path.join(dir_server, fn), \"rb\").read()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_ops_test_ops.with_f_as_f_.assert_f_read_data_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_ops_test_ops.with_f_as_f_.assert_f_read_data_", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_http.py", "file_name": "test_http.py", "file_type": "text/x-python", "category": "test", "start_line": 80, "end_line": 93, "span_ids": ["test_ops"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"block_size\", [None, 99999])\ndef test_ops(dir_server, block_size):\n root = \"http://localhost:8999/\"\n fn = files[0]\n f = open_files(root + fn)[0]\n data = open(os.path.join(dir_server, fn), \"rb\").read()\n with f as f:\n # these pass because the default\n assert f.read(10) == data[:10]\n f.seek(0)\n assert f.read(10) == data[:10]\n assert f.read(10) == data[10:20]\n f.seek(-10, 2)\n assert f.read() == data[-10:]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_ops_blocksize_test_ops_blocksize.None_1.with_pytest_raises_ValueE.assert_f_read_10_data": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_ops_blocksize_test_ops_blocksize.None_1.with_pytest_raises_ValueE.assert_f_read_10_data", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_http.py", "file_name": "test_http.py", "file_type": "text/x-python", "category": "test", "start_line": 96, "end_line": 114, "span_ids": ["test_ops_blocksize"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_ops_blocksize(dir_server):\n root = \"http://localhost:8999/\"\n fn = files[0]\n f = open_files(root + fn, block_size=2)[0]\n data = open(os.path.join(dir_server, fn), \"rb\").read()\n with f as f:\n # it's OK to read the whole file\n assert f.read() == data\n # and now the file magically has a size\n assert f.size == len(data)\n\n # note that if we reuse f from above, because it is tokenized, we get\n # the same open file - where is this cached?\n fn = files[1]\n f = open_files(root + fn, block_size=2)[0]\n with f as f:\n # fails because we want only 12 bytes\n with pytest.raises(ValueError):\n assert f.read(10) == data[:10]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_errors_test_errors.with_f_as_f_.with_pytest_raises_ValueE.f_seek_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_errors_test_errors.with_f_as_f_.with_pytest_raises_ValueE.f_seek_1_", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_http.py", "file_name": "test_http.py", "file_type": "text/x-python", "category": "test", "start_line": 117, "end_line": 141, "span_ids": ["test_errors"], "tokens": 174}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_errors(dir_server):\n f = open_files(\"http://localhost:8999/doesnotexist\")[0]\n with pytest.raises(errs):\n with f as f:\n f.read()\n f = open_files(\"http://nohost/\")[0]\n\n if FSSPEC_042:\n expected = FileNotFoundError\n else:\n expected = requests.exceptions.RequestException\n\n with pytest.raises(expected):\n with f as f:\n f.read()\n root = \"http://localhost:8999/\"\n fn = files[0]\n f = open_files(root + fn, mode=\"wb\")[0]\n with pytest.raises(NotImplementedError):\n with f:\n pass\n f = open_files(root + fn)[0]\n with f as f:\n with pytest.raises(ValueError):\n f.seek(-1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_files_test_open_glob.assert_fs_1_path_htt": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_files_test_open_glob.assert_fs_1_path_htt", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_http.py", "file_name": "test_http.py", "file_type": "text/x-python", "category": "test", "start_line": 144, "end_line": 156, "span_ids": ["test_files", "test_open_glob"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_files(dir_server):\n root = \"http://localhost:8999/\"\n fs = open_files([root + f for f in files])\n for f, f2 in zip(fs, files):\n with f as f:\n assert f.read() == open(os.path.join(dir_server, f2), \"rb\").read()\n\n\ndef test_open_glob(dir_server):\n root = \"http://localhost:8999/\"\n fs = open_files(root + \"/*\")\n assert fs[0].path == \"http://localhost:8999/a\"\n assert fs[1].path == \"http://localhost:8999/b\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_parquet_test_parquet.assert_df_columns_tolist_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_parquet_test_parquet.assert_df_columns_tolist_", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_http.py", "file_name": "test_http.py", "file_type": "text/x-python", "category": "test", "start_line": 159, "end_line": 173, "span_ids": ["test_parquet"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.network\n@pytest.mark.xfail(reason=\"https://github.com/dask/dask/issues/5042\", strict=False)\ndef test_parquet():\n pytest.importorskip(\"requests\", minversion=\"2.21.0\")\n dd = pytest.importorskip(\"dask.dataframe\")\n pytest.importorskip(\"fastparquet\") # no pyarrow compatibility FS yet\n df = dd.read_parquet(\n [\n \"https://github.com/Parquet/parquet-compatibility/raw/\"\n \"master/parquet-testdata/impala/1.1.1-NONE/\"\n \"nation.impala.parquet\"\n ]\n ).compute()\n assert df.n_nationkey.tolist() == list(range(25))\n assert df.columns.tolist() == [\"n_nationkey\", \"n_name\", \"n_regionkey\", \"n_comment\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_bag_test_bag.b_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_bag_test_bag.b_compute_", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_http.py", "file_name": "test_http.py", "file_type": "text/x-python", "category": "test", "start_line": 176, "end_line": 188, "span_ids": ["test_bag"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(reason=\"https://github.com/dask/dask/issues/3696\", strict=False)\n@pytest.mark.network\ndef test_bag():\n # This test pulls from different hosts\n db = pytest.importorskip(\"dask.bag\")\n urls = [\n \"https://raw.githubusercontent.com/weierophinney/pastebin/\"\n \"master/public/js-src/dojox/data/tests/stores/patterns.csv\",\n \"https://en.wikipedia.org\",\n ]\n b = db.read_text(urls)\n assert b.npartitions == 2\n b.compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_read_csv_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_read_csv_", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_http.py", "file_name": "test_http.py", "file_type": "text/x-python", "category": "test", 
"start_line": 191, "end_line": 204, "span_ids": ["test_read_csv"], "tokens": 114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(\n LooseVersion(fsspec.__version__) <= \"0.4.1\",\n reason=\"https://github.com/dask/dask/pull/5231\",\n)\n@pytest.mark.network\ndef test_read_csv():\n dd = pytest.importorskip(\"dask.dataframe\")\n url = (\n \"https://raw.githubusercontent.com/weierophinney/pastebin/\"\n \"master/public/js-src/dojox/data/tests/stores/patterns.csv\"\n )\n b = dd.read_csv(url)\n b.compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_gzip_test_unordered_urlpath_errors.with_pytest_raises_TypeEr.read_bytes_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_gzip_test_unordered_urlpath_errors.with_pytest_raises_TypeEr.read_bytes_", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 58, "span_ids": ["imports", "test_unordered_urlpath_errors", "to_uri"], "tokens": 425}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import gzip\nimport os\nimport pathlib\nimport sys\nfrom time import sleep\nfrom functools import partial\n\nimport pytest\nfrom tlz import concat, valmap\n\nfrom dask import compute\nfrom dask.utils import filetexts\nfrom fsspec.implementations.local import LocalFileSystem\nfrom fsspec.compression import compr\nfrom dask.bytes.core import read_bytes, open_files\nfrom dask.bytes.utils import compress\n\ncompute = partial(compute, scheduler=\"sync\")\n\nfiles = {\n \".test.accounts.1.json\": (\n b'{\"amount\": 100, \"name\": \"Alice\"}\\n'\n b'{\"amount\": 200, \"name\": \"Bob\"}\\n'\n b'{\"amount\": 300, \"name\": \"Charlie\"}\\n'\n b'{\"amount\": 400, \"name\": \"Dennis\"}\\n'\n ),\n \".test.accounts.2.json\": (\n b'{\"amount\": 500, \"name\": \"Alice\"}\\n'\n b'{\"amount\": 600, \"name\": \"Bob\"}\\n'\n b'{\"amount\": 700, \"name\": \"Charlie\"}\\n'\n b'{\"amount\": 800, \"name\": \"Dennis\"}\\n'\n ),\n}\n\n\ncsv_files = {\n \".test.fakedata.1.csv\": (b\"a,b\\n\" b\"1,2\\n\"),\n \".test.fakedata.2.csv\": (b\"a,b\\n\" b\"3,4\\n\"),\n \"subdir/.test.fakedata.2.csv\": (b\"a,b\\n\" b\"5,6\\n\"),\n}\n\n\ndef to_uri(path):\n return pathlib.Path(os.path.abspath(path)).as_uri()\n\n\ndef test_unordered_urlpath_errors():\n\n # Unordered urlpath argument\n with pytest.raises(TypeError):\n read_bytes(\n {\n \"sets/are.csv\",\n \"unordered/so/they.csv\",\n \"should/not/be.csv\",\n \"allowed.csv\",\n }\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_read_bytes_test_read_bytes.with_filetexts_files_mod.assert_set_results_se": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_read_bytes_test_read_bytes.with_filetexts_files_mod.assert_set_results_se", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 61, "end_line": 74, "span_ids": ["test_read_bytes"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_bytes():\n with filetexts(files, mode=\"b\"):\n sample, values = read_bytes(\".test.accounts.*\")\n assert isinstance(sample, bytes)\n assert sample[:5] == files[sorted(files)[0]][:5]\n assert sample.endswith(b\"\\n\")\n\n assert isinstance(values, (list, tuple))\n assert isinstance(values[0], (list, tuple))\n assert hasattr(values[0][0], \"dask\")\n\n assert sum(map(len, values)) >= len(files)\n results = compute(*concat(values))\n assert set(results) == set(files.values())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_read_bytes_sample_delimiter_test_read_bytes_sample_delimiter.with_filetexts_files_mod.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_read_bytes_sample_delimiter_test_read_bytes_sample_delimiter.with_filetexts_files_mod.None_5", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 77, "end_line": 84, "span_ids": ["test_read_bytes_sample_delimiter"], "tokens": 109}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_bytes_sample_delimiter():\n with filetexts(files, mode=\"b\"):\n sample, values = read_bytes(\".test.accounts.*\", sample=80, delimiter=b\"\\n\")\n assert sample.endswith(b\"\\n\")\n sample, values = read_bytes(\".test.accounts.1.json\", sample=80, delimiter=b\"\\n\")\n assert sample.endswith(b\"\\n\")\n sample, values = read_bytes(\".test.accounts.1.json\", sample=2, delimiter=b\"\\n\")\n assert sample.endswith(b\"\\n\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_parse_sample_bytes_test_read_bytes_include_path.with_filetexts_files_mod.assert_os_path_split_pat": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_parse_sample_bytes_test_read_bytes_include_path.with_filetexts_files_mod.assert_os_path_split_pat", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 87, "end_line": 124, "span_ids": ["test_read_bytes_blocksize_float_errs", "test_read_bytes_blocksize_none", "test_read_bytes_include_path", "test_read_bytes_blocksize_types", "test_parse_sample_bytes", "test_read_bytes_no_sample"], "tokens": 323}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_parse_sample_bytes():\n with filetexts(files, mode=\"b\"):\n sample, values = read_bytes(\".test.accounts.*\", sample=\"40 B\")\n assert len(sample) == 40\n\n\ndef test_read_bytes_no_sample():\n with filetexts(files, mode=\"b\"):\n sample, _ = read_bytes(\".test.accounts.1.json\", sample=False)\n assert sample is False\n\n\ndef test_read_bytes_blocksize_none():\n with filetexts(files, mode=\"b\"):\n sample, values = read_bytes(\".test.accounts.*\", blocksize=None)\n assert sum(map(len, values)) == len(files)\n\n\n@pytest.mark.parametrize(\"blocksize\", [5.0, \"5 B\"])\ndef test_read_bytes_blocksize_types(blocksize):\n with filetexts(files, mode=\"b\"):\n sample, vals = read_bytes(\".test.account*\", blocksize=blocksize)\n results = compute(*concat(vals))\n ourlines = b\"\".join(results).split(b\"\\n\")\n testlines = b\"\".join(files.values()).split(b\"\\n\")\n assert set(ourlines) == set(testlines)\n\n\ndef test_read_bytes_blocksize_float_errs():\n with filetexts(files, mode=\"b\"):\n with pytest.raises(TypeError):\n read_bytes(\".test.account*\", blocksize=5.5)\n\n\ndef test_read_bytes_include_path():\n with filetexts(files, mode=\"b\"):\n _, _, paths = read_bytes(\".test.accounts.*\", include_path=True)\n assert {os.path.split(path)[1] for path in paths} == set(files.keys())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_with_urls_test_with_urls.with_filetexts_files_mod.assert_sum_map_len_value": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_with_urls_test_with_urls.with_filetexts_files_mod.assert_sum_map_len_value", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 127, "end_line": 139, "span_ids": ["test_with_urls"], "tokens": 138}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(\n os.environ.get(\"GITHUB_ACTIONS\")\n and sys.platform == \"win32\"\n and sys.version_info[:2] == (3, 6),\n reason=\"TODO: Fails on GitHub Actions when running Python 3.6 on Windows.\"\n \"See https://github.com/dask/dask/pull/5862.\",\n)\ndef test_with_urls():\n with filetexts(files, mode=\"b\"):\n # OS-independent file:// URI with glob *\n url = to_uri(\".test.accounts.\") + \"*\"\n sample, values = read_bytes(url, blocksize=None)\n assert sum(map(len, values)) == len(files)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_with_paths_test_with_paths.with_pytest_raises_OSErro.read_bytes_url_blocksize": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_with_paths_test_with_paths.with_pytest_raises_OSErro.read_bytes_url_blocksize", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 142, "end_line": 151, "span_ids": ["test_with_paths"], "tokens": 109}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(sys.platform == \"win32\", reason=\"pathlib and moto clash on windows\")\ndef test_with_paths():\n with filetexts(files, mode=\"b\"):\n url = pathlib.Path(\"./.test.accounts.*\")\n sample, values = read_bytes(url, blocksize=None)\n assert sum(map(len, values)) == len(files)\n with pytest.raises(OSError):\n # relative path doesn't work\n url = pathlib.Path(\"file://.test.accounts.*\")\n read_bytes(url, blocksize=None)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_read_bytes_block_test_read_bytes_block.with_filetexts_files_mod.for_bs_in_5_15_45_150.assert_set_ourlines_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_read_bytes_block_test_read_bytes_block.with_filetexts_files_mod.for_bs_in_5_15_45_150.assert_set_ourlines_s", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 154, "end_line": 165, "span_ids": ["test_read_bytes_block"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": "def test_read_bytes_block():\n with filetexts(files, mode=\"b\"):\n for bs in [5, 15, 45, 1500]:\n sample, vals = read_bytes(\".test.account*\", blocksize=bs)\n assert list(map(len, vals)) == [(len(v) // bs + 1) for v in files.values()]\n\n results = compute(*concat(vals))\n assert sum(len(r) for r in results) == sum(len(v) for v in files.values())\n\n ourlines = b\"\".join(results).split(b\"\\n\")\n testlines = b\"\".join(files.values()).split(b\"\\n\")\n assert set(ourlines) == set(testlines)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_read_bytes_delimited_test_read_bytes_delimited.with_filetexts_files_mod.for_bs_in_5_15_45_1_.assert_ours_test": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_read_bytes_delimited_test_read_bytes_delimited.with_filetexts_files_mod.for_bs_in_5_15_45_1_.assert_ours_test", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 168, "end_line": 191, "span_ids": ["test_read_bytes_delimited"], "tokens": 287}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_bytes_delimited():\n with filetexts(files, mode=\"b\"):\n for bs in [5, 15, 45, \"1.5 kB\"]:\n _, values = read_bytes(\".test.accounts*\", blocksize=bs, delimiter=b\"\\n\")\n _, values2 = read_bytes(\".test.accounts*\", blocksize=bs, delimiter=b\"foo\")\n assert [a.key for a in concat(values)] != [b.key for b in concat(values2)]\n\n results = compute(*concat(values))\n res = [r for r in results if r]\n assert all(r.endswith(b\"\\n\") for r in res)\n ourlines = b\"\".join(res).split(b\"\\n\")\n testlines = b\"\".join(files[k] for k in sorted(files)).split(b\"\\n\")\n assert ourlines == testlines\n\n # delimiter not at the end\n d = b\"}\"\n _, values = read_bytes(\".test.accounts*\", blocksize=bs, delimiter=d)\n results = compute(*concat(values))\n res = [r for r in results if r]\n # All should end in } except EOF\n assert sum(r.endswith(b\"}\") for r in res) == len(res) - 2\n ours = b\"\".join(res)\n test = b\"\".join(files[v] for v in sorted(files))\n assert ours == test", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_fmt_bs_test_compression.with_filetexts_files2_mo.assert_b_join_results_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_fmt_bs_test_compression.with_filetexts_files2_mo.assert_b_join_results_", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", 
"category": "test", "start_line": 194, "end_line": 222, "span_ids": ["test_compression", "impl:7"], "tokens": 224}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "fmt_bs = [(fmt, None) for fmt in compr] + [(fmt, 10) for fmt in compr]\n\n\n@pytest.mark.parametrize(\"fmt,blocksize\", fmt_bs)\ndef test_compression(fmt, blocksize):\n if fmt not in compress:\n pytest.skip(\"compression function not provided\")\n files2 = valmap(compress[fmt], files)\n with filetexts(files2, mode=\"b\"):\n if fmt and blocksize:\n with pytest.raises(ValueError):\n read_bytes(\n \".test.accounts.*.json\",\n blocksize=blocksize,\n delimiter=b\"\\n\",\n compression=fmt,\n )\n return\n sample, values = read_bytes(\n \".test.accounts.*.json\",\n blocksize=blocksize,\n delimiter=b\"\\n\",\n compression=fmt,\n )\n assert sample[:5] == files[sorted(files)[0]][:5]\n assert sample.endswith(b\"\\n\")\n\n results = compute(*concat(values))\n assert b\"\".join(results) == b\"\".join([files[k] for k in sorted(files)])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_open_files_test_open_files_text_mode.with_filetexts_files_mod.assert_list_data_fil": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_open_files_test_open_files_text_mode.with_filetexts_files_mod.assert_list_data_fil", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 225, "end_line": 244, "span_ids": ["test_open_files", "test_open_files_text_mode"], "tokens": 175}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_open_files():\n with filetexts(files, mode=\"b\"):\n myfiles = open_files(\".test.accounts.*\")\n assert len(myfiles) == len(files)\n for lazy_file, data_file in zip(myfiles, sorted(files)):\n with lazy_file as f:\n x = f.read()\n assert x == files[data_file]\n\n\n@pytest.mark.parametrize(\"encoding\", [\"utf-8\", \"ascii\"])\ndef test_open_files_text_mode(encoding):\n with filetexts(files, mode=\"b\"):\n myfiles = open_files(\".test.accounts.*\", mode=\"rt\", encoding=encoding)\n assert len(myfiles) == len(files)\n data = []\n for file in myfiles:\n with file as f:\n data.append(f.read())\n assert list(data) == [files[k].decode(encoding) for k in sorted(files)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_open_files_compression_test_open_files_compression.with_filetexts_files2_mo.assert_list_data_sol": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_open_files_compression_test_open_files_compression.with_filetexts_files2_mo.assert_list_data_sol", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 247, "end_line": 262, "span_ids": ["test_open_files_compression"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"mode\", [\"rt\", \"rb\"])\n@pytest.mark.parametrize(\"fmt\", list(compr))\ndef test_open_files_compression(mode, fmt):\n if fmt not in compress:\n pytest.skip(\"compression function not provided\")\n files2 = valmap(compress[fmt], files)\n with filetexts(files2, mode=\"b\"):\n myfiles = open_files(\".test.accounts.*\", mode=mode, compression=fmt)\n data = []\n for file in myfiles:\n with file as f:\n data.append(f.read())\n sol = [files[k] for k in sorted(files)]\n if mode == \"rt\":\n sol = [b.decode() for b in sol]\n assert list(data) == sol", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_bad_compression_test_names.with_filetexts_files_mod.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_bad_compression_test_names.with_filetexts_files_mod.None_4", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 265, "end_line": 295, "span_ids": ["test_bad_compression", "test_not_found", "test_names"], "tokens": 224}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_bad_compression():\n with filetexts(files, mode=\"b\"):\n for func in [read_bytes, open_files]:\n with pytest.raises(ValueError):\n sample, values = func(\".test.accounts.*\", compression=\"not-found\")\n\n\ndef test_not_found():\n fn = \"not-a-file\"\n with pytest.raises((FileNotFoundError, OSError), match=fn):\n read_bytes(fn)\n\n\n@pytest.mark.slow\ndef test_names():\n with filetexts(files, mode=\"b\"):\n _, a = read_bytes(\".test.accounts.*\")\n _, b = read_bytes(\".test.accounts.*\")\n a = list(concat(a))\n b = list(concat(b))\n\n assert [aa._key for aa in a] == [bb._key for bb in b]\n\n sleep(1)\n for fn in files:\n with open(fn, \"ab\") as f:\n f.write(b\"x\")\n\n _, c = read_bytes(\".test.accounts.*\")\n c = 
list(concat(c))\n assert [aa._key for aa in a] != [cc._key for cc in c]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_open_files_write_test_open_files_write.assert_d_b_000_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_open_files_write_test_open_files_write.assert_d_b_000_", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 298, "end_line": 313, "span_ids": ["test_open_files_write"], "tokens": 164}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"compression_opener\", [(None, open), (\"gzip\", gzip.open)])\ndef test_open_files_write(tmpdir, compression_opener):\n compression, opener = compression_opener\n tmpdir = str(tmpdir)\n files = open_files(tmpdir, num=2, mode=\"wb\", compression=compression)\n assert len(files) == 2\n assert {f.mode for f in files} == {\"wb\"}\n for fil in files:\n with fil as f:\n f.write(b\"000\")\n files = sorted(os.listdir(tmpdir))\n assert files == [\"0.part\", \"1.part\"]\n\n with opener(os.path.join(tmpdir, files[0]), \"rb\") as f:\n d = f.read()\n assert d == b\"000\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_pickability_of_lazy_files_test_py2_local_bytes.with_files_0_as_f_.assert_all_isinstance_lin": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_pickability_of_lazy_files_test_py2_local_bytes.with_files_0_as_f_.assert_all_isinstance_lin", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 316, "end_line": 339, "span_ids": ["test_pickability_of_lazy_files", "test_py2_local_bytes"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_pickability_of_lazy_files(tmpdir):\n tmpdir = str(tmpdir)\n cloudpickle = pytest.importorskip(\"cloudpickle\")\n\n with filetexts(files, mode=\"b\"):\n myfiles = open_files(\".test.accounts.*\")\n myfiles2 = cloudpickle.loads(cloudpickle.dumps(myfiles))\n\n for f, f2 in zip(myfiles, myfiles2):\n assert f.path == f2.path\n assert type(f.fs) == type(f2.fs)\n with f as f_open, f2 as f2_open:\n assert f_open.read() == f2_open.read()\n\n\ndef 
test_py2_local_bytes(tmpdir):\n fn = str(tmpdir / \"myfile.txt.gz\")\n with gzip.open(fn, mode=\"wb\") as f:\n f.write(b\"hello\\nworld\")\n\n files = open_files(fn, compression=\"gzip\", mode=\"rt\")\n\n with files[0] as f:\n assert all(isinstance(line, str) for line in f)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_abs_paths_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_abs_paths_", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 342, "end_line": 367, "span_ids": ["test_abs_paths", "test_get_pyarrow_filesystem"], "tokens": 168}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_abs_paths(tmpdir):\n tmpdir = str(tmpdir)\n here = os.getcwd()\n os.chdir(tmpdir)\n with open(\"tmp\", \"w\") as f:\n f.write(\"hi\")\n out = LocalFileSystem().glob(\"*\")\n assert len(out) == 1\n assert \"/\" in out[0]\n assert \"tmp\" in out[0]\n\n fs = LocalFileSystem()\n os.chdir(here)\n with fs.open(out[0], \"r\") as f:\n res = f.read()\n assert res == \"hi\"\n\n\ndef test_get_pyarrow_filesystem():\n from fsspec.implementations.local import LocalFileSystem\n\n pa = pytest.importorskip(\"pyarrow\")\n\n fs = LocalFileSystem()\n assert isinstance(fs, pa.filesystem.FileSystem)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_io_endpoint_uri._http_127_0_0_1_5555_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_io_endpoint_uri._http_127_0_0_1_5555_", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_s3.py", "file_name": "test_s3.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 72, "span_ids": ["imports", "ensure_safe_environment_variables", "impl:18", "s3so"], "tokens": 533}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import io\nimport os\nfrom contextlib import contextmanager\nfrom functools import partial\nfrom distutils.version import LooseVersion\nimport shlex\nimport subprocess\nimport sys\nimport time\n\nimport pytest\nimport numpy as np\n\ns3fs = pytest.importorskip(\"s3fs\")\nboto3 = pytest.importorskip(\"boto3\")\nmoto = pytest.importorskip(\"moto\", minversion=\"1.3.14\")\npytest.importorskip(\"flask\") # server mode needs flask too\nrequests = 
pytest.importorskip(\"requests\")\n\nfrom tlz import concat, valmap\n\nfrom dask import compute\nfrom dask.bytes.core import read_bytes, open_files\nfrom s3fs import S3FileSystem as DaskS3FileSystem\nfrom dask.bytes.utils import compress\nfrom fsspec.compression import compr\n\n\ncompute = partial(compute, scheduler=\"sync\")\nnumpy_120_mark = pytest.mark.xfail(\n LooseVersion(np.__version__) >= \"1.20.0\", reason=\"Upstream incompatibility\"\n)\n\n\ntest_bucket_name = \"test\"\nfiles = {\n \"test/accounts.1.json\": (\n b'{\"amount\": 100, \"name\": \"Alice\"}\\n'\n b'{\"amount\": 200, \"name\": \"Bob\"}\\n'\n b'{\"amount\": 300, \"name\": \"Charlie\"}\\n'\n b'{\"amount\": 400, \"name\": \"Dennis\"}\\n'\n ),\n \"test/accounts.2.json\": (\n b'{\"amount\": 500, \"name\": \"Alice\"}\\n'\n b'{\"amount\": 600, \"name\": \"Bob\"}\\n'\n b'{\"amount\": 700, \"name\": \"Charlie\"}\\n'\n b'{\"amount\": 800, \"name\": \"Dennis\"}\\n'\n ),\n}\n\n\n@contextmanager\ndef ensure_safe_environment_variables():\n \"\"\"\n Get a context manager to safely set environment variables\n All changes will be undone on close, hence environment variables set\n within this contextmanager will neither persist nor change global state.\n \"\"\"\n saved_environ = dict(os.environ)\n try:\n yield\n finally:\n os.environ.clear()\n os.environ.update(saved_environ)\n\n\n@pytest.fixture\ndef s3so():\n return dict(client_kwargs={\"endpoint_url\": \"http://127.0.0.1:5555/\"})\n\n\nendpoint_uri = \"http://127.0.0.1:5555/\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_s3_base_s3_base.with_ensure_safe_environm.try_.except_subprocess_Timeout.if_sys_platform_win32.subprocess_call_TASKKILL": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_s3_base_s3_base.with_ensure_safe_environm.try_.except_subprocess_Timeout.if_sys_platform_win32.subprocess_call_TASKKILL", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_s3.py", "file_name": "test_s3.py", "file_type": "text/x-python", "category": "test", "start_line": 75, "end_line": 108, "span_ids": ["s3_base"], "tokens": 238}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.fixture(scope=\"module\")\ndef s3_base():\n with ensure_safe_environment_variables():\n os.environ[\"AWS_ACCESS_KEY_ID\"] = \"foobar_key\"\n os.environ[\"AWS_SECRET_ACCESS_KEY\"] = \"foobar_secret\"\n\n # pipe to null to avoid logging in terminal\n proc = subprocess.Popen(\n shlex.split(\"moto_server s3 -p 5555\"), stdout=subprocess.DEVNULL\n )\n\n timeout = 8\n while True:\n try:\n # OK to go once server is accepting connections\n r = requests.get(endpoint_uri)\n if r.ok:\n break\n except Exception:\n pass\n timeout -= 0.1\n time.sleep(0.1)\n assert timeout > 0, \"Timed out waiting for moto server\"\n yield\n\n # shut down external process\n proc.terminate()\n try:\n proc.wait(timeout=3)\n except subprocess.TimeoutExpired:\n proc.kill()\n if sys.platform == \"win32\":\n # belt 
& braces\n subprocess.call(\"TASKKILL /F /PID {pid} /T\".format(pid=proc.pid))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_s3_s3_context.try_.finally_.fs_rm_bucket_recursive_T": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_s3_s3_context.try_.finally_.fs_rm_bucket_recursive_T", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_s3.py", "file_name": "test_s3.py", "file_type": "text/x-python", "category": "test", "start_line": 111, "end_line": 131, "span_ids": ["s3_context", "s3"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.fixture\ndef s3(s3_base):\n with s3_context() as fs:\n yield fs\n\n\n@contextmanager\ndef s3_context(bucket=test_bucket_name, files=files):\n client = boto3.client(\"s3\", endpoint_url=endpoint_uri)\n client.create_bucket(Bucket=bucket, ACL=\"public-read-write\")\n for f, data in files.items():\n client.put_object(Bucket=bucket, Key=f, Body=data)\n fs = s3fs.S3FileSystem(\n anon=True, client_kwargs={\"endpoint_url\": \"http://127.0.0.1:5555/\"}\n )\n s3fs.S3FileSystem.clear_instance_cache()\n fs.invalidate_cache()\n try:\n yield fs\n finally:\n fs.rm(bucket, recursive=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_s3_with_yellow_tripdata_s3_with_yellow_tripdata.data._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_s3_with_yellow_tripdata_s3_with_yellow_tripdata.data._", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_s3.py", "file_name": "test_s3.py", "file_type": "text/x-python", "category": "test", "start_line": 134, "end_line": 204, "span_ids": ["s3_with_yellow_tripdata"], "tokens": 1161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.fixture()\n@pytest.mark.slow\ndef s3_with_yellow_tripdata(s3):\n \"\"\"\n Fixture with sample yellowtrip CSVs loaded into S3.\n\n Provides the following CSVs:\n\n * s3://test/nyc-taxi/2015/yellow_tripdata_2015-01.csv\n * s3://test/nyc-taxi/2014/yellow_tripdata_2015-mm.csv\n for mm from 01 - 12.\n \"\"\"\n pd = pytest.importorskip(\"pandas\")\n\n data = {\n \"VendorID\": {0: 2, 1: 1, 2: 1, 3: 1, 4: 1},\n \"tpep_pickup_datetime\": {\n 0: \"2015-01-15 19:05:39\",\n 1: \"2015-01-10 20:33:38\",\n 2: \"2015-01-10 20:33:38\",\n 3: \"2015-01-10 20:33:39\",\n 4: \"2015-01-10 20:33:39\",\n },\n 
\"tpep_dropoff_datetime\": {\n 0: \"2015-01-15 19:23:42\",\n 1: \"2015-01-10 20:53:28\",\n 2: \"2015-01-10 20:43:41\",\n 3: \"2015-01-10 20:35:31\",\n 4: \"2015-01-10 20:52:58\",\n },\n \"passenger_count\": {0: 1, 1: 1, 2: 1, 3: 1, 4: 1},\n \"trip_distance\": {0: 1.59, 1: 3.3, 2: 1.8, 3: 0.5, 4: 3.0},\n \"pickup_longitude\": {\n 0: -73.993896484375,\n 1: -74.00164794921875,\n 2: -73.96334075927734,\n 3: -74.00908660888672,\n 4: -73.97117614746094,\n },\n \"pickup_latitude\": {\n 0: 40.7501106262207,\n 1: 40.7242431640625,\n 2: 40.80278778076172,\n 3: 40.71381759643555,\n 4: 40.762428283691406,\n },\n \"RateCodeID\": {0: 1, 1: 1, 2: 1, 3: 1, 4: 1},\n \"store_and_fwd_flag\": {0: \"N\", 1: \"N\", 2: \"N\", 3: \"N\", 4: \"N\"},\n \"dropoff_longitude\": {\n 0: -73.97478485107422,\n 1: -73.99441528320312,\n 2: -73.95182037353516,\n 3: -74.00432586669923,\n 4: -74.00418090820312,\n },\n \"dropoff_latitude\": {\n 0: 40.75061798095703,\n 1: 40.75910949707031,\n 2: 40.82441329956055,\n 3: 40.71998596191406,\n 4: 40.742652893066406,\n },\n \"payment_type\": {0: 1, 1: 1, 2: 2, 3: 2, 4: 2},\n \"fare_amount\": {0: 12.0, 1: 14.5, 2: 9.5, 3: 3.5, 4: 15.0},\n \"extra\": {0: 1.0, 1: 0.5, 2: 0.5, 3: 0.5, 4: 0.5},\n \"mta_tax\": {0: 0.5, 1: 0.5, 2: 0.5, 3: 0.5, 4: 0.5},\n \"tip_amount\": {0: 3.25, 1: 2.0, 2: 0.0, 3: 0.0, 4: 0.0},\n \"tolls_amount\": {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0},\n \"improvement_surcharge\": {0: 0.3, 1: 0.3, 2: 0.3, 3: 0.3, 4: 0.3},\n \"total_amount\": {0: 17.05, 1: 17.8, 2: 10.8, 3: 4.8, 4: 16.3},\n }\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_s3_with_yellow_tripdata.sample_s3_with_yellow_tripdata.yield": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_s3_with_yellow_tripdata.sample_s3_with_yellow_tripdata.yield", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_s3.py", "file_name": "test_s3.py", "file_type": "text/x-python", "category": "test", "start_line": 205, "end_line": 219, "span_ids": ["s3_with_yellow_tripdata"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.fixture()\n@pytest.mark.slow\ndef s3_with_yellow_tripdata(s3):\n # ... 
other code\n sample = pd.DataFrame(data)\n df = sample.take(np.arange(5).repeat(10000))\n file = io.BytesIO()\n sfile = io.TextIOWrapper(file)\n df.to_csv(sfile, index=False)\n\n key = \"nyc-taxi/2015/yellow_tripdata_2015-01.csv\"\n client = boto3.client(\"s3\", endpoint_url=\"http://127.0.0.1:5555/\")\n client.put_object(Bucket=test_bucket_name, Key=key, Body=file)\n key = \"nyc-taxi/2014/yellow_tripdata_2014-{:0>2d}.csv\"\n\n for i in range(1, 13):\n file.seek(0)\n client.put_object(Bucket=test_bucket_name, Key=key.format(i), Body=file)\n yield", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_get_s3_test_get_s3.None_1.DaskS3FileSystem_secret_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_get_s3_test_get_s3.None_1.DaskS3FileSystem_secret_", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_s3.py", "file_name": "test_s3.py", "file_type": "text/x-python", "category": "test", "start_line": 222, "end_line": 234, "span_ids": ["test_get_s3"], "tokens": 118}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_get_s3():\n s3 = DaskS3FileSystem(key=\"key\", secret=\"secret\")\n assert s3.key == \"key\"\n assert s3.secret == \"secret\"\n\n s3 = DaskS3FileSystem(username=\"key\", password=\"secret\")\n assert s3.key == \"key\"\n assert s3.secret == \"secret\"\n\n with pytest.raises(KeyError):\n DaskS3FileSystem(key=\"key\", username=\"key\")\n with pytest.raises(KeyError):\n DaskS3FileSystem(secret=\"key\", password=\"key\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_open_files_write_test_open_files_write.assert_set_list_files_val": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_open_files_write_test_open_files_write.assert_set_list_files_val", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_s3.py", "file_name": "test_s3.py", "file_type": "text/x-python", "category": "test", "start_line": 237, "end_line": 247, "span_ids": ["test_open_files_write"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_open_files_write(s3, s3so):\n paths = [\"s3://\" + test_bucket_name + \"/more/\" + f for f in files]\n fils = open_files(paths, mode=\"wb\", **s3so)\n for fil, data in zip(fils, files.values()):\n with fil as f:\n f.write(data)\n sample, values = read_bytes(\n 
\"s3://\" + test_bucket_name + \"/more/test/accounts.*\", **s3so\n )\n results = compute(*concat(values))\n assert set(list(files.values())) == set(results)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_read_bytes_test_read_bytes.assert_set_results_se": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_read_bytes_test_read_bytes.assert_set_results_se", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_s3.py", "file_name": "test_s3.py", "file_type": "text/x-python", "category": "test", "start_line": 250, "end_line": 262, "span_ids": ["test_read_bytes"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_bytes(s3, s3so):\n sample, values = read_bytes(\"s3://\" + test_bucket_name + \"/test/accounts.*\", **s3so)\n assert isinstance(sample, bytes)\n assert sample[:5] == files[sorted(files)[0]][:5]\n assert sample.endswith(b\"\\n\")\n\n assert isinstance(values, (list, tuple))\n assert isinstance(values[0], (list, tuple))\n assert hasattr(values[0][0], \"dask\")\n\n assert sum(map(len, values)) >= len(files)\n results = compute(*concat(values))\n assert set(results) == set(files.values())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_read_bytes_sample_delimiter_test_read_bytes_sample_delimiter.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_read_bytes_sample_delimiter_test_read_bytes_sample_delimiter.None_5", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_s3.py", "file_name": "test_s3.py", "file_type": "text/x-python", "category": "test", "start_line": 265, "end_line": 286, "span_ids": ["test_read_bytes_sample_delimiter"], "tokens": 168}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_bytes_sample_delimiter(s3, s3so):\n sample, values = read_bytes(\n \"s3://\" + test_bucket_name + \"/test/accounts.*\",\n sample=80,\n delimiter=b\"\\n\",\n **s3so\n )\n assert sample.endswith(b\"\\n\")\n sample, values = read_bytes(\n \"s3://\" + test_bucket_name + \"/test/accounts.1.json\",\n sample=80,\n delimiter=b\"\\n\",\n **s3so\n )\n assert sample.endswith(b\"\\n\")\n sample, values = read_bytes(\n \"s3://\" + test_bucket_name + \"/test/accounts.1.json\",\n sample=2,\n delimiter=b\"\\n\",\n **s3so\n )\n assert sample.endswith(b\"\\n\")", 
"start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_read_bytes_non_existing_glob_test_read_bytes_blocksize_on_large_data.assert_len_L_12": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_read_bytes_non_existing_glob_test_read_bytes_blocksize_on_large_data.assert_len_L_12", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_s3.py", "file_name": "test_s3.py", "file_type": "text/x-python", "category": "test", "start_line": 289, "end_line": 316, "span_ids": ["test_read_bytes_blocksize_none", "test_read_bytes_non_existing_glob", "test_read_bytes_blocksize_on_large_data"], "tokens": 237}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_bytes_non_existing_glob(s3, s3so):\n with pytest.raises(IOError):\n read_bytes(\"s3://\" + test_bucket_name + \"/non-existing/*\", **s3so)\n\n\ndef test_read_bytes_blocksize_none(s3, s3so):\n _, values = read_bytes(\n \"s3://\" + test_bucket_name + \"/test/accounts.*\", blocksize=None, **s3so\n )\n assert sum(map(len, values)) == len(files)\n\n\ndef test_read_bytes_blocksize_on_large_data(s3_with_yellow_tripdata, s3so):\n _, L = read_bytes(\n \"s3://{}/nyc-taxi/2015/yellow_tripdata_2015-01.csv\".format(test_bucket_name),\n blocksize=None,\n anon=True,\n **s3so\n )\n assert len(L) == 1\n\n _, L = read_bytes(\n \"s3://{}/nyc-taxi/2014/*.csv\".format(test_bucket_name),\n blocksize=None,\n anon=True,\n **s3so\n )\n assert len(L) == 12", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_read_bytes_block_test_read_bytes_block.assert_set_ourlines_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_read_bytes_block_test_read_bytes_block.assert_set_ourlines_s", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_s3.py", "file_name": "test_s3.py", "file_type": "text/x-python", "category": "test", "start_line": 319, "end_line": 331, "span_ids": ["test_read_bytes_block"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"blocksize\", [5, 15, 45, 1500])\ndef test_read_bytes_block(s3, blocksize, s3so):\n _, vals = read_bytes(\n \"s3://\" + test_bucket_name + \"/test/account*\", blocksize=blocksize, **s3so\n )\n assert list(map(len, vals)) == [(len(v) // blocksize + 1) for v in files.values()]\n\n results = compute(*concat(vals))\n 
assert sum(len(r) for r in results) == sum(len(v) for v in files.values())\n\n ourlines = b\"\".join(results).split(b\"\\n\")\n testlines = b\"\".join(files.values()).split(b\"\\n\")\n assert set(ourlines) == set(testlines)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_read_bytes_delimited_test_read_bytes_delimited.assert_ours_test": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_read_bytes_delimited_test_read_bytes_delimited.assert_ours_test", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_s3.py", "file_name": "test_s3.py", "file_type": "text/x-python", "category": "test", "start_line": 334, "end_line": 371, "span_ids": ["test_read_bytes_delimited"], "tokens": 356}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"blocksize\", [5, 15, 45, 1500])\ndef test_read_bytes_delimited(s3, blocksize, s3so):\n _, values = read_bytes(\n \"s3://\" + test_bucket_name + \"/test/accounts*\",\n blocksize=blocksize,\n delimiter=b\"\\n\",\n **s3so\n )\n _, values2 = read_bytes(\n \"s3://\" + test_bucket_name + \"/test/accounts*\",\n blocksize=blocksize,\n delimiter=b\"foo\",\n **s3so\n )\n assert [a.key for a in concat(values)] != [b.key for b in concat(values2)]\n\n results = compute(*concat(values))\n res = [r for r in results if r]\n assert all(r.endswith(b\"\\n\") for r in res)\n ourlines = b\"\".join(res).split(b\"\\n\")\n testlines = b\"\".join(files[k] for k in sorted(files)).split(b\"\\n\")\n assert ourlines == testlines\n\n # delimiter not at the end\n d = b\"}\"\n _, values = read_bytes(\n \"s3://\" + test_bucket_name + \"/test/accounts*\",\n blocksize=blocksize,\n delimiter=d,\n **s3so\n )\n results = compute(*concat(values))\n res = [r for r in results if r]\n # All should end in } except EOF\n assert sum(r.endswith(b\"}\") for r in res) == len(res) - 2\n ours = b\"\".join(res)\n test = b\"\".join(files[v] for v in sorted(files))\n assert ours == test", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_compression_test_compression.with_s3_context_compress.assert_b_join_results_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_compression_test_compression.with_s3_context_compress.assert_b_join_results_", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_s3.py", "file_name": "test_s3.py", "file_type": "text/x-python", "category": "test", "start_line": 374, "end_line": 401, "span_ids": ["test_compression"], "tokens": 229}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"fmt,blocksize\", [(fmt, None) for fmt in compr] + [(fmt, 10) for fmt in compr]\n)\ndef test_compression(s3, fmt, blocksize, s3so):\n if fmt not in compress:\n pytest.skip(\"compression function not provided\")\n s3._cache.clear()\n with s3_context(\"compress\", valmap(compress[fmt], files)):\n if fmt and blocksize:\n with pytest.raises(ValueError):\n read_bytes(\n \"s3://compress/test/accounts.*\",\n compression=fmt,\n blocksize=blocksize,\n **s3so\n )\n return\n sample, values = read_bytes(\n \"s3://compress/test/accounts.*\",\n compression=fmt,\n blocksize=blocksize,\n **s3so\n )\n assert sample.startswith(files[sorted(files)[0]][:10])\n assert sample.endswith(b\"\\n\")\n\n results = compute(*concat(values))\n assert b\"\".join(results) == b\"\".join([files[k] for k in sorted(files)])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_open_files_double.lambda_x_x_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_open_files_double.lambda_x_x_2", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_s3.py", "file_name": "test_s3.py", "file_type": "text/x-python", "category": "test", "start_line": 404, "end_line": 417, "span_ids": ["test_open_files", "impl:20"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"mode\", [\"rt\", \"rb\"])\ndef test_open_files(s3, mode, s3so):\n myfiles = open_files(\n \"s3://\" + test_bucket_name + \"/test/accounts.*\", mode=mode, **s3so\n )\n assert len(myfiles) == len(files)\n for lazy_file, path in zip(myfiles, sorted(files)):\n with lazy_file as f:\n data = f.read()\n sol = files[path]\n assert data == sol if mode == \"rb\" else sol.decode()\n\n\ndouble = lambda x: x * 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_modification_time_read_bytes_test_modification_time_read_bytes.assert_aa__key_for_aa_in": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_modification_time_read_bytes_test_modification_time_read_bytes.assert_aa__key_for_aa_in", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_s3.py", "file_name": "test_s3.py", "file_type": "text/x-python", "category": "test", "start_line": 420, "end_line": 430, "span_ids": ["test_modification_time_read_bytes"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_modification_time_read_bytes(s3, s3so):\n with s3_context(\"compress\", files):\n _, a = read_bytes(\"s3://compress/test/accounts.*\", anon=True, **s3so)\n _, b = read_bytes(\"s3://compress/test/accounts.*\", anon=True, **s3so)\n\n assert [aa._key for aa in concat(a)] == [bb._key for bb in concat(b)]\n\n with s3_context(\"compress\", valmap(double, files)):\n _, c = read_bytes(\"s3://compress/test/accounts.*\", anon=True, **s3so)\n\n assert [aa._key for aa in concat(a)] != [cc._key for cc in concat(c)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_parquet_test_parquet.tm_assert_frame_equal_dat": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_parquet_test_parquet.tm_assert_frame_equal_dat", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_s3.py", "file_name": "test_s3.py", "file_type": "text/x-python", "category": "test", "start_line": 433, "end_line": 468, "span_ids": ["test_parquet"], "tokens": 358}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"engine\", [\"pyarrow\", \"fastparquet\"])\n@numpy_120_mark\ndef test_parquet(s3, engine, s3so):\n dd = pytest.importorskip(\"dask.dataframe\")\n from dask.dataframe._compat import tm\n\n lib = pytest.importorskip(engine)\n if engine == \"pyarrow\" and LooseVersion(lib.__version__) < \"0.13.1\":\n pytest.skip(\"pyarrow < 0.13.1 not supported for parquet\")\n import pandas as pd\n import numpy as np\n\n url = \"s3://%s/test.parquet\" % test_bucket_name\n\n data = pd.DataFrame(\n {\n \"i32\": np.arange(1000, dtype=np.int32),\n \"i64\": np.arange(1000, dtype=np.int64),\n \"f\": np.arange(1000, dtype=np.float64),\n \"bhello\": np.random.choice([\"hello\", \"you\", \"people\"], size=1000).astype(\n \"O\"\n ),\n },\n index=pd.Index(np.arange(1000), name=\"foo\"),\n )\n df = dd.from_pandas(data, chunksize=500)\n df.to_parquet(url, engine=engine, storage_options=s3so)\n\n files = [f.split(\"/\")[-1] for f in s3.ls(url)]\n assert \"_common_metadata\" in files\n assert \"part.0.parquet\" in files\n\n df2 = dd.read_parquet(url, index=\"foo\", engine=engine, storage_options=s3so)\n assert len(df2.divisions) > 1\n\n tm.assert_frame_equal(data, df2.compute())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_parquet_wstoragepars_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_parquet_wstoragepars_", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_s3.py", "file_name": "test_s3.py", "file_type": "text/x-python", "category": "test", "start_line": 471, "end_line": 502, "span_ids": ["test_get_pyarrow_fs_s3", "test_parquet_wstoragepars"], "tokens": 316}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@numpy_120_mark\ndef test_parquet_wstoragepars(s3, s3so):\n dd = pytest.importorskip(\"dask.dataframe\")\n pytest.importorskip(\"fastparquet\")\n\n import pandas as pd\n import numpy as np\n\n url = \"s3://%s/test.parquet\" % test_bucket_name\n\n data = pd.DataFrame({\"i32\": np.array([0, 5, 2, 5])})\n df = dd.from_pandas(data, chunksize=500)\n df.to_parquet(url, write_index=False, storage_options=s3so)\n\n dd.read_parquet(url, storage_options=dict(**s3so, **{\"default_fill_cache\": False}))\n assert s3.current().default_fill_cache is False\n dd.read_parquet(url, storage_options=dict(**s3so, **{\"default_fill_cache\": True}))\n assert s3.current().default_fill_cache is True\n\n dd.read_parquet(\n url, storage_options=dict(**s3so, **{\"default_block_size\": 2 ** 20})\n )\n assert s3.current().default_block_size == 2 ** 20\n with s3.current().open(url + \"/_metadata\") as f:\n assert f.blocksize == 2 ** 20\n\n\ndef test_get_pyarrow_fs_s3(s3):\n pa = pytest.importorskip(\"pyarrow\")\n fs = DaskS3FileSystem(anon=True)\n assert isinstance(fs, pa.filesystem.FileSystem)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/utils.py_io_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/utils.py_io_", "embedding": null, "metadata": {"file_path": "dask/bytes/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 25, "span_ids": ["imports", "zip_compress", "impl"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import io\nimport gzip\nimport bz2\nimport lzma\nimport zipfile\n\n\ndef zip_compress(data):\n \"\"\"Write data into zipfile and return the bytes\"\"\"\n out = io.BytesIO()\n with zipfile.ZipFile(file=out, mode=\"w\") as z:\n with z.open(\"myfile\", \"w\") as zf:\n zf.write(data)\n out.seek(0)\n return out.read()\n\n\ncompress = {\n \"gzip\": gzip.compress,\n \"bz2\": bz2.compress,\n None: lambda x: x,\n \"xz\": lzma.compress,\n \"zip\": zip_compress,\n}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/cache.py_Callback_Cache._pretask.self_starttimes_key_de": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/cache.py_Callback_Cache._pretask.self_starttimes_key_de", "embedding": null, "metadata": {"file_path": "dask/cache.py", "file_name": "cache.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 52, "span_ids": ["Cache._pretask", "Cache", "imports", "Cache._start", "Cache.__init__"], "tokens": 357}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from .callbacks import Callback\nfrom timeit import default_timer\nfrom numbers import Number\nimport sys\n\noverhead = sys.getsizeof(1.23) * 4 + sys.getsizeof(()) * 4\n\n\nclass Cache(Callback):\n \"\"\"Use cache for computation\n\n Examples\n --------\n\n >>> cache = Cache(1e9) # doctest: +SKIP\n\n The cache can be used locally as a context manager around ``compute`` or\n ``get`` calls:\n\n >>> with cache: # doctest: +SKIP\n ... result = x.compute()\n\n You can also register a cache globally, so that it works for all\n computations:\n\n >>> cache.register() # doctest: +SKIP\n >>> cache.unregister() # doctest: +SKIP\n \"\"\"\n\n def __init__(self, cache, *args, **kwargs):\n try:\n import cachey\n except ImportError as ex:\n raise ImportError(\n 'Cache requires cachey, \"{ex}\" problem ' \"importing\".format(ex=str(ex))\n ) from ex\n self._nbytes = cachey.nbytes\n if isinstance(cache, Number):\n cache = cachey.Cache(cache, *args, **kwargs)\n else:\n assert not args and not kwargs\n self.cache = cache\n self.starttimes = dict()\n\n def _start(self, dsk):\n self.durations = dict()\n overlap = set(dsk) & set(self.cache.data)\n for key in overlap:\n dsk[key] = self.cache.data[key]\n\n def _pretask(self, key, dsk, state):\n self.starttimes[key] = default_timer()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/cache.py_Cache._posttask_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/cache.py_Cache._posttask_", "embedding": null, "metadata": {"file_path": "dask/cache.py", "file_name": "cache.py", "file_type": "text/x-python", "category": "implementation", "start_line": 54, "end_line": 66, "span_ids": ["Cache._finish", "Cache._posttask"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Cache(Callback):\n\n def _posttask(self, key, value, dsk, state, id):\n duration = default_timer() - self.starttimes[key]\n deps = state[\"dependencies\"][key]\n if deps:\n duration += max(self.durations.get(k, 0) for k in deps)\n self.durations[key] = duration\n nb = self._nbytes(value) + overhead + 
sys.getsizeof(key) * 4\n self.cache.put(key, value, cost=duration / nb / 1e9, nbytes=nb)\n\n def _finish(self, dsk, state, errored):\n self.starttimes.clear()\n self.durations.clear()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/callbacks.py_from_contextlib_import_co_Callback.unregister.Callback_active_remove_se": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/callbacks.py_from_contextlib_import_co_Callback.unregister.Callback_active_remove_se", "embedding": null, "metadata": {"file_path": "dask/callbacks.py", "file_name": "callbacks.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 79, "span_ids": ["Callback._callback", "Callback.unregister", "imports", "Callback.__enter__", "Callback", "Callback.__init__", "Callback.__exit__", "Callback.register"], "tokens": 550}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from contextlib import contextmanager\n\n__all__ = [\"Callback\", \"add_callbacks\"]\n\n\nclass Callback(object):\n \"\"\"Base class for using the callback mechanism\n\n Create a callback with functions of the following signatures:\n\n >>> def start(dsk):\n ... pass\n >>> def start_state(dsk, state):\n ... pass\n >>> def pretask(key, dsk, state):\n ... pass\n >>> def posttask(key, result, dsk, state, worker_id):\n ... pass\n >>> def finish(dsk, state, failed):\n ... pass\n\n You may then construct a callback object with any number of them\n\n >>> cb = Callback(pretask=pretask, finish=finish) # doctest: +SKIP\n\n And use it either as a context manager over a compute/get call\n\n >>> with cb: # doctest: +SKIP\n ... x.compute() # doctest: +SKIP\n\n Or globally with the ``register`` method\n\n >>> cb.register() # doctest: +SKIP\n >>> cb.unregister() # doctest: +SKIP\n\n Alternatively subclass the ``Callback`` class with your own methods.\n\n >>> class PrintKeys(Callback): # doctest: +SKIP\n ... def _pretask(self, key, dask, state):\n ... print(\"Computing: {0}!\".format(repr(key)))\n\n >>> with PrintKeys(): # doctest: +SKIP\n ... 
x.compute() # doctest: +SKIP\n \"\"\"\n\n active = set()\n\n def __init__(\n self, start=None, start_state=None, pretask=None, posttask=None, finish=None\n ):\n if start:\n self._start = start\n if start_state:\n self._start_state = start_state\n if pretask:\n self._pretask = pretask\n if posttask:\n self._posttask = posttask\n if finish:\n self._finish = finish\n\n @property\n def _callback(self):\n fields = [\"_start\", \"_start_state\", \"_pretask\", \"_posttask\", \"_finish\"]\n return tuple(getattr(self, i, None) for i in fields)\n\n def __enter__(self):\n self._cm = add_callbacks(self)\n self._cm.__enter__()\n return self\n\n def __exit__(self, *args):\n self._cm.__exit__(*args)\n\n def register(self):\n Callback.active.add(self._callback)\n\n def unregister(self):\n Callback.active.remove(self._callback)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/callbacks.py_unpack_callbacks_normalize_callback.if_isinstance_cb_Callbac.else_.raise_TypeError_Callback": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/callbacks.py_unpack_callbacks_normalize_callback.if_isinstance_cb_Callbac.else_.raise_TypeError_Callback", "embedding": null, "metadata": {"file_path": "dask/callbacks.py", "file_name": "callbacks.py", "file_type": "text/x-python", "category": "implementation", "start_line": 82, "end_line": 113, "span_ids": ["normalize_callback", "unpack_callbacks", "local_callbacks"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def unpack_callbacks(cbs):\n \"\"\"Take an iterable of callbacks, return a list of each callback.\"\"\"\n if cbs:\n return [[i for i in f if i] for f in zip(*cbs)]\n else:\n return [(), (), (), (), ()]\n\n\n@contextmanager\ndef local_callbacks(callbacks=None):\n \"\"\"Allows callbacks to work with nested schedulers.\n\n Callbacks will only be used by the first started scheduler they encounter.\n This means that only the outermost scheduler will use global callbacks.\"\"\"\n global_callbacks = callbacks is None\n if global_callbacks:\n callbacks, Callback.active = Callback.active, set()\n try:\n yield callbacks or ()\n finally:\n if global_callbacks:\n Callback.active = callbacks\n\n\ndef normalize_callback(cb):\n \"\"\"Normalizes a callback to a tuple\"\"\"\n if isinstance(cb, Callback):\n return cb._callback\n elif isinstance(cb, tuple):\n return cb\n else:\n raise TypeError(\"Callbacks must be either `Callback` or `tuple`\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/callbacks.py_add_callbacks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/callbacks.py_add_callbacks_", "embedding": null, "metadata": {"file_path": "dask/callbacks.py", "file_name": "callbacks.py", 
"file_type": "text/x-python", "category": "implementation", "start_line": 116, "end_line": 142, "span_ids": ["add_callbacks.__init__", "add_callbacks.__exit__", "add_callbacks.__enter__", "add_callbacks"], "tokens": 187}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class add_callbacks(object):\n \"\"\"Context manager for callbacks.\n\n Takes several callbacks and applies them only in the enclosed context.\n Callbacks can either be represented as a ``Callback`` object, or as a tuple\n of length 4.\n\n Examples\n --------\n >>> def pretask(key, dsk, state):\n ... print(\"Now running {0}\").format(key)\n >>> callbacks = (None, pretask, None, None)\n >>> with add_callbacks(callbacks): # doctest: +SKIP\n ... res.compute()\n \"\"\"\n\n def __init__(self, *callbacks):\n self.callbacks = [normalize_callback(c) for c in callbacks]\n Callback.active.update(self.callbacks)\n\n def __enter__(self):\n return\n\n def __exit__(self, type, value, traceback):\n for c in self.callbacks:\n Callback.active.discard(c)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/compatibility.py_sys_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/compatibility.py_sys_", "embedding": null, "metadata": {"file_path": "dask/compatibility.py", "file_name": "compatibility.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 23, "span_ids": ["imports"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import sys\nfrom distutils.version import LooseVersion\n\n# TODO: remove this import once dask requires distributed > 2.3.2\nfrom .utils import apply # noqa\n\n# TODO: remove this once dask requires distributed >= 2.2.0\nunicode = str # noqa\n\ntry:\n from dataclasses import is_dataclass, fields as dataclass_fields\n\nexcept ImportError:\n\n def is_dataclass(x):\n return False\n\n def dataclass_fields(x):\n return []\n\n\nPY_VERSION = LooseVersion(\".\".join(map(str, sys.version_info[:3])))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_ast_defaults._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_ast_defaults._", "embedding": null, "metadata": {"file_path": "dask/config.py", "file_name": "config.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 35, "span_ids": ["imports"], "tokens": 161}, "excluded_embed_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import ast\nimport builtins\nfrom collections.abc import Mapping\nimport os\nimport sys\nimport threading\nimport warnings\n\nimport yaml\n\n\nno_default = \"__no_default__\"\n\n\npaths = [\n os.getenv(\"DASK_ROOT_CONFIG\", \"/etc/dask\"),\n os.path.join(sys.prefix, \"etc\", \"dask\"),\n os.path.join(os.path.expanduser(\"~\"), \".config\", \"dask\"),\n os.path.join(os.path.expanduser(\"~\"), \".dask\"),\n]\n\nif \"DASK_CONFIG\" in os.environ:\n PATH = os.environ[\"DASK_CONFIG\"]\n paths.append(PATH)\nelse:\n PATH = os.path.join(os.path.expanduser(\"~\"), \".config\", \"dask\")\n\n\nglobal_config = config = {}\n\n\nconfig_lock = threading.Lock()\n\n\ndefaults = []", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_canonical_name_canonical_name.return.k": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_canonical_name_canonical_name.return.k", "embedding": null, "metadata": {"file_path": "dask/config.py", "file_name": "config.py", "file_type": "text/x-python", "category": "implementation", "start_line": 38, "end_line": 58, "span_ids": ["canonical_name"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def canonical_name(k, config):\n \"\"\"Return the canonical name for a key.\n\n Handles user choice of '-' or '_' conventions by standardizing on whichever\n version was set first. If a key already exists in either hyphen or\n underscore form, the existing version is the canonical name. 
If neither\n version exists the original key is used as is.\n \"\"\"\n try:\n if k in config:\n return k\n except TypeError:\n # config is not a mapping, return the same name as provided\n return k\n\n altk = k.replace(\"_\", \"-\") if \"_\" in k else k.replace(\"-\", \"_\")\n\n if altk in config:\n return altk\n\n return k", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_update_update.return.old": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_update_update.return.old", "embedding": null, "metadata": {"file_path": "dask/config.py", "file_name": "config.py", "file_type": "text/x-python", "category": "implementation", "start_line": 61, "end_line": 101, "span_ids": ["update"], "tokens": 333}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def update(old, new, priority=\"new\"):\n \"\"\"Update a nested dictionary with values from another\n\n This is like dict.update except that it smoothly merges nested values\n\n This operates in-place and modifies old\n\n Parameters\n ----------\n priority: string {'old', 'new'}\n If new (default) then the new dictionary has preference.\n Otherwise the old dictionary does.\n\n Examples\n --------\n >>> a = {'x': 1, 'y': {'a': 2}}\n >>> b = {'x': 2, 'y': {'b': 3}}\n >>> update(a, b) # doctest: +SKIP\n {'x': 2, 'y': {'a': 2, 'b': 3}}\n\n >>> a = {'x': 1, 'y': {'a': 2}}\n >>> b = {'x': 2, 'y': {'b': 3}}\n >>> update(a, b, priority='old') # doctest: +SKIP\n {'x': 1, 'y': {'a': 2, 'b': 3}}\n\n See Also\n --------\n dask.config.merge\n \"\"\"\n for k, v in new.items():\n k = canonical_name(k, old)\n\n if isinstance(v, Mapping):\n if k not in old or old[k] is None:\n old[k] = {}\n update(old[k], v, priority=priority)\n else:\n if priority == \"new\" or k not in old:\n old[k] = v\n\n return old", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_merge_merge.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_merge_merge.return.result", "embedding": null, "metadata": {"file_path": "dask/config.py", "file_name": "config.py", "file_type": "text/x-python", "category": "implementation", "start_line": 104, "end_line": 123, "span_ids": ["merge"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def merge(*dicts):\n \"\"\"Update a sequence of nested dictionaries\n\n This prefers the values in the latter dictionaries to those in the former\n\n Examples\n 
--------\n >>> a = {'x': 1, 'y': {'a': 2}}\n >>> b = {'y': {'b': 3}}\n >>> merge(a, b) # doctest: +SKIP\n {'x': 1, 'y': {'a': 2, 'b': 3}}\n\n See Also\n --------\n dask.config.update\n \"\"\"\n result = {}\n for d in dicts:\n update(result, d)\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_collect_yaml_collect_yaml.return.configs": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_collect_yaml_collect_yaml.return.configs", "embedding": null, "metadata": {"file_path": "dask/config.py", "file_name": "config.py", "file_type": "text/x-python", "category": "implementation", "start_line": 126, "end_line": 166, "span_ids": ["collect_yaml"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def collect_yaml(paths=paths):\n \"\"\"Collect configuration from yaml files\n\n This searches through a list of paths, expands to find all yaml or json\n files, and then parses each file.\n \"\"\"\n # Find all paths\n file_paths = []\n for path in paths:\n if os.path.exists(path):\n if os.path.isdir(path):\n try:\n file_paths.extend(\n sorted(\n [\n os.path.join(path, p)\n for p in os.listdir(path)\n if os.path.splitext(p)[1].lower()\n in (\".json\", \".yaml\", \".yml\")\n ]\n )\n )\n except OSError:\n # Ignore permission errors\n pass\n else:\n file_paths.append(path)\n\n configs = []\n\n # Parse yaml files\n for path in file_paths:\n try:\n with open(path) as f:\n data = yaml.safe_load(f.read()) or {}\n configs.append(data)\n except (OSError, IOError):\n # Ignore permission errors\n pass\n\n return configs", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_collect_env_collect_env.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_collect_env_collect_env.return.result", "embedding": null, "metadata": {"file_path": "dask/config.py", "file_name": "config.py", "file_type": "text/x-python", "category": "implementation", "start_line": 169, "end_line": 194, "span_ids": ["collect_env"], "tokens": 205}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def collect_env(env=None):\n \"\"\"Collect config from environment variables\n\n This grabs environment variables of the form \"DASK_FOO__BAR_BAZ=123\" and\n turns these into config variables of the form ``{\"foo\": {\"bar-baz\": 123}}``\n It transforms the key and value in the following way:\n\n - Lower-cases the key text\n - Treats 
``__`` (double-underscore) as nested access\n - Calls ``ast.literal_eval`` on the value\n \"\"\"\n if env is None:\n env = os.environ\n d = {}\n for name, value in env.items():\n if name.startswith(\"DASK_\"):\n varname = name[5:].lower().replace(\"__\", \".\")\n try:\n d[varname] = ast.literal_eval(value)\n except (SyntaxError, ValueError):\n d[varname] = value\n\n result = {}\n set(d, config=result)\n\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_ensure_file_ensure_file.try_.except_IOError_OSError_.pass": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_ensure_file_ensure_file.try_.except_IOError_OSError_.pass", "embedding": null, "metadata": {"file_path": "dask/config.py", "file_name": "config.py", "file_type": "text/x-python", "category": "implementation", "start_line": 197, "end_line": 255, "span_ids": ["ensure_file"], "tokens": 422}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def ensure_file(source, destination=None, comment=True):\n \"\"\"\n Copy file to default location if it does not already exist\n\n This tries to move a default configuration file to a default location if\n it does not already exist. It also comments out that file by default.\n\n This is to be used by downstream modules (like dask.distributed) that may\n have default configuration files that they wish to include in the default\n configuration path.\n\n Parameters\n ----------\n source : string, filename\n Source configuration file, typically within a source directory.\n destination : string, directory\n Destination directory. Configurable by ``DASK_CONFIG`` environment\n variable, falling back to ~/.config/dask.\n comment : bool, True by default\n Whether or not to comment out the config file when copying.\n \"\"\"\n if destination is None:\n destination = PATH\n\n # destination is a file and already exists, never overwrite\n if os.path.isfile(destination):\n return\n\n # If destination is not an existing file, interpret as a directory,\n # use the source basename as the filename\n directory = destination\n destination = os.path.join(directory, os.path.basename(source))\n\n try:\n if not os.path.exists(destination):\n os.makedirs(directory, exist_ok=True)\n\n # Atomically create destination. 
Parallel testing discovered\n # a race condition where a process can be busy creating the\n # destination while another process reads an empty config file.\n tmp = \"%s.tmp.%d\" % (destination, os.getpid())\n with open(source) as f:\n lines = list(f)\n\n if comment:\n lines = [\n \"# \" + line if line.strip() and not line.startswith(\"#\") else line\n for line in lines\n ]\n\n with open(tmp, \"w\") as f:\n f.write(\"\".join(lines))\n\n try:\n os.rename(tmp, destination)\n except OSError:\n os.remove(tmp)\n except (IOError, OSError):\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_set_set.__exit__.for_op_path_value_in_re.if_op_replace_.else_insert.for_key_in_path_1_.else_.d_pop_path_1_None_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_set_set.__exit__.for_op_path_value_in_re.if_op_replace_.else_insert.for_key_in_path_1_.else_.d_pop_path_1_None_", "embedding": null, "metadata": {"file_path": "dask/config.py", "file_name": "config.py", "file_type": "text/x-python", "category": "implementation", "start_line": 258, "end_line": 326, "span_ids": ["set.__enter__", "set.__init__", "set", "set.__exit__"], "tokens": 471}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class set(object):\n \"\"\"Temporarily set configuration values within a context manager\n\n Parameters\n ----------\n arg : mapping or None, optional\n A mapping of configuration key-value pairs to set.\n **kwargs :\n Additional key-value pairs to set. If ``arg`` is provided, values set\n in ``arg`` will be applied before those in ``kwargs``.\n Double-underscores (``__``) in keyword arguments will be replaced with\n ``.``, allowing nested values to be easily set.\n\n Examples\n --------\n >>> import dask\n\n Set ``'foo.bar'`` in a context, by providing a mapping.\n\n >>> with dask.config.set({'foo.bar': 123}):\n ... pass\n\n Set ``'foo.bar'`` in a context, by providing a keyword argument.\n\n >>> with dask.config.set(foo__bar=123):\n ... 
pass\n\n Set ``'foo.bar'`` globally.\n\n >>> dask.config.set(foo__bar=123) # doctest: +SKIP\n\n See Also\n --------\n dask.config.get\n \"\"\"\n\n def __init__(self, arg=None, config=config, lock=config_lock, **kwargs):\n with lock:\n self.config = config\n self._record = []\n\n if arg is not None:\n for key, value in arg.items():\n key = check_deprecations(key)\n self._assign(key.split(\".\"), value, config)\n if kwargs:\n for key, value in kwargs.items():\n key = key.replace(\"__\", \".\")\n key = check_deprecations(key)\n self._assign(key.split(\".\"), value, config)\n\n def __enter__(self):\n return self.config\n\n def __exit__(self, type, value, traceback):\n for op, path, value in reversed(self._record):\n d = self.config\n if op == \"replace\":\n for key in path[:-1]:\n d = d.setdefault(key, {})\n d[path[-1]] = value\n else: # insert\n for key in path[:-1]:\n try:\n d = d[key]\n except KeyError:\n break\n else:\n d.pop(path[-1], None)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_set._assign_set._assign.if_len_keys_1_.else_.self__assign_keys_1_va": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_set._assign_set._assign.if_len_keys_1_.else_.self__assign_keys_1_va", "embedding": null, "metadata": {"file_path": "dask/config.py", "file_name": "config.py", "file_type": "text/x-python", "category": "implementation", "start_line": 328, "end_line": 362, "span_ids": ["set._assign"], "tokens": 263}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class set(object):\n\n def _assign(self, keys, value, d, path=(), record=True):\n \"\"\"Assign value into a nested configuration dictionary\n\n Parameters\n ----------\n keys : Sequence[str]\n The nested path of keys to assign the value.\n value : object\n d : dict\n The part of the nested dictionary into which we want to assign the\n value\n path : Tuple[str], optional\n The path history up to this point.\n record : bool, optional\n Whether this operation needs to be recorded to allow for rollback.\n \"\"\"\n key = canonical_name(keys[0], d)\n\n path = path + (key,)\n\n if len(keys) == 1:\n if record:\n if key in d:\n self._record.append((\"replace\", path, d[key]))\n else:\n self._record.append((\"insert\", path, None))\n d[key] = value\n else:\n if key not in d:\n if record:\n self._record.append((\"insert\", path, None))\n d[key] = {}\n # No need to record subsequent operations after an insert\n record = False\n self._assign(keys[1:], value, d[key], path, record=record)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_collect_collect.return.merge_configs_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_collect_collect.return.merge_configs_", "embedding": null, "metadata": {"file_path": "dask/config.py", "file_name": "config.py", "file_type": "text/x-python", "category": "implementation", "start_line": 365, "end_line": 391, "span_ids": ["collect"], "tokens": 123}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def collect(paths=paths, env=None):\n \"\"\"\n Collect configuration from paths and environment variables\n\n Parameters\n ----------\n paths : List[str]\n A list of paths to search for yaml config files\n\n env : dict\n The system environment variables\n\n Returns\n -------\n config: dict\n\n See Also\n --------\n dask.config.refresh: collect configuration and update into primary config\n \"\"\"\n if env is None:\n env = os.environ\n\n configs = collect_yaml(paths=paths)\n configs.append(collect_env(env=env))\n\n return merge(*configs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_refresh_refresh.update_config_collect_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_refresh_refresh.update_config_collect_", "embedding": null, "metadata": {"file_path": "dask/config.py", "file_name": "config.py", "file_type": "text/x-python", "category": "implementation", "start_line": 394, "end_line": 423, "span_ids": ["refresh"], "tokens": 201}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def refresh(config=config, defaults=defaults, **kwargs):\n \"\"\"\n Update configuration by re-reading yaml files and env variables\n\n This mutates the global dask.config.config, or the config parameter if\n passed in.\n\n This goes through the following stages:\n\n 1. Clearing out all old configuration\n 2. Updating from the stored defaults from downstream libraries\n (see update_defaults)\n 3. Updating from yaml files and environment variables\n\n Note that some functionality only checks configuration once at startup and\n may not change behavior, even if configuration changes. 
It is recommended\n to restart your python process if convenient to ensure that new\n configuration changes take place.\n\n See Also\n --------\n dask.config.collect: for parameters\n dask.config.update_defaults\n \"\"\"\n config.clear()\n\n for d in defaults:\n update(config, d, priority=\"old\")\n\n update(config, collect(**kwargs))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_get_get.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_get_get.return.result", "embedding": null, "metadata": {"file_path": "dask/config.py", "file_name": "config.py", "file_type": "text/x-python", "category": "implementation", "start_line": 426, "end_line": 459, "span_ids": ["get"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get(key, default=no_default, config=config):\n \"\"\"\n Get elements from global config\n\n Use '.' for nested access\n\n Examples\n --------\n >>> from dask import config\n >>> config.get('foo') # doctest: +SKIP\n {'x': 1, 'y': 2}\n\n >>> config.get('foo.x') # doctest: +SKIP\n 1\n\n >>> config.get('foo.x.y', default=123) # doctest: +SKIP\n 123\n\n See Also\n --------\n dask.config.set\n \"\"\"\n keys = key.split(\".\")\n result = config\n for k in keys:\n k = canonical_name(k, result)\n try:\n result = result[k]\n except (TypeError, IndexError, KeyError):\n if default is not no_default:\n return default\n else:\n raise\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_rename_update_defaults.update_config_new_prior": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_rename_update_defaults.update_config_new_prior", "embedding": null, "metadata": {"file_path": "dask/config.py", "file_name": "config.py", "file_type": "text/x-python", "category": "implementation", "start_line": 462, "end_line": 491, "span_ids": ["update_defaults", "rename"], "tokens": 196}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def rename(aliases, config=config):\n \"\"\"Rename old keys to new keys\n\n This helps migrate older configuration versions over time\n \"\"\"\n old = []\n new = {}\n for o, n in aliases.items():\n value = get(o, None, config=config)\n if value is not None:\n old.append(o)\n new[n] = value\n\n for k in old:\n del config[canonical_name(k, config)] # TODO: support nested keys\n\n set(new, config=config)\n\n\ndef update_defaults(new, config=config, 
defaults=defaults):\n \"\"\"Add a new set of defaults to the configuration\n\n It does two things:\n\n 1. Add the defaults to a global collection to be used by refresh later\n 2. Updates the global config with the new configuration\n prioritizing older values over newer ones\n \"\"\"\n defaults.append(new)\n update(config, new, priority=\"old\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_expand_environment_variables_deprecations._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_expand_environment_variables_deprecations._", "embedding": null, "metadata": {"file_path": "dask/config.py", "file_name": "config.py", "file_type": "text/x-python", "category": "implementation", "start_line": 494, "end_line": 531, "span_ids": ["expand_environment_variables", "impl:19"], "tokens": 292}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def expand_environment_variables(config):\n \"\"\"Expand environment variables in a nested config dictionary\n\n This function will recursively search through any nested dictionaries\n and/or lists.\n\n Parameters\n ----------\n config : dict, iterable, or str\n Input object to search for environment variables\n\n Returns\n -------\n config : same type as input\n\n Examples\n --------\n >>> expand_environment_variables({'x': [1, 2, '$USER']}) # doctest: +SKIP\n {'x': [1, 2, 'my-username']}\n \"\"\"\n if isinstance(config, Mapping):\n return {k: expand_environment_variables(v) for k, v in config.items()}\n elif isinstance(config, str):\n return os.path.expandvars(config)\n elif isinstance(config, (list, tuple, builtins.set)):\n return type(config)([expand_environment_variables(v) for v in config])\n else:\n return config\n\n\ndeprecations = {\n \"fuse_ave_width\": \"optimization.fuse.ave-width\",\n \"fuse_max_height\": \"optimization.fuse.max-height\",\n \"fuse_max_width\": \"optimization.fuse.max-width\",\n \"fuse_subgraphs\": \"optimization.fuse.subgraphs\",\n \"fuse_rename_keys\": \"optimization.fuse.rename-keys\",\n \"fuse_max_depth_new_edges\": \"optimization.fuse.max-depth-new-edges\",\n}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_check_deprecations_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_check_deprecations_", "embedding": null, "metadata": {"file_path": "dask/config.py", "file_name": "config.py", "file_type": "text/x-python", "category": "implementation", "start_line": 534, "end_line": 589, "span_ids": ["impl:21", "check_deprecations", "_initialize"], "tokens": 361}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", 
"tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def check_deprecations(key: str, deprecations: dict = deprecations):\n \"\"\"Check if the provided value has been renamed or removed\n\n Parameters\n ----------\n key : str\n The configuration key to check\n deprecations : Dict[str, str]\n The mapping of aliases\n\n Examples\n --------\n >>> deprecations = {\"old_key\": \"new_key\", \"invalid\": None}\n >>> check_deprecations(\"old_key\", deprecations=deprecations) # doctest: +SKIP\n UserWarning: Configuration key \"old_key\" has been deprecated. Please use \"new_key\" instead.\n\n >>> check_deprecations(\"invalid\", deprecations=deprecations)\n Traceback (most recent call last):\n ...\n ValueError: Configuration value \"invalid\" has been removed\n\n >>> check_deprecations(\"another_key\", deprecations=deprecations)\n 'another_key'\n\n Returns\n -------\n new: str\n The proper key, whether the original (if no deprecation) or the aliased\n value\n \"\"\"\n if key in deprecations:\n new = deprecations[key]\n if new:\n warnings.warn(\n 'Configuration key \"{}\" has been deprecated. '\n 'Please use \"{}\" instead'.format(key, new)\n )\n return new\n else:\n raise ValueError('Configuration value \"{}\" has been removed'.format(key))\n else:\n return key\n\n\ndef _initialize():\n fn = os.path.join(os.path.dirname(__file__), \"dask.yaml\")\n\n with open(fn) as f:\n _defaults = yaml.safe_load(f)\n\n update_defaults(_defaults)\n\n\nrefresh()\n_initialize()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/context.py___": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/context.py___", "embedding": null, "metadata": {"file_path": "dask/context.py", "file_name": "context.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 66, "span_ids": ["GlobalMethod", "globalmethod", "GlobalMethod.__init__", "GlobalMethod.__get__", "docstring"], "tokens": 406}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"\nControl global computation context\n\"\"\"\nimport threading\nfrom functools import partial\nfrom . import config\n\n_globals = config.config\n\n\nthread_state = threading.local()\n\n\ndef globalmethod(default=None, key=None, falsey=None):\n \"\"\"Allow function to be taken over by globals\n\n This modifies a method so that occurrences of it may be taken over by\n functions registered in the global options. Can be used as a decorator or a\n function.\n\n Parameters\n ----------\n default : callable\n The default callable to use.\n key : str\n Key under which we register this function in the global parameters\n falsey : callable, None, optional\n A function to use if the option is falsey. If not provided, the default\n is used instead.\n\n Examples\n --------\n >>> import dask\n >>> class Foo(object):\n ... @globalmethod(key='bar', falsey=lambda: 3)\n ... 
def bar():\n ... return 1\n >>> f = Foo()\n >>> f.bar()\n 1\n >>> with dask.config.set(bar=lambda: 2):\n ... print(f.bar())\n 2\n >>> with dask.config.set(bar=False):\n ... print(f.bar())\n 3\n \"\"\"\n if default is None:\n return partial(globalmethod, key=key, falsey=falsey)\n return GlobalMethod(default=default, key=key, falsey=falsey)\n\n\nclass GlobalMethod(object):\n def __init__(self, default, key, falsey=None):\n self._default = default\n self._key = key\n self._falsey = falsey\n\n def __get__(self, instance, owner=None):\n if self._key in _globals:\n if _globals[self._key]:\n return _globals[self._key]\n elif self._falsey is not None:\n return self._falsey\n return self._default", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_from_collections_import_d_istask.return.type_x_is_tuple_and_x_an": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_from_collections_import_d_istask.return.type_x_is_tuple_and_x_an", "embedding": null, "metadata": {"file_path": "dask/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 40, "span_ids": ["imports", "ishashable", "istask"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from collections import defaultdict\n\nfrom .utils_test import add, inc # noqa: F401\n\nno_default = \"__no_default__\"\n\n\ndef ishashable(x):\n \"\"\"Is x hashable?\n\n Examples\n --------\n\n >>> ishashable(1)\n True\n >>> ishashable([1])\n False\n \"\"\"\n try:\n hash(x)\n return True\n except TypeError:\n return False\n\n\ndef istask(x):\n \"\"\"Is x a runnable task?\n\n A task is a tuple with a callable first argument\n\n Examples\n --------\n\n >>> inc = lambda x: x + 1\n >>> istask((inc, 1))\n True\n >>> istask(1)\n False\n \"\"\"\n return type(x) is tuple and x and callable(x[0])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_has_tasks_lists_to_tuples.return.res": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_has_tasks_lists_to_tuples.return.res", "embedding": null, "metadata": {"file_path": "dask/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 43, "end_line": 83, "span_ids": ["lists_to_tuples", "has_tasks", "preorder_traversal"], "tokens": 236}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def has_tasks(dsk, x):\n \"\"\"Whether ``x`` 
has anything to compute.\n\n Returns True if:\n - ``x`` is a task\n - ``x`` is a key in ``dsk``\n - ``x`` is a list that contains any tasks or keys\n \"\"\"\n if istask(x):\n return True\n try:\n if x in dsk:\n return True\n except Exception:\n pass\n if isinstance(x, list):\n for i in x:\n if has_tasks(dsk, i):\n return True\n return False\n\n\ndef preorder_traversal(task):\n \"\"\"A generator to preorder-traverse a task.\"\"\"\n\n for item in task:\n if istask(item):\n for i in preorder_traversal(item):\n yield i\n elif isinstance(item, list):\n yield list\n for i in preorder_traversal(item):\n yield i\n else:\n yield item\n\n\ndef lists_to_tuples(res, keys):\n if isinstance(keys, list):\n return tuple(lists_to_tuples(r, k) for r, k in zip(res, keys))\n return res", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py__execute_task__execute_task.if_isinstance_arg_list_.else_.return.arg": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py__execute_task__execute_task.if_isinstance_arg_list_.else_.return.arg", "embedding": null, "metadata": {"file_path": "dask/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 86, "end_line": 127, "span_ids": ["_execute_task"], "tokens": 343}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _execute_task(arg, cache, dsk=None):\n \"\"\"Do the actual work of collecting data and executing a function\n\n Examples\n --------\n\n >>> cache = {'x': 1, 'y': 2}\n\n Compute tasks against a cache\n >>> _execute_task((add, 'x', 1), cache) # Compute task in naive manner\n 2\n >>> _execute_task((add, (inc, 'x'), 1), cache) # Support nested computation\n 3\n\n Also grab data from cache\n >>> _execute_task('x', cache)\n 1\n\n Support nested lists\n >>> list(_execute_task(['x', 'y'], cache))\n [1, 2]\n\n >>> list(map(list, _execute_task([['x', 'y'], ['y', 'x']], cache)))\n [[1, 2], [2, 1]]\n\n >>> _execute_task('foo', cache) # Passes through on non-keys\n 'foo'\n \"\"\"\n if isinstance(arg, list):\n return [_execute_task(a, cache) for a in arg]\n elif istask(arg):\n func, args = arg[0], arg[1:]\n # Note: Don't assign the subtask results to a variable. 
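`istask` and `preorder_traversal` above define how a task tuple is recognized and walked. Restated locally so the snippet runs on its own (the `yield list` sentinel marks where a nested list begins, exactly as in the quoted source):

def istask(x):
    return type(x) is tuple and bool(x) and callable(x[0])

def preorder_traversal(task):
    for item in task:
        if istask(item):
            yield from preorder_traversal(item)
        elif isinstance(item, list):
            yield list                      # sentinel: a nested list starts here
            yield from preorder_traversal(item)
        else:
            yield item

inc = lambda x: x + 1
add = lambda x, y: x + y
task = (add, (inc, "x"), ["y", 1])
# Yields: add, inc, 'x', <class 'list'>, 'y', 1
print(list(preorder_traversal(task)))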
numpy detects\n # temporaries by their reference count and can execute certain\n # operations in-place.\n return func(*(_execute_task(a, cache) for a in args))\n elif not ishashable(arg):\n return arg\n elif arg in cache:\n return cache[arg]\n else:\n return arg", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_get_get.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_get_get.return.result", "embedding": null, "metadata": {"file_path": "dask/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 130, "end_line": 156, "span_ids": ["get"], "tokens": 198}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get(dsk, out, cache=None):\n \"\"\"Get value from Dask\n\n Examples\n --------\n\n >>> inc = lambda x: x + 1\n >>> d = {'x': 1, 'y': (inc, 'x')}\n\n >>> get(d, 'x')\n 1\n >>> get(d, 'y')\n 2\n \"\"\"\n for k in flatten(out) if isinstance(out, list) else [out]:\n if k not in dsk:\n raise KeyError(\"{0} is not a key in the graph\".format(k))\n if cache is None:\n cache = {}\n for key in toposort(dsk):\n task = dsk[key]\n result = _execute_task(task, cache)\n cache[key] = result\n result = _execute_task(out, cache)\n if isinstance(out, list):\n result = lists_to_tuples(result, out)\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_keys_in_tasks_keys_in_tasks.return.ret_if_as_list_else_set_r": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_keys_in_tasks_keys_in_tasks.return.ret_if_as_list_else_set_r", "embedding": null, "metadata": {"file_path": "dask/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 159, "end_line": 191, "span_ids": ["keys_in_tasks"], "tokens": 253}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def keys_in_tasks(keys, tasks, as_list=False):\n \"\"\"Returns the keys in `keys` that are also in `tasks`\n\n Examples\n --------\n >>> dsk = {'x': 1,\n ... 'y': (inc, 'x'),\n ... 'z': (add, 'x', 'y'),\n ... 'w': (inc, 'z'),\n ... 
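`get` above is the miniature scheduler: keys are computed in topological order into a cache, then the requested output is resolved against that cache. A usage sketch, assuming dask is importable:

from dask.core import get

inc = lambda x: x + 1
add = lambda x, y: x + y
dsk = {"x": 1, "y": (inc, "x"), "z": (add, "x", "y")}
assert get(dsk, "z") == 3
# Lists of output keys come back as tuples (via lists_to_tuples)
assert get(dsk, ["x", "y"]) == (1, 2)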
'a': (add, (inc, 'x'), 1)}\n\n >>> keys_in_tasks(dsk, ['x', 'y', 'j']) # doctest: +SKIP\n {'x', 'y'}\n \"\"\"\n ret = []\n while tasks:\n work = []\n for w in tasks:\n typ = type(w)\n if typ is tuple and w and callable(w[0]): # istask(w)\n work.extend(w[1:])\n elif typ is list:\n work.extend(w)\n elif typ is dict:\n work.extend(w.values())\n else:\n try:\n if w in keys:\n ret.append(w)\n except TypeError: # not hashable\n pass\n tasks = work\n return ret if as_list else set(ret)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_get_dependencies_get_dependencies.return.keys_in_tasks_dsk_arg_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_get_dependencies_get_dependencies.return.keys_in_tasks_dsk_arg_", "embedding": null, "metadata": {"file_path": "dask/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 194, "end_line": 230, "span_ids": ["get_dependencies"], "tokens": 277}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_dependencies(dsk, key=None, task=no_default, as_list=False):\n \"\"\"Get the immediate tasks on which this task depends\n\n Examples\n --------\n >>> dsk = {'x': 1,\n ... 'y': (inc, 'x'),\n ... 'z': (add, 'x', 'y'),\n ... 'w': (inc, 'z'),\n ... 
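`keys_in_tasks` scans task specs (tuples, lists, dicts) level by level for values that are keys of the given mapping. A usage sketch, assuming dask is importable (the first argument only needs to support `in`, so a graph dict works):

from dask.core import keys_in_tasks

inc = lambda x: x + 1
add = lambda x, y: x + y
dsk = {"x": 1, "y": (inc, "x")}
spec = (add, (inc, "x"), "y")
# Nested task arguments are unpacked; non-key literals like 1 are ignored
assert keys_in_tasks(dsk, [spec]) == {"x", "y"}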
'a': (add, (inc, 'x'), 1)}\n\n >>> get_dependencies(dsk, 'x')\n set()\n\n >>> get_dependencies(dsk, 'y')\n {'x'}\n\n >>> get_dependencies(dsk, 'z') # doctest: +SKIP\n {'x', 'y'}\n\n >>> get_dependencies(dsk, 'w') # Only direct dependencies\n {'z'}\n\n >>> get_dependencies(dsk, 'a') # Ignore non-keys\n {'x'}\n\n >>> get_dependencies(dsk, task=(inc, 'x')) # provide tasks directly\n {'x'}\n \"\"\"\n if key is not None:\n arg = dsk[key]\n elif task is not no_default:\n arg = task\n else:\n raise ValueError(\"Provide either key or task\")\n\n return keys_in_tasks(dsk, [arg], as_list=as_list)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_get_deps_get_deps.return.dependencies_dependents": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_get_deps_get_deps.return.dependencies_dependents", "embedding": null, "metadata": {"file_path": "dask/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 233, "end_line": 245, "span_ids": ["get_deps"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_deps(dsk):\n \"\"\"Get dependencies and dependents from dask dask graph\n\n >>> dsk = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b')}\n >>> dependencies, dependents = get_deps(dsk)\n >>> dependencies\n {'a': set(), 'b': {'a'}, 'c': {'b'}}\n >>> dependents # doctest: +SKIP\n {'a': {'b'}, 'b': {'c'}, 'c': set()}\n \"\"\"\n dependencies = {k: get_dependencies(dsk, task=v) for k, v in dsk.items()}\n dependents = reverse_dict(dependencies)\n return dependencies, dependents", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_flatten_flatten.if_isinstance_seq_str_.else_.for_item_in_seq_.if_isinstance_item_conta.else_.yield_item": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_flatten_flatten.if_isinstance_seq_str_.else_.for_item_in_seq_.if_isinstance_item_conta.else_.yield_item", "embedding": null, "metadata": {"file_path": "dask/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 248, "end_line": 274, "span_ids": ["flatten"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def flatten(seq, container=list):\n \"\"\"\n\n >>> list(flatten([1]))\n [1]\n\n >>> list(flatten([[1, 2], [1, 2]]))\n [1, 2, 1, 2]\n\n >>> list(flatten([[[1], [2]], [[1], [2]]]))\n [1, 2, 1, 2]\n\n >>> list(flatten(((1, 2), (1, 
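`get_deps` applies `get_dependencies` to every key and then inverts the result with `reverse_dict`, giving both directions of the edge set. A usage sketch, assuming dask is importable:

from dask.core import get_dependencies, get_deps

inc = lambda x: x + 1
dsk = {"a": 1, "b": (inc, "a"), "c": (inc, "b")}
dependencies, dependents = get_deps(dsk)
assert dependencies == {"a": set(), "b": {"a"}, "c": {"b"}}
assert dependents == {"a": {"b"}, "b": {"c"}, "c": set()}
assert get_dependencies(dsk, "c") == {"b"}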
2)))) # Don't flatten tuples\n [(1, 2), (1, 2)]\n\n >>> list(flatten((1, 2, [3, 4]))) # support heterogeneous\n [1, 2, 3, 4]\n \"\"\"\n if isinstance(seq, str):\n yield seq\n else:\n for item in seq:\n if isinstance(item, container):\n for item2 in flatten(item, container=container):\n yield item2\n else:\n yield item", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_reverse_dict_reverse_dict.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_reverse_dict_reverse_dict.return.result", "embedding": null, "metadata": {"file_path": "dask/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 277, "end_line": 292, "span_ids": ["reverse_dict"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def reverse_dict(d):\n \"\"\"\n\n >>> a, b, c = 'abc'\n >>> d = {a: [b, c], b: [c]}\n >>> reverse_dict(d) # doctest: +SKIP\n {'a': set([]), 'b': set(['a']}, 'c': set(['a', 'b'])}\n \"\"\"\n result = defaultdict(set)\n _add = set.add\n for k, vals in d.items():\n result[k]\n for val in vals:\n _add(result[val], k)\n result.default_factory = None\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_subs_subs.return.task_1_tuple_newargs_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_subs_subs.return.task_1_tuple_newargs_", "embedding": null, "metadata": {"file_path": "dask/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 295, "end_line": 329, "span_ids": ["subs"], "tokens": 273}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def subs(task, key, val):\n \"\"\"Perform a substitution on a task\n\n Examples\n --------\n\n >>> subs((inc, 'x'), 'x', 1) # doctest: +SKIP\n (inc, 1)\n \"\"\"\n type_task = type(task)\n if not (type_task is tuple and task and callable(task[0])): # istask(task):\n try:\n if type_task is type(key) and task == key:\n return val\n except Exception:\n pass\n if type_task is list:\n return [subs(x, key, val) for x in task]\n return task\n newargs = []\n hash_key = {key}\n for arg in task[1:]:\n type_arg = type(arg)\n if type_arg is tuple and arg and callable(arg[0]): # istask(task):\n arg = subs(arg, key, val)\n elif type_arg is list:\n arg = [subs(x, key, val) for x in arg]\n else:\n try:\n if arg in hash_key: # Hash and equality match\n arg = val\n 
except TypeError: # not hashable\n pass\n newargs.append(arg)\n return task[:1] + tuple(newargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py__toposort__toposort.return.ordered": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py__toposort__toposort.return.ordered", "embedding": null, "metadata": {"file_path": "dask/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 332, "end_line": 398, "span_ids": ["_toposort"], "tokens": 492}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _toposort(dsk, keys=None, returncycle=False, dependencies=None):\n # Stack-based depth-first search traversal. This is based on Tarjan's\n # method for topological sorting (see wikipedia for pseudocode)\n if keys is None:\n keys = dsk\n elif not isinstance(keys, list):\n keys = [keys]\n if not returncycle:\n ordered = []\n\n # Nodes whose descendents have been completely explored.\n # These nodes are guaranteed to not be part of a cycle.\n completed = set()\n\n # All nodes that have been visited in the current traversal. Because\n # we are doing depth-first search, going \"deeper\" should never result\n # in visiting a node that has already been seen. 
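`subs` above rewrites one key to a value throughout a task, recursing into nested tasks and lists while guarding against unhashable arguments. A usage sketch, assuming dask is importable:

from dask.core import subs

inc = lambda x: x + 1
add = lambda x, y: x + y
task = (add, (inc, "x"), ["x", 1])
# Every occurrence of the key 'x' becomes 5; the callable head is untouched
assert subs(task, "x", 5) == (add, (inc, 5), [5, 1])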
The `seen` and\n # `completed` sets are mutually exclusive; it is okay to visit a node\n # that has already been added to `completed`.\n seen = set()\n\n if dependencies is None:\n dependencies = dict((k, get_dependencies(dsk, k)) for k in dsk)\n\n for key in keys:\n if key in completed:\n continue\n nodes = [key]\n while nodes:\n # Keep current node on the stack until all descendants are visited\n cur = nodes[-1]\n if cur in completed:\n # Already fully traversed descendants of cur\n nodes.pop()\n continue\n seen.add(cur)\n\n # Add direct descendants of cur to nodes stack\n next_nodes = []\n for nxt in dependencies[cur]:\n if nxt not in completed:\n if nxt in seen:\n # Cycle detected!\n cycle = [nxt]\n while nodes[-1] != nxt:\n cycle.append(nodes.pop())\n cycle.append(nodes.pop())\n cycle.reverse()\n if returncycle:\n return cycle\n else:\n cycle = \"->\".join(str(x) for x in cycle)\n raise RuntimeError(\"Cycle detected in Dask: %s\" % cycle)\n next_nodes.append(nxt)\n\n if next_nodes:\n nodes.extend(next_nodes)\n else:\n # cur has no more descendants to explore, so we're done with it\n if not returncycle:\n ordered.append(cur)\n completed.add(cur)\n seen.remove(cur)\n nodes.pop()\n if returncycle:\n return []\n return ordered", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_toposort_getcycle.return._toposort_d_keys_keys_r": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_toposort_getcycle.return._toposort_d_keys_keys_r", "embedding": null, "metadata": {"file_path": "dask/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 401, "end_line": 424, "span_ids": ["getcycle", "toposort"], "tokens": 177}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def toposort(dsk, dependencies=None):\n \"\"\" Return a list of keys of dask sorted in topological order.\"\"\"\n return _toposort(dsk, dependencies=dependencies)\n\n\ndef getcycle(d, keys):\n \"\"\"Return a list of nodes that form a cycle if Dask is not a DAG.\n\n Returns an empty list if no cycle is found.\n\n ``keys`` may be a single key or list of keys.\n\n Examples\n --------\n\n >>> d = {'x': (inc, 'z'), 'y': (inc, 'x'), 'z': (inc, 'y')}\n >>> getcycle(d, 'x')\n ['x', 'z', 'y', 'x']\n\n See Also\n --------\n isdag\n \"\"\"\n return _toposort(d, keys=keys, returncycle=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_isdag_isdag.return.not_getcycle_d_keys_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_isdag_isdag.return.not_getcycle_d_keys_", "embedding": null, "metadata": {"file_path": "dask/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": 
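`_toposort` above is an iterative depth-first search that doubles as a cycle detector; `toposort` and `getcycle` are its two public faces. A usage sketch, assuming dask is importable:

from dask.core import toposort, getcycle

inc = lambda x: x + 1
dsk = {"a": 1, "b": (inc, "a"), "c": (inc, "b")}
order = toposort(dsk)
assert order.index("a") < order.index("b") < order.index("c")

cyclic = {"x": (inc, "z"), "y": (inc, "x"), "z": (inc, "y")}
print(getcycle(cyclic, "x"))  # ['x', 'z', 'y', 'x'], per the docstring above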
"implementation", "start_line": 427, "end_line": 445, "span_ids": ["isdag"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def isdag(d, keys):\n \"\"\"Does Dask form a directed acyclic graph when calculating keys?\n\n ``keys`` may be a single key or list of keys.\n\n Examples\n --------\n\n >>> inc = lambda x: x + 1\n >>> isdag({'x': 0, 'y': (inc, 'x')}, 'y')\n True\n >>> isdag({'x': (inc, 'y'), 'y': (inc, 'x')}, 'y')\n False\n\n See Also\n --------\n getcycle\n \"\"\"\n return not getcycle(d, keys)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_literal_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_literal_", "embedding": null, "metadata": {"file_path": "dask/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 448, "end_line": 478, "span_ids": ["literal.__call__", "literal.__init__", "literal", "literal.__reduce__", "quote", "literal.__repr__"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class literal(object):\n \"\"\"A small serializable object to wrap literal values without copying\"\"\"\n\n __slots__ = (\"data\",)\n\n def __init__(self, data):\n self.data = data\n\n def __repr__(self):\n return \"literal\" % type(self.data).__name__\n\n def __reduce__(self):\n return (literal, (self.data,))\n\n def __call__(self):\n return self.data\n\n\ndef quote(x):\n \"\"\"Ensure that this value remains this value in a dask graph\n\n Some values in dask graph take on special meaning. 
Sometimes we want to\n ensure that our data is not interpreted but remains literal.\n\n >>> quote((add, 1, 2)) # doctest: +SKIP\n (literal<tuple>,)\n \"\"\"\n if istask(x) or type(x) is list or type(x) is dict:\n return (literal(x),)\n return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/__init__.py_try__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/__init__.py_try__", "embedding": null, "metadata": {"file_path": "dask/dataframe/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 58, "span_ids": ["impl"], "tokens": 344}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "try:\n from .core import (\n DataFrame,\n Series,\n Index,\n _Frame,\n map_partitions,\n repartition,\n to_datetime,\n to_timedelta,\n )\n from .groupby import Aggregation\n from .io import (\n from_array,\n from_pandas,\n from_bcolz,\n from_dask_array,\n read_hdf,\n read_sql_table,\n from_delayed,\n read_csv,\n to_csv,\n read_table,\n demo,\n to_hdf,\n to_records,\n to_sql,\n to_bag,\n read_json,\n to_json,\n read_fwf,\n )\n from .numeric import to_numeric\n from .optimize import optimize\n from .multi import merge, concat, merge_asof\n from . 
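`quote` wraps a value in a zero-argument `literal` callable, so the scheduler "executes" it back into the original value instead of interpreting it as a task. A usage sketch, assuming dask is importable:

from dask.core import get, quote

add = lambda x, y: x + y
dsk = {
    "data": quote((add, 1, 2)),  # kept as a tuple of data
    "task": (add, 1, 2),         # interpreted as a task and executed
}
assert get(dsk, "task") == 3
assert get(dsk, "data") == (add, 1, 2)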
import rolling, backends\n from ..base import compute\n from .reshape import get_dummies, pivot_table, melt\n from .utils import assert_eq\n from .io.orc import read_orc\n\n try:\n from .io import read_parquet, to_parquet\n except ImportError:\n pass\n try:\n from .core import isna\n except ImportError:\n pass\nexcept ImportError as e:\n msg = (\n \"Dask dataframe requirements are not installed.\\n\\n\"\n \"Please either conda or pip install as follows:\\n\\n\"\n \" conda install dask # either conda install\\n\"\n ' python -m pip install \"dask[dataframe]\" --upgrade # or python -m pip install'\n )\n raise ImportError(msg) from e", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/_accessor.py_warnings_CachedAccessor.__init__.self._accessor.accessor": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/_accessor.py_warnings_CachedAccessor.__init__.self._accessor.accessor", "embedding": null, "metadata": {"file_path": "dask/dataframe/_accessor.py", "file_name": "_accessor.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 24, "span_ids": ["imports", "CachedAccessor.__init__", "CachedAccessor"], "tokens": 147}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import warnings\n\n\n# Ported from pandas\n# https://github.com/pandas-dev/pandas/blob/master/pandas/core/accessor.py\n\n\nclass CachedAccessor(object):\n \"\"\"\n Custom property-like object (descriptor) for caching accessors.\n\n Parameters\n ----------\n name : str\n The namespace this will be accessed under, e.g. ``df.foo``\n accessor : cls\n The class with the extension methods. 
The class' __init__ method\n should expect one of a ``Series``, ``DataFrame`` or ``Index`` as\n the single argument ``data``\n \"\"\"\n\n def __init__(self, name, accessor):\n self._name = name\n self._accessor = accessor", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/_accessor.py_CachedAccessor.__get___CachedAccessor.__get__.return.accessor_obj": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/_accessor.py_CachedAccessor.__get___CachedAccessor.__get__.return.accessor_obj", "embedding": null, "metadata": {"file_path": "dask/dataframe/_accessor.py", "file_name": "_accessor.py", "file_type": "text/x-python", "category": "implementation", "start_line": 26, "end_line": 36, "span_ids": ["CachedAccessor.__get__"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class CachedAccessor(object):\n\n def __get__(self, obj, cls):\n if obj is None:\n # we're accessing the attribute of the class, i.e., Dataset.geo\n return self._accessor\n accessor_obj = self._accessor(obj)\n # Replace the property with the accessor object. Inspired by:\n # http://www.pydanny.com/cached-property.html\n # We need to use object.__setattr__ because we overwrite __setattr__ on\n # NDFrame\n object.__setattr__(obj, self._name, accessor_obj)\n return accessor_obj", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/_accessor.py__register_accessor__register_accessor.return.decorator": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/_accessor.py__register_accessor__register_accessor.return.decorator", "embedding": null, "metadata": {"file_path": "dask/dataframe/_accessor.py", "file_name": "_accessor.py", "file_type": "text/x-python", "category": "implementation", "start_line": 39, "end_line": 53, "span_ids": ["_register_accessor"], "tokens": 107}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _register_accessor(name, cls):\n def decorator(accessor):\n if hasattr(cls, name):\n warnings.warn(\n \"registration of accessor {!r} under name {!r} for type \"\n \"{!r} is overriding a preexisting attribute with the same \"\n \"name.\".format(accessor, name, cls),\n UserWarning,\n stacklevel=2,\n )\n setattr(cls, name, CachedAccessor(name, accessor))\n cls._accessors.add(name)\n return accessor\n\n return decorator", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
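`CachedAccessor.__get__` above constructs the accessor once and stores it on the instance, so later lookups bypass the descriptor entirely. A minimal self-contained restatement (the `Upper` and `Frame` classes are illustrative stand-ins):

class CachedAccessorSketch:
    def __init__(self, name, accessor):
        self._name = name
        self._accessor = accessor

    def __get__(self, obj, cls):
        if obj is None:
            return self._accessor           # class-level access
        accessor_obj = self._accessor(obj)
        # Cache on the instance; being a non-data descriptor, the instance
        # attribute now shadows this descriptor on future lookups.
        object.__setattr__(obj, self._name, accessor_obj)
        return accessor_obj

class Upper:
    def __init__(self, data):
        self.values = [s.upper() for s in data.values]

class Frame:
    up = CachedAccessorSketch("up", Upper)

    def __init__(self, values):
        self.values = values

f = Frame(["a", "b"])
assert f.up.values == ["A", "B"]
assert f.up is f.up   # second access hits the cached instance attribute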
"metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/_accessor.py_register_dataframe_accessor_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/_accessor.py_register_dataframe_accessor_", "embedding": null, "metadata": {"file_path": "dask/dataframe/_accessor.py", "file_name": "_accessor.py", "file_type": "text/x-python", "category": "implementation", "start_line": 56, "end_line": 87, "span_ids": ["register_index_accessor", "register_series_accessor", "register_dataframe_accessor"], "tokens": 177}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def register_dataframe_accessor(name):\n \"\"\"\n Register a custom accessor on :class:`dask.dataframe.DataFrame`.\n\n See :func:`pandas.api.extensions.register_dataframe_accessor` for more.\n \"\"\"\n from dask.dataframe import DataFrame\n\n return _register_accessor(name, DataFrame)\n\n\ndef register_series_accessor(name):\n \"\"\"\n Register a custom accessor on :class:`dask.dataframe.Series`.\n\n See :func:`pandas.api.extensions.register_series_accessor` for more.\n \"\"\"\n from dask.dataframe import Series\n\n return _register_accessor(name, Series)\n\n\ndef register_index_accessor(name):\n \"\"\"\n Register a custom accessor on :class:`dask.dataframe.Index`.\n\n See :func:`pandas.api.extensions.register_index_accessor` for more.\n \"\"\"\n from dask.dataframe import Index\n\n return _register_accessor(name, Index)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/_compat.py_string_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/_compat.py_string_", "embedding": null, "metadata": {"file_path": "dask/dataframe/_compat.py", "file_name": "_compat.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 87, "span_ids": ["assert_numpy_array_equal", "imports", "makeDateIndex", "makeMissingDataframe", "makeTimeDataFrame", "makeTimedeltaIndex", "makeTimeSeries", "assert_categorical_equal", "makeMixedDataFrame", "makeDataFrame"], "tokens": 647}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import string\nfrom distutils.version import LooseVersion\n\nimport numpy as np\nimport pandas as pd\n\n\nPANDAS_VERSION = LooseVersion(pd.__version__)\nPANDAS_GT_0240 = PANDAS_VERSION >= LooseVersion(\"0.24.0\")\nPANDAS_GT_0250 = PANDAS_VERSION >= LooseVersion(\"0.25.0\")\nPANDAS_GT_100 = PANDAS_VERSION >= LooseVersion(\"1.0.0\")\nPANDAS_GT_104 = PANDAS_VERSION >= LooseVersion(\"1.0.4\")\nPANDAS_GT_110 = PANDAS_VERSION >= 
LooseVersion(\"1.1.0\")\nHAS_INT_NA = PANDAS_GT_0240\n\n\nif PANDAS_GT_100:\n import pandas.testing as tm # noqa: F401\nelse:\n import pandas.util.testing as tm # noqa: F401\n\n\ndef assert_categorical_equal(left, right, *args, **kwargs):\n if PANDAS_GT_100:\n tm.assert_extension_array_equal(left, right, *args, **kwargs)\n assert pd.api.types.is_categorical_dtype(\n left.dtype\n ), \"{} is not categorical dtype\".format(left)\n assert pd.api.types.is_categorical_dtype(\n right.dtype\n ), \"{} is not categorical dtype\".format(right)\n else:\n return tm.assert_categorical_equal(left, right, *args, **kwargs)\n\n\ndef assert_numpy_array_equal(left, right):\n left_na = pd.isna(left)\n right_na = pd.isna(right)\n np.testing.assert_array_equal(left_na, right_na)\n\n left_valid = left[~left_na]\n right_valid = right[~right_na]\n np.testing.assert_array_equal(left_valid, right_valid)\n\n\ndef makeDataFrame():\n data = np.random.randn(30, 4)\n index = list(string.ascii_letters)[:30]\n return pd.DataFrame(data, index=index, columns=list(\"ABCD\"))\n\n\ndef makeTimeDataFrame():\n data = makeDataFrame()\n data.index = makeDateIndex()\n return data\n\n\ndef makeTimeSeries():\n return makeTimeDataFrame()[\"A\"]\n\n\ndef makeDateIndex(k=30, freq=\"B\"):\n return pd.date_range(\"2000\", periods=k, freq=freq)\n\n\ndef makeTimedeltaIndex(k=30, freq=\"D\"):\n return pd.timedelta_range(\"1 day\", periods=k, freq=freq)\n\n\ndef makeMissingDataframe():\n df = makeDataFrame()\n data = df.values\n data = np.where(data > 1, np.nan, data)\n return pd.DataFrame(data, index=df.index, columns=df.columns)\n\n\ndef makeMixedDataFrame():\n df = pd.DataFrame(\n {\n \"A\": [0.0, 1, 2, 3, 4],\n \"B\": [0.0, 1, 0, 1, 0],\n \"C\": [\"foo{}\".format(i) for i in range(5)],\n \"D\": pd.date_range(\"2009-01-01\", periods=5),\n }\n )\n return df", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/_dtypes.py_pd_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/_dtypes.py_pd_", "embedding": null, "metadata": {"file_path": "dask/dataframe/_dtypes.py", "file_name": "_dtypes.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 22, "span_ids": ["imports"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pandas as pd\nfrom ._compat import PANDAS_GT_100\nfrom .extensions import make_array_nonempty, make_scalar\n\nif PANDAS_GT_100:\n\n @make_array_nonempty.register(pd.StringDtype)\n def _(dtype):\n return pd.array([\"a\", pd.NA], dtype=dtype)\n\n @make_scalar.register(str)\n def _(x):\n return \"s\"\n\n @make_array_nonempty.register(pd.BooleanDtype)\n def _dtype(dtype):\n return pd.array([True, pd.NA], dtype=dtype)\n\n @make_scalar.register(bool)\n def _(x):\n return True", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_np_Accessor._property_map.return.self__series_map_partitio": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_np_Accessor._property_map.return.self__series_map_partitio", "embedding": null, "metadata": {"file_path": "dask/dataframe/accessor.py", "file_name": "accessor.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 56, "span_ids": ["Accessor", "imports", "maybe_wrap_pandas", "Accessor._delegate_property", "Accessor.__init__", "Accessor._delegate_method", "Accessor._property_map"], "tokens": 379}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\nimport pandas as pd\nfrom functools import partial\n\nfrom ..utils import derived_from\n\n\ndef maybe_wrap_pandas(obj, x):\n if isinstance(x, np.ndarray):\n if isinstance(obj, pd.Series):\n return pd.Series(x, index=obj.index, dtype=x.dtype)\n return pd.Index(x)\n return x\n\n\nclass Accessor(object):\n \"\"\"\n Base class for pandas Accessor objects cat, dt, and str.\n\n Notes\n -----\n Subclasses should define ``_accessor_name``\n \"\"\"\n\n _not_implemented = set()\n\n def __init__(self, series):\n from .core import Series\n\n if not isinstance(series, Series):\n raise ValueError(\"Accessor cannot be initialized\")\n\n series_meta = series._meta\n if hasattr(series_meta, \"to_series\"): # is index-like\n series_meta = series_meta.to_series()\n meta = getattr(series_meta, self._accessor_name)\n\n self._meta = meta\n self._series = series\n\n @staticmethod\n def _delegate_property(obj, accessor, attr):\n out = getattr(getattr(obj, accessor, obj), attr)\n return maybe_wrap_pandas(obj, out)\n\n @staticmethod\n def _delegate_method(obj, accessor, attr, args, kwargs):\n out = getattr(getattr(obj, accessor, obj), attr)(*args, **kwargs)\n return maybe_wrap_pandas(obj, out)\n\n def _property_map(self, attr):\n meta = self._delegate_property(self._series._meta, self._accessor_name, attr)\n token = \"%s-%s\" % (self._accessor_name, attr)\n return self._series.map_partitions(\n self._delegate_property, self._accessor_name, attr, token=token, meta=meta\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_Accessor._function_map_Accessor._function_map.return.self__series_map_partitio": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_Accessor._function_map_Accessor._function_map.return.self__series_map_partitio", "embedding": null, "metadata": {"file_path": "dask/dataframe/accessor.py", "file_name": "accessor.py", "file_type": "text/x-python", "category": "implementation", "start_line": 58, "end_line": 74, "span_ids": ["Accessor._function_map"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", 
"start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Accessor(object):\n\n def _function_map(self, attr, *args, **kwargs):\n if \"meta\" in kwargs:\n meta = kwargs.pop(\"meta\")\n else:\n meta = self._delegate_method(\n self._series._meta_nonempty, self._accessor_name, attr, args, kwargs\n )\n token = \"%s-%s\" % (self._accessor_name, attr)\n return self._series.map_partitions(\n self._delegate_method,\n self._accessor_name,\n attr,\n args,\n kwargs,\n meta=meta,\n token=token,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_Accessor._delegates_DatetimeAccessor._accessor_name._dt_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_Accessor._delegates_DatetimeAccessor._accessor_name._dt_", "embedding": null, "metadata": {"file_path": "dask/dataframe/accessor.py", "file_name": "accessor.py", "file_type": "text/x-python", "category": "implementation", "start_line": 76, "end_line": 105, "span_ids": ["DatetimeAccessor", "Accessor.__dir__", "Accessor._delegates", "Accessor.__getattr__"], "tokens": 178}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Accessor(object):\n\n @property\n def _delegates(self):\n return set(dir(self._meta)).difference(self._not_implemented)\n\n def __dir__(self):\n o = self._delegates\n o.update(self.__dict__)\n o.update(dir(type(self)))\n return list(o)\n\n def __getattr__(self, key):\n if key in self._delegates:\n if callable(getattr(self._meta, key)):\n return partial(self._function_map, key)\n else:\n return self._property_map(key)\n else:\n raise AttributeError(key)\n\n\nclass DatetimeAccessor(Accessor):\n \"\"\"Accessor object for datetimelike properties of the Series values.\n\n Examples\n --------\n\n >>> s.dt.microsecond # doctest: +SKIP\n \"\"\"\n\n _accessor_name = \"dt\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_StringAccessor_StringAccessor.split.return.self__function_map_split": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_StringAccessor_StringAccessor.split.return.self__function_map_split", "embedding": null, "metadata": {"file_path": "dask/dataframe/accessor.py", "file_name": "accessor.py", "file_type": "text/x-python", "category": "implementation", "start_line": 108, "end_line": 134, "span_ids": ["StringAccessor.split", "StringAccessor"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class StringAccessor(Accessor):\n \"\"\"Accessor object for string properties of the Series values.\n\n Examples\n --------\n\n >>> s.str.lower() # doctest: +SKIP\n \"\"\"\n\n _accessor_name = \"str\"\n _not_implemented = {\"get_dummies\"}\n\n @derived_from(pd.core.strings.StringMethods)\n def split(self, pat=None, n=-1, expand=False):\n if expand:\n if n == -1:\n raise NotImplementedError(\n \"To use the expand parameter you must specify the number of \"\n \"expected splits with the n= parameter. Usually n splits result in n+1 output columns.\"\n )\n else:\n delimiter = \" \" if pat is None else pat\n meta = type(self._series._meta)([delimiter.join([\"a\"] * (n + 1))])\n meta = meta.str.split(n=n, expand=expand, pat=pat)\n else:\n meta = (self._series.name, object)\n return self._function_map(\"split\", pat=pat, n=n, expand=expand, meta=meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_StringAccessor.cat_StringAccessor.cat.return.self__series_map_partitio": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_StringAccessor.cat_StringAccessor.cat.return.self__series_map_partitio", "embedding": null, "metadata": {"file_path": "dask/dataframe/accessor.py", "file_name": "accessor.py", "file_type": "text/x-python", "category": "implementation", "start_line": 136, "end_line": 151, "span_ids": ["StringAccessor.cat"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class StringAccessor(Accessor):\n\n @derived_from(pd.core.strings.StringMethods)\n def cat(self, others=None, sep=None, na_rep=None):\n from .core import Series, Index\n\n if others is None:\n raise NotImplementedError(\"x.str.cat() with `others == None`\")\n\n valid_types = (Series, Index, pd.Series, pd.Index)\n if isinstance(others, valid_types):\n others = [others]\n elif not all(isinstance(a, valid_types) for a in others):\n raise TypeError(\"others must be Series/Index\")\n\n return self._series.map_partitions(\n str_cat, *others, sep=sep, na_rep=na_rep, meta=self._series._meta\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_StringAccessor.extractall_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_StringAccessor.extractall_", "embedding": null, "metadata": {"file_path": "dask/dataframe/accessor.py", "file_name": "accessor.py", "file_type": "text/x-python", "category": "implementation", "start_line": 153, "end_line": 176, "span_ids": ["StringAccessor.__getitem__", 
"StringAccessor.extractall", "str_get", "str_extractall", "str_cat"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class StringAccessor(Accessor):\n\n @derived_from(pd.core.strings.StringMethods)\n def extractall(self, pat, flags=0):\n # TODO: metadata inference here won't be necessary for pandas >= 0.23.0\n meta = self._series._meta.str.extractall(pat, flags=flags)\n return self._series.map_partitions(\n str_extractall, pat, flags, meta=meta, token=\"str-extractall\"\n )\n\n def __getitem__(self, index):\n return self._series.map_partitions(str_get, index, meta=self._series._meta)\n\n\ndef str_extractall(series, pat, flags):\n return series.str.extractall(pat, flags=flags)\n\n\ndef str_get(series, index):\n \"\"\" Implements series.str[index] \"\"\"\n return series.str[index]\n\n\ndef str_cat(self, *others, **kwargs):\n return self.str.cat(others=others, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py_concat_dispatch_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py_concat_dispatch_", "embedding": null, "metadata": {"file_path": "dask/dataframe/backends.py", "file_name": "backends.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 19, "span_ids": ["_register_cudf", "imports"], "tokens": 135}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from .methods import concat_dispatch\nfrom .core import get_parallel_type, meta_nonempty, make_meta\nfrom .utils import hash_object_dispatch, group_split_dispatch\n\n\n######################################\n# cuDF: Pandas Dataframes on the GPU #\n######################################\n\n\n@concat_dispatch.register_lazy(\"cudf\")\n@hash_object_dispatch.register_lazy(\"cudf\")\n@group_split_dispatch.register_lazy(\"cudf\")\n@get_parallel_type.register_lazy(\"cudf\")\n@meta_nonempty.register_lazy(\"cudf\")\n@make_meta.register_lazy(\"cudf\")\ndef _register_cudf():\n import dask_cudf # noqa: F401", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py_from_collections_import_d__categorize_block.return.df": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py_from_collections_import_d__categorize_block.return.df", "embedding": null, "metadata": {"file_path": "dask/dataframe/categorical.py", "file_name": "categorical.py", "file_type": "text/x-python", 
"category": "implementation", "start_line": 1, "end_line": 41, "span_ids": ["imports", "_categorize_block"], "tokens": 262}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from collections import defaultdict\nimport pandas as pd\nfrom tlz import partition_all\nfrom numbers import Integral\n\nfrom ..base import tokenize, compute_as_if_collection\nfrom .accessor import Accessor\nfrom .utils import (\n has_known_categories,\n clear_known_categories,\n is_scalar,\n is_categorical_dtype,\n)\nfrom . import methods\nfrom ..utils import Dispatch\n\n\ndef _categorize_block(df, categories, index):\n \"\"\"Categorize a dataframe with given categories\n\n df: DataFrame\n categories: dict mapping column name to iterable of categories\n \"\"\"\n df = df.copy()\n for col, vals in categories.items():\n if is_categorical_dtype(df[col]):\n df[col] = df[col].cat.set_categories(vals)\n else:\n cat_dtype = categorical_dtype(meta=df[col], categories=vals, ordered=False)\n df[col] = df[col].astype(cat_dtype)\n if index is not None:\n if is_categorical_dtype(df.index):\n ind = df.index.set_categories(index)\n else:\n cat_dtype = categorical_dtype(\n meta=df.index, categories=index, ordered=False\n )\n ind = df.index.astype(dtype=cat_dtype)\n ind.name = df.index.name\n df.index = ind\n return df", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py__get_categories__get_categories_agg.return.res_res_ind_0_append_re": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py__get_categories__get_categories_agg.return.res_res_ind_0_append_re", "embedding": null, "metadata": {"file_path": "dask/dataframe/categorical.py", "file_name": "categorical.py", "file_type": "text/x-python", "category": "implementation", "start_line": 44, "end_line": 72, "span_ids": ["_get_categories_agg", "_get_categories"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _get_categories(df, columns, index):\n res = {}\n for col in columns:\n x = df[col]\n if is_categorical_dtype(x):\n res[col] = pd.Series(x.cat.categories)\n else:\n res[col] = x.dropna().drop_duplicates()\n if index:\n if is_categorical_dtype(df.index):\n return res, df.index.categories\n return res, df.index.dropna().drop_duplicates()\n return res, None\n\n\ndef _get_categories_agg(parts):\n res = defaultdict(list)\n res_ind = []\n for p in parts:\n for k, v in p[0].items():\n res[k].append(v)\n res_ind.append(p[1])\n res = {\n k: methods.concat(v, ignore_index=True).drop_duplicates()\n for k, v in res.items()\n }\n if res_ind[0] is None:\n return res, None\n return res, res_ind[0].append(res_ind[1:]).drop_duplicates()", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py_categorize_categorize.return.df_map_partitions__catego": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py_categorize_categorize.return.df_map_partitions__catego", "embedding": null, "metadata": {"file_path": "dask/dataframe/categorical.py", "file_name": "categorical.py", "file_type": "text/x-python", "category": "implementation", "start_line": 75, "end_line": 150, "span_ids": ["categorize"], "tokens": 644}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def categorize(df, columns=None, index=None, split_every=None, **kwargs):\n \"\"\"Convert columns of the DataFrame to category dtype.\n\n Parameters\n ----------\n columns : list, optional\n A list of column names to convert to categoricals. By default any\n column with an object dtype is converted to a categorical, and any\n unknown categoricals are made known.\n index : bool, optional\n Whether to categorize the index. By default, object indices are\n converted to categorical, and unknown categorical indices are made\n known. Set True to always categorize the index, False to never.\n split_every : int, optional\n Group partitions into groups of this size while performing a\n tree-reduction. 
If set to False, no tree-reduction will be used.\n Default is 16.\n kwargs\n Keyword arguments are passed on to compute.\n \"\"\"\n meta = df._meta\n if columns is None:\n columns = list(meta.select_dtypes([\"object\", \"category\"]).columns)\n elif is_scalar(columns):\n columns = [columns]\n\n # Filter out known categorical columns\n columns = [\n c\n for c in columns\n if not (is_categorical_dtype(meta[c]) and has_known_categories(meta[c]))\n ]\n\n if index is not False:\n if is_categorical_dtype(meta.index):\n index = not has_known_categories(meta.index)\n elif index is None:\n index = meta.index.dtype == object\n\n # Nothing to do\n if not len(columns) and index is False:\n return df\n\n if split_every is None:\n split_every = 16\n elif split_every is False:\n split_every = df.npartitions\n elif not isinstance(split_every, Integral) or split_every < 2:\n raise ValueError(\"split_every must be an integer >= 2\")\n\n token = tokenize(df, columns, index, split_every)\n a = \"get-categories-chunk-\" + token\n dsk = {\n (a, i): (_get_categories, key, columns, index)\n for (i, key) in enumerate(df.__dask_keys__())\n }\n\n prefix = \"get-categories-agg-\" + token\n k = df.npartitions\n depth = 0\n while k > split_every:\n b = prefix + str(depth)\n for part_i, inds in enumerate(partition_all(split_every, range(k))):\n dsk[(b, part_i)] = (_get_categories_agg, [(a, i) for i in inds])\n k = part_i + 1\n a = b\n depth += 1\n\n dsk[(prefix, 0)] = (_get_categories_agg, [(a, i) for i in range(k)])\n dsk.update(df.dask)\n\n # Compute the categories\n categories, index = compute_as_if_collection(type(df), dsk, (prefix, 0), **kwargs)\n\n # Categorize each partition\n return df.map_partitions(_categorize_block, categories, index)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py_CategoricalAccessor_CategoricalAccessor.known.return.has_known_categories_self": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py_CategoricalAccessor_CategoricalAccessor.known.return.has_known_categories_self", "embedding": null, "metadata": {"file_path": "dask/dataframe/categorical.py", "file_name": "categorical.py", "file_type": "text/x-python", "category": "implementation", "start_line": 153, "end_line": 182, "span_ids": ["CategoricalAccessor.known", "CategoricalAccessor"], "tokens": 156}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class CategoricalAccessor(Accessor):\n \"\"\"\n Accessor object for categorical properties of the Series values.\n\n Examples\n --------\n >>> s.cat.categories # doctest: +SKIP\n\n Notes\n -----\n Attributes that depend only on metadata are eager\n\n * categories\n * ordered\n\n Attributes depending on the entire dataset are lazy\n\n * codes\n * ...\n\n So `df.a.cat.categories` <=> `df.a._meta.cat.categories`\n So `df.a.cat.codes` <=> `df.a.map_partitions(lambda x: x.cat.codes)`\n \"\"\"\n\n _accessor_name = \"cat\"\n\n @property\n def known(self):\n \"\"\"Whether the 
categories are fully known\"\"\"\n return has_known_categories(self._series)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py_CategoricalAccessor.as_known_CategoricalAccessor.as_known.return.self_set_categories_categ": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py_CategoricalAccessor.as_known_CategoricalAccessor.as_known.return.self_set_categories_categ", "embedding": null, "metadata": {"file_path": "dask/dataframe/categorical.py", "file_name": "categorical.py", "file_type": "text/x-python", "category": "implementation", "start_line": 184, "end_line": 199, "span_ids": ["CategoricalAccessor.as_known"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class CategoricalAccessor(Accessor):\n\n def as_known(self, **kwargs):\n \"\"\"Ensure the categories in this series are known.\n\n If the categories are known, this is a no-op. If unknown, the\n categories are computed, and a new series with known categories is\n returned.\n\n Parameters\n ----------\n kwargs\n Keywords to pass on to the call to `compute`.\n \"\"\"\n if self.known:\n return self._series\n categories = self._property_map(\"categories\").unique().compute(**kwargs)\n return self.set_categories(categories.values)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py_CategoricalAccessor.as_unknown_CategoricalAccessor.codes.return.self__property_map_codes": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py_CategoricalAccessor.as_unknown_CategoricalAccessor.codes.return.self__property_map_codes", "embedding": null, "metadata": {"file_path": "dask/dataframe/categorical.py", "file_name": "categorical.py", "file_type": "text/x-python", "category": "implementation", "start_line": 201, "end_line": 239, "span_ids": ["CategoricalAccessor.ordered", "CategoricalAccessor.codes", "CategoricalAccessor.categories", "CategoricalAccessor.as_unknown"], "tokens": 290}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class CategoricalAccessor(Accessor):\n\n def as_unknown(self):\n \"\"\"Ensure the categories in this series are unknown\"\"\"\n if not self.known:\n return self._series\n out = self._series.copy()\n out._meta = clear_known_categories(out._meta)\n return out\n\n @property\n def ordered(self):\n return self._delegate_property(self._series._meta, \"cat\", \"ordered\")\n\n 
@property\n def categories(self):\n \"\"\"The categories of this categorical.\n\n If categories are unknown, an error is raised\"\"\"\n if not self.known:\n msg = (\n \"`df.column.cat.categories` with unknown categories is not \"\n \"supported. Please use `column.cat.as_known()` or \"\n \"`df.categorize()` beforehand to ensure known categories\"\n )\n raise NotImplementedError(msg)\n return self._delegate_property(self._series._meta, \"cat\", \"categories\")\n\n @property\n def codes(self):\n \"\"\"The codes of this categorical.\n\n If categories are unknown, an error is raised\"\"\"\n if not self.known:\n msg = (\n \"`df.column.cat.codes` with unknown categories is not \"\n \"supported. Please use `column.cat.as_known()` or \"\n \"`df.categorize()` beforehand to ensure known categories\"\n )\n raise NotImplementedError(msg)\n return self._property_map(\"codes\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py_CategoricalAccessor.remove_unused_categories_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py_CategoricalAccessor.remove_unused_categories_", "embedding": null, "metadata": {"file_path": "dask/dataframe/categorical.py", "file_name": "categorical.py", "file_type": "text/x-python", "category": "implementation", "start_line": 241, "end_line": 289, "span_ids": ["categorical_dtype", "CategoricalAccessor.remove_unused_categories", "categorical_dtype_pandas", "impl"], "tokens": 338}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class CategoricalAccessor(Accessor):\n\n def remove_unused_categories(self):\n \"\"\"\n Removes categories which are not used\n\n Notes\n -----\n This method requires a full scan of the data to compute the\n unique values, which can be expensive.\n \"\"\"\n # get the set of used categories\n present = self._series.dropna().unique()\n present = pd.Index(present.compute())\n\n if isinstance(self._series._meta, pd.CategoricalIndex):\n meta_cat = self._series._meta\n else:\n meta_cat = self._series._meta.cat\n\n # Reorder to keep cat:code relationship, filtering unused (-1)\n ordered, mask = present.reindex(meta_cat.categories)\n if mask is None:\n # PANDAS-23963: old and new categories match.\n return self._series\n\n new_categories = ordered[mask != -1]\n meta = meta_cat.set_categories(new_categories, ordered=meta_cat.ordered)\n return self._series.map_partitions(\n self._delegate_method,\n \"cat\",\n \"set_categories\",\n (),\n {\"new_categories\": new_categories},\n meta=meta,\n token=\"cat-set_categories\",\n )\n\n\ncategorical_dtype_dispatch = Dispatch(\"CategoricalDtype\")\n\n\ndef categorical_dtype(meta, categories=None, ordered=False):\n func = categorical_dtype_dispatch.dispatch(type(meta))\n return func(categories=categories, ordered=ordered)\n\n\n@categorical_dtype_dispatch.register((pd.DataFrame, pd.Series, pd.Index))\ndef categorical_dtype_pandas(categories=None, ordered=False):\n return 
pd.api.types.CategoricalDtype(categories=categories, ordered=ordered)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_operator_pd_set_option_compute_us": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_operator_pd_set_option_compute_us", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 80, "span_ids": ["imports"], "tokens": 458}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import operator\nimport warnings\nfrom collections.abc import Iterator, Sequence\nfrom functools import wraps, partial\nfrom numbers import Number, Integral\nfrom operator import getitem\nfrom pprint import pformat\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.util import cache_readonly\nfrom pandas.api.types import (\n is_bool_dtype,\n is_timedelta64_dtype,\n is_numeric_dtype,\n is_datetime64_any_dtype,\n)\nfrom tlz import merge, first, unique, partition_all, remove\n\ntry:\n from chest import Chest as Cache\nexcept ImportError:\n Cache = dict\n\nfrom .. import array as da\nfrom .. import core\n\nfrom ..utils import parse_bytes, partial_by_order, Dispatch, IndexCallable, apply\nfrom .. import threaded\nfrom ..context import globalmethod\nfrom ..utils import (\n random_state_data,\n pseudorandom,\n derived_from,\n funcname,\n memory_repr,\n put_lines,\n M,\n key_split,\n OperatorMethodMixin,\n is_arraylike,\n typename,\n iter_chunks,\n)\nfrom ..array.core import Array, normalize_arg\nfrom ..array.utils import zeros_like_safe\nfrom ..blockwise import blockwise, Blockwise\nfrom ..base import DaskMethodsMixin, tokenize, dont_optimize, is_dask_collection\nfrom ..delayed import delayed, Delayed, unpack_collections\nfrom ..highlevelgraph import HighLevelGraph\n\nfrom . 
import methods\nfrom .accessor import DatetimeAccessor, StringAccessor\nfrom .categorical import CategoricalAccessor, categorize\nfrom .optimize import optimize\nfrom .utils import (\n meta_nonempty,\n make_meta,\n insert_meta_param_description,\n raise_on_meta_error,\n clear_known_categories,\n group_split_dispatch,\n is_categorical_dtype,\n has_known_categories,\n PANDAS_VERSION,\n PANDAS_GT_100,\n PANDAS_GT_110,\n index_summary,\n is_dataframe_like,\n is_series_like,\n is_index_like,\n valid_divisions,\n hash_object_dispatch,\n check_matching_columns,\n drop_by_shallow_copy,\n)\n\nno_default = \"__no_default__\"\n\npd.set_option(\"compute.use_numexpr\", False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__concat_finalize.return._concat_results_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__concat_finalize.return._concat_results_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 83, "end_line": 106, "span_ids": ["finalize", "_concat"], "tokens": 176}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _concat(args, ignore_index=False):\n if not args:\n return args\n if isinstance(first(core.flatten(args)), np.ndarray):\n return da.core.concatenate3(args)\n if not has_parallel_type(args[0]):\n try:\n return pd.Series(args)\n except Exception:\n return args\n # We filter out empty partitions here because pandas frequently has\n # inconsistent dtypes in results between empty and non-empty frames.\n # Ideally this would be handled locally for each operation, but in practice\n # this seems easier. 
TODO: don't do this.\n args2 = [i for i in args if len(i)]\n return (\n args[0]\n if not args2\n else methods.concat(args2, uniform=True, ignore_index=ignore_index)\n )\n\n\ndef finalize(results):\n return _concat(results)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Scalar_Scalar._get_unary_operator.return.f": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Scalar_Scalar._get_unary_operator.return.f", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 109, "end_line": 215, "span_ids": ["Scalar:3", "Scalar", "Scalar.__dask_graph__", "Scalar.__dir__", "Scalar.dtype", "Scalar.__repr__", "Scalar.__bool__", "Scalar.__dask_tokenize__", "Scalar.__array__", "Scalar.__dask_keys__", "Scalar.divisions", "Scalar._args", "Scalar.__init__", "Scalar.__dask_layers__", "Scalar.__dask_postpersist__", "Scalar._meta_nonempty", "Scalar.__getstate__", "Scalar.__dask_postcompute__", "Scalar._get_unary_operator", "Scalar.key", "Scalar.__setstate__"], "tokens": 823}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Scalar(DaskMethodsMixin, OperatorMethodMixin):\n \"\"\" A Dask object to represent a pandas scalar\"\"\"\n\n def __init__(self, dsk, name, meta, divisions=None):\n # divisions is ignored, only present to be compatible with other\n # objects.\n if not isinstance(dsk, HighLevelGraph):\n dsk = HighLevelGraph.from_collections(name, dsk, dependencies=[])\n self.dask = dsk\n self._name = name\n meta = make_meta(meta)\n if is_dataframe_like(meta) or is_series_like(meta) or is_index_like(meta):\n raise TypeError(\n \"Expected meta to specify scalar, got \"\n \"{0}\".format(typename(type(meta)))\n )\n self._meta = meta\n\n def __dask_graph__(self):\n return self.dask\n\n def __dask_keys__(self):\n return [self.key]\n\n def __dask_tokenize__(self):\n return self._name\n\n def __dask_layers__(self):\n return (self._name,)\n\n __dask_optimize__ = globalmethod(\n optimize, key=\"dataframe_optimize\", falsey=dont_optimize\n )\n __dask_scheduler__ = staticmethod(threaded.get)\n\n def __dask_postcompute__(self):\n return first, ()\n\n def __dask_postpersist__(self):\n return Scalar, (self._name, self._meta, self.divisions)\n\n @property\n def _meta_nonempty(self):\n return self._meta\n\n @property\n def dtype(self):\n return self._meta.dtype\n\n def __dir__(self):\n o = set(dir(type(self)))\n o.update(self.__dict__)\n if not hasattr(self._meta, \"dtype\"):\n o.remove(\"dtype\") # dtype only in `dir` if available\n return list(o)\n\n @property\n def divisions(self):\n \"\"\"Dummy divisions to be compat with Series and DataFrame\"\"\"\n return [None, None]\n\n def __repr__(self):\n name = self._name if len(self._name) < 10 else self._name[:7] + \"...\"\n if hasattr(self._meta, \"dtype\"):\n extra = \", dtype=%s\" % self._meta.dtype\n else:\n extra = \", type=%s\" % 
type(self._meta).__name__\n return \"dd.Scalar<%s%s>\" % (name, extra)\n\n def __array__(self):\n # array interface is required to support pandas instance + Scalar\n # Otherwise, above op results in pd.Series of Scalar (object dtype)\n return np.asarray(self.compute())\n\n @property\n def _args(self):\n return (self.dask, self._name, self._meta)\n\n def __getstate__(self):\n return self._args\n\n def __setstate__(self, state):\n self.dask, self._name, self._meta = state\n\n def __bool__(self):\n raise TypeError(\n \"Trying to convert {} to a boolean value. Because Dask objects are \"\n \"lazily evaluated, they cannot be converted to a boolean value or used \"\n \"in boolean conditions like if statements. Try calling .compute() to \"\n \"force computation prior to converting to a boolean value or using in \"\n \"a conditional statement.\".format(self)\n )\n\n @property\n def key(self):\n return (self._name, 0)\n\n @classmethod\n def _get_unary_operator(cls, op):\n def f(self):\n name = funcname(op) + \"-\" + tokenize(self)\n dsk = {(name, 0): (op, (self._name, 0))}\n meta = op(self._meta_nonempty)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n return Scalar(graph, name, meta)\n\n return f", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Scalar._get_binary_operator_Scalar.to_delayed.return.Delayed_self_key_dsk_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Scalar._get_binary_operator_Scalar.to_delayed.return.Delayed_self_key_dsk_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 217, "end_line": 235, "span_ids": ["Scalar.to_delayed", "Scalar._get_binary_operator"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Scalar(DaskMethodsMixin, OperatorMethodMixin):\n\n @classmethod\n def _get_binary_operator(cls, op, inv=False):\n return lambda self, other: _scalar_binary(op, self, other, inv=inv)\n\n def to_delayed(self, optimize_graph=True):\n \"\"\"Convert into a ``dask.delayed`` object.\n\n Parameters\n ----------\n optimize_graph : bool, optional\n If True [default], the graph is optimized before converting into\n ``dask.delayed`` objects.\n \"\"\"\n dsk = self.__dask_graph__()\n if optimize_graph:\n dsk = self.__dask_optimize__(dsk, self.__dask_keys__())\n name = \"delayed-\" + self._name\n dsk = HighLevelGraph.from_collections(name, dsk, dependencies=())\n return Delayed(self.key, dsk)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__scalar_binary__scalar_binary.if_return_type_is_not_Sca.else_.return.Scalar_graph_name_meta_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__scalar_binary__scalar_binary.if_return_type_is_not_Sca.else_.return.Scalar_graph_name_meta_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 238, "end_line": 268, "span_ids": ["_scalar_binary"], "tokens": 254}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _scalar_binary(op, self, other, inv=False):\n name = \"{0}-{1}\".format(funcname(op), tokenize(self, other))\n dependencies = [self]\n\n dsk = {}\n return_type = get_parallel_type(other)\n\n if isinstance(other, Scalar):\n dependencies.append(other)\n other_key = (other._name, 0)\n elif is_dask_collection(other):\n return NotImplemented\n else:\n other_key = other\n\n dsk[(name, 0)] = (\n (op, other_key, (self._name, 0)) if inv else (op, (self._name, 0), other_key)\n )\n\n other_meta = make_meta(other)\n other_meta_nonempty = meta_nonempty(other_meta)\n if inv:\n meta = op(other_meta_nonempty, self._meta_nonempty)\n else:\n meta = op(self._meta_nonempty, other_meta_nonempty)\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)\n if return_type is not Scalar:\n return return_type(graph, name, meta, [other.index.min(), other.index.max()])\n else:\n return Scalar(graph, name, meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame__Frame.__array_wrap__.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame__Frame.__array_wrap__.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 271, "end_line": 385, "span_ids": ["_Frame.__dask_keys__", "_Frame._meta_nonempty", "_Frame._constructor", "_Frame.__array__", "_Frame.npartitions", "_Frame.copy", "_Frame._args", "_Frame:3", "_Frame", "_Frame.__dask_graph__", "_Frame.__dask_postpersist__", "_Frame.__array_wrap__", "_Frame.__dask_layers__", "_Frame.__dask_tokenize__", "_Frame.__init__", "_Frame.__getstate__", "_Frame.attrs", "_Frame.__setstate__", "_Frame.size", "_Frame.attrs_9", "_Frame.__dask_postcompute__"], "tokens": 792}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n \"\"\"Superclass for DataFrame and Series\n\n Parameters\n ----------\n dsk: dict\n The dask graph to compute this DataFrame\n name: str\n The key prefix that specifies which keys in the dask comprise this\n particular DataFrame / Series\n meta: 
pandas.DataFrame, pandas.Series, or pandas.Index\n An empty pandas object with names, dtypes, and indices matching the\n expected output.\n divisions: tuple of index values\n Values along which we partition our blocks on the index\n \"\"\"\n\n def __init__(self, dsk, name, meta, divisions):\n if not isinstance(dsk, HighLevelGraph):\n dsk = HighLevelGraph.from_collections(name, dsk, dependencies=[])\n self.dask = dsk\n self._name = name\n meta = make_meta(meta)\n if not self._is_partition_type(meta):\n raise TypeError(\n \"Expected meta to specify type {0}, got type \"\n \"{1}\".format(type(self).__name__, typename(type(meta)))\n )\n self._meta = meta\n self.divisions = tuple(divisions)\n\n def __dask_graph__(self):\n return self.dask\n\n def __dask_keys__(self):\n return [(self._name, i) for i in range(self.npartitions)]\n\n def __dask_layers__(self):\n return (self._name,)\n\n def __dask_tokenize__(self):\n return self._name\n\n __dask_optimize__ = globalmethod(\n optimize, key=\"dataframe_optimize\", falsey=dont_optimize\n )\n __dask_scheduler__ = staticmethod(threaded.get)\n\n def __dask_postcompute__(self):\n return finalize, ()\n\n def __dask_postpersist__(self):\n return type(self), (self._name, self._meta, self.divisions)\n\n @property\n def _constructor(self):\n return new_dd_object\n\n @property\n def npartitions(self):\n \"\"\"Return number of partitions\"\"\"\n return len(self.divisions) - 1\n\n @property\n @derived_from(pd.DataFrame)\n def attrs(self):\n return self._meta.attrs\n\n @attrs.setter\n def attrs(self, value):\n self._meta.attrs = dict(value)\n\n @property\n def size(self):\n \"\"\"Size of the Series or DataFrame as a Delayed object.\n\n Examples\n --------\n >>> series.size # doctest: +SKIP\n dd.Scalar\n \"\"\"\n return self.reduction(\n methods.size, np.sum, token=\"size\", meta=int, split_every=False\n )\n\n @property\n def _meta_nonempty(self):\n \"\"\" A non-empty version of `_meta` with fake data.\"\"\"\n return meta_nonempty(self._meta)\n\n @property\n def _args(self):\n return (self.dask, self._name, self._meta, self.divisions)\n\n def __getstate__(self):\n return self._args\n\n def __setstate__(self, state):\n self.dask, self._name, self._meta, self.divisions = state\n\n def copy(self):\n \"\"\"Make a copy of the dataframe\n\n This is strictly a shallow copy of the underlying computational graph.\n It does not affect the underlying data\n \"\"\"\n return new_dd_object(self.dask, self._name, self._meta, self.divisions)\n\n def __array__(self, dtype=None, **kwargs):\n self._computed = self.compute()\n x = np.array(self._computed)\n return x\n\n def __array_wrap__(self, array, context=None):\n raise NotImplementedError", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.__array_ufunc____Frame.__array_ufunc__.if_method___call___.else_.return.NotImplemented": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.__array_ufunc____Frame.__array_ufunc__.if_method___call___.else_.return.NotImplemented", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 378, "end_line": 401, "span_ids": 
["_Frame.__array_ufunc__"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def __array_ufunc__(self, numpy_ufunc, method, *inputs, **kwargs):\n out = kwargs.get(\"out\", ())\n for x in inputs + out:\n # ufuncs work with 0-dimensional NumPy ndarrays\n # so we don't want to raise NotImplemented\n if isinstance(x, np.ndarray) and x.shape == ():\n continue\n elif not isinstance(\n x, (Number, Scalar, _Frame, Array, pd.DataFrame, pd.Series, pd.Index)\n ):\n return NotImplemented\n\n if method == \"__call__\":\n if numpy_ufunc.signature is not None:\n return NotImplemented\n if numpy_ufunc.nout > 1:\n # ufuncs with multiple output values\n # are not yet supported for frames\n return NotImplemented\n else:\n return elemwise(numpy_ufunc, *inputs, **kwargs)\n else:\n # ufunc methods are not yet supported for frames\n return NotImplemented", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._elemwise__Frame.__repr__.return._str_fmt_format_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._elemwise__Frame.__repr__.return._str_fmt_format_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 403, "end_line": 433, "span_ids": ["_Frame._repr_divisions", "_Frame.__repr__", "_Frame._elemwise", "_Frame._repr_data"], "tokens": 255}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @property\n def _elemwise(self):\n return elemwise\n\n def _repr_data(self):\n raise NotImplementedError\n\n @property\n def _repr_divisions(self):\n name = \"npartitions={0}\".format(self.npartitions)\n if self.known_divisions:\n divisions = pd.Index(self.divisions, name=name)\n else:\n # avoid to be converted to NaN\n divisions = pd.Index([\"\"] * (self.npartitions + 1), name=name)\n return divisions\n\n def __repr__(self):\n data = self._repr_data().to_string(max_rows=5, show_dimensions=False)\n _str_fmt = \"\"\"Dask {klass} Structure:\n{data}\nDask Name: {name}, {task} tasks\"\"\"\n if len(self.columns) == 0:\n data = data.partition(\"\\n\")[-1].replace(\"Index\", \"Divisions\")\n _str_fmt = \"Empty {}\".format(_str_fmt)\n return _str_fmt.format(\n klass=self.__class__.__name__,\n data=data,\n name=key_split(self._name),\n task=len(self.dask),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.reset_index__Frame.reset_index.return.self_map_partitions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.reset_index__Frame.reset_index.return.self_map_partitions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 456, "end_line": 478, "span_ids": ["_Frame.reset_index"], "tokens": 260}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def reset_index(self, drop=False):\n \"\"\"Reset the index to the default index.\n\n Note that unlike in ``pandas``, the reset ``dask.dataframe`` index will\n not be monotonically increasing from 0. Instead, it will restart at 0\n for each partition (e.g. ``index1 = [0, ..., 10], index2 = [0, ...]``).\n This is due to the inability to statically know the full length of the\n index.\n\n For DataFrame with multi-level index, returns a new DataFrame with\n labeling information in the columns under the index names, defaulting\n to 'level_0', 'level_1', etc. if any are None. For a standard index,\n the index name will be used (if set), otherwise a default 'index' or\n 'level_0' (if 'index' is already taken) will be used.\n\n Parameters\n ----------\n drop : boolean, default False\n Do not try to insert index into dataframe columns.\n \"\"\"\n return self.map_partitions(\n M.reset_index, drop=drop, enforce_metadata=False\n ).clear_divisions()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.known_divisions__Frame.get_partition.if_0_n_self_npartiti.else_.raise_ValueError_msg_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.known_divisions__Frame.get_partition.if_0_n_self_npartiti.else_.raise_ValueError_msg_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 480, "end_line": 500, "span_ids": ["_Frame.get_partition", "_Frame.known_divisions", "_Frame.clear_divisions"], "tokens": 246}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @property\n def known_divisions(self):\n \"\"\"Whether divisions are already known\"\"\"\n return len(self.divisions) > 0 and self.divisions[0] is not None\n\n def clear_divisions(self):\n \"\"\" Forget division information \"\"\"\n divisions = (None,) * (self.npartitions + 1)\n return 
type(self)(self.dask, self._name, self._meta, divisions)\n\n def get_partition(self, n):\n \"\"\"Get a dask DataFrame/Series representing the `nth` partition.\"\"\"\n if 0 <= n < self.npartitions:\n name = \"get-partition-%s-%s\" % (str(n), self._name)\n divisions = self.divisions[n : n + 2]\n layer = {(name, 0): (self._name, n)}\n graph = HighLevelGraph.from_collections(name, layer, dependencies=[self])\n return new_dd_object(graph, name, self._meta, divisions)\n else:\n msg = \"n must be 0 <= n < {0}\".format(self.npartitions)\n raise ValueError(msg)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.drop_duplicates__Frame.drop_duplicates.return.aca_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.drop_duplicates__Frame.drop_duplicates.return.aca_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 502, "end_line": 532, "span_ids": ["_Frame.drop_duplicates"], "tokens": 242}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def drop_duplicates(\n self, subset=None, split_every=None, split_out=1, ignore_index=False, **kwargs\n ):\n if subset is not None:\n # Let pandas error on bad inputs\n self._meta_nonempty.drop_duplicates(subset=subset, **kwargs)\n kwargs[\"subset\"] = subset\n split_out_setup = split_out_on_cols\n split_out_setup_kwargs = {\"cols\": subset}\n else:\n self._meta_nonempty.drop_duplicates(**kwargs)\n split_out_setup = split_out_setup_kwargs = None\n\n if kwargs.get(\"keep\", True) is False:\n raise NotImplementedError(\"drop_duplicates with keep=False\")\n\n chunk = M.drop_duplicates\n return aca(\n self,\n chunk=chunk,\n aggregate=chunk,\n meta=self._meta,\n token=\"drop-duplicates\",\n split_every=split_every,\n split_out=split_out,\n split_out_setup=split_out_setup,\n split_out_setup_kwargs=split_out_setup_kwargs,\n ignore_index=ignore_index,\n **kwargs,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.__len____Frame.__complex__.return.self__scalarfunc_complex_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.__len____Frame.__complex__.return.self__scalarfunc_complex_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 534, "end_line": 562, "span_ids": ["_Frame.__int__", "_Frame._scalarfunc", "_Frame:9", "_Frame.__bool__", "_Frame.__complex__", "_Frame:7", 
"_Frame.__float__", "_Frame.__len__"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def __len__(self):\n return self.reduction(\n len, np.sum, token=\"len\", meta=int, split_every=False\n ).compute()\n\n def __bool__(self):\n raise ValueError(\n \"The truth value of a {0} is ambiguous. \"\n \"Use a.any() or a.all().\".format(self.__class__.__name__)\n )\n\n __nonzero__ = __bool__ # python 2\n\n def _scalarfunc(self, cast_type):\n def wrapper():\n raise TypeError(\"cannot convert the series to {0}\".format(str(cast_type)))\n\n return wrapper\n\n def __float__(self):\n return self._scalarfunc(float)\n\n def __int__(self):\n return self._scalarfunc(int)\n\n __long__ = __int__ # python 2\n\n def __complex__(self):\n return self._scalarfunc(complex)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.map_partitions__Frame.map_partitions.return.map_partitions_func_self": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.map_partitions__Frame.map_partitions.return.map_partitions_func_self", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 573, "end_line": 653, "span_ids": ["_Frame.map_partitions"], "tokens": 774}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @insert_meta_param_description(pad=12)\n def map_partitions(self, func, *args, **kwargs):\n \"\"\"Apply Python function on each DataFrame partition.\n\n Note that the index and divisions are assumed to remain unchanged.\n\n Parameters\n ----------\n func : function\n Function applied to each partition.\n args, kwargs :\n Arguments and keywords to pass to the function. The partition will\n be the first argument, and these will be passed *after*. Arguments\n and keywords may contain ``Scalar``, ``Delayed`` or regular\n python objects. DataFrame-like args (both dask and pandas) will be\n repartitioned to align (if necessary) before applying the function.\n $META\n\n Examples\n --------\n Given a DataFrame, Series, or Index, such as:\n\n >>> import pandas as pd\n >>> import dask.dataframe as dd\n >>> df = pd.DataFrame({'x': [1, 2, 3, 4, 5],\n ... 
'y': [1., 2., 3., 4., 5.]})\n >>> ddf = dd.from_pandas(df, npartitions=2)\n\n One can use ``map_partitions`` to apply a function on each partition.\n Extra arguments and keywords can optionally be provided, and will be\n passed to the function after the partition.\n\n Here we apply a function with arguments and keywords to a DataFrame,\n resulting in a Series:\n\n >>> def myadd(df, a, b=1):\n ... return df.x + df.y + a + b\n >>> res = ddf.map_partitions(myadd, 1, b=2)\n >>> res.dtype\n dtype('float64')\n\n By default, dask tries to infer the output metadata by running your\n provided function on some fake data. This works well in many cases, but\n can sometimes be expensive, or even fail. To avoid this, you can\n manually specify the output metadata with the ``meta`` keyword. This\n can be specified in many forms, for more information see\n ``dask.dataframe.utils.make_meta``.\n\n Here we specify the output is a Series with no name, and dtype\n ``float64``:\n\n >>> res = ddf.map_partitions(myadd, 1, b=2, meta=(None, 'f8'))\n\n Here we map a function that takes in a DataFrame, and returns a\n DataFrame with a new column:\n\n >>> res = ddf.map_partitions(lambda df: df.assign(z=df.x * df.y))\n >>> res.dtypes\n x int64\n y float64\n z float64\n dtype: object\n\n As before, the output metadata can also be specified manually. This\n time we pass in a ``dict``, as the output is a DataFrame:\n\n >>> res = ddf.map_partitions(lambda df: df.assign(z=df.x * df.y),\n ... meta={'x': 'i8', 'y': 'f8', 'z': 'f8'})\n\n In the case where the metadata doesn't change, you can also pass in\n the object itself directly:\n\n >>> res = ddf.map_partitions(lambda df: df.head(), meta=ddf)\n\n Also note that the index and divisions are assumed to remain unchanged.\n If the function you're mapping changes the index/divisions, you'll need\n to clear them afterwards:\n\n >>> ddf.map_partitions(func).clear_divisions() # doctest: +SKIP\n \"\"\"\n return map_partitions(func, self, *args, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.map_overlap__Frame.map_overlap.return.map_overlap_func_self_b": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.map_overlap__Frame.map_overlap.return.map_overlap_func_self_b", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 655, "end_line": 765, "span_ids": ["_Frame.map_overlap"], "tokens": 1184}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @insert_meta_param_description(pad=12)\n def map_overlap(self, func, before, after, *args, **kwargs):\n \"\"\"Apply a function to each partition, sharing rows with adjacent partitions.\n\n This can be useful for implementing windowing functions such as\n ``df.rolling(...).mean()`` or ``df.diff()``.\n\n Parameters\n ----------\n func : 
function\n Function applied to each partition.\n before : int\n The number of rows to prepend to partition ``i`` from the end of\n partition ``i - 1``.\n after : int\n The number of rows to append to partition ``i`` from the beginning\n of partition ``i + 1``.\n args, kwargs :\n Arguments and keywords to pass to the function. The partition will\n be the first argument, and these will be passed *after*.\n $META\n\n Notes\n -----\n Given positive integers ``before`` and ``after``, and a function\n ``func``, ``map_overlap`` does the following:\n\n 1. Prepend ``before`` rows to each partition ``i`` from the end of\n partition ``i - 1``. The first partition has no rows prepended.\n\n 2. Append ``after`` rows to each partition ``i`` from the beginning of\n partition ``i + 1``. The last partition has no rows appended.\n\n 3. Apply ``func`` to each partition, passing in any extra ``args`` and\n ``kwargs`` if provided.\n\n 4. Trim ``before`` rows from the beginning of all but the first\n partition.\n\n 5. Trim ``after`` rows from the end of all but the last partition.\n\n Note that the index and divisions are assumed to remain unchanged.\n\n Examples\n --------\n Given a DataFrame, Series, or Index, such as:\n\n >>> import pandas as pd\n >>> import dask.dataframe as dd\n >>> df = pd.DataFrame({'x': [1, 2, 4, 7, 11],\n ... 'y': [1., 2., 3., 4., 5.]})\n >>> ddf = dd.from_pandas(df, npartitions=2)\n\n A rolling sum with a trailing moving window of size 2 can be computed by\n overlapping 2 rows before each partition, and then mapping calls to\n ``df.rolling(2).sum()``:\n\n >>> ddf.compute()\n x y\n 0 1 1.0\n 1 2 2.0\n 2 4 3.0\n 3 7 4.0\n 4 11 5.0\n >>> ddf.map_overlap(lambda df: df.rolling(2).sum(), 2, 0).compute()\n x y\n 0 NaN NaN\n 1 3.0 3.0\n 2 6.0 5.0\n 3 11.0 7.0\n 4 18.0 9.0\n\n The pandas ``diff`` method computes a discrete difference shifted by a\n number of periods (can be positive or negative). This can be\n implemented by mapping calls to ``df.diff`` to each partition after\n prepending/appending that many rows, depending on sign:\n\n >>> def diff(df, periods=1):\n ... before, after = (periods, 0) if periods > 0 else (0, -periods)\n ... return df.map_overlap(lambda df, periods=1: df.diff(periods),\n ... periods, 0, periods=periods)\n >>> diff(ddf, 1).compute()\n x y\n 0 NaN NaN\n 1 1.0 1.0\n 2 2.0 1.0\n 3 3.0 1.0\n 4 4.0 1.0\n\n If you have a ``DatetimeIndex``, you can use a ``pd.Timedelta`` for time-\n based windows.\n\n >>> ts = pd.Series(range(10), index=pd.date_range('2017', periods=10))\n >>> dts = dd.from_pandas(ts, npartitions=2)\n >>> dts.map_overlap(lambda df: df.rolling('2D').sum(),\n ... 
pd.Timedelta('2D'), 0).compute()\n 2017-01-01 0.0\n 2017-01-02 1.0\n 2017-01-03 3.0\n 2017-01-04 5.0\n 2017-01-05 7.0\n 2017-01-06 9.0\n 2017-01-07 11.0\n 2017-01-08 13.0\n 2017-01-09 15.0\n 2017-01-10 17.0\n Freq: D, dtype: float64\n \"\"\"\n from .rolling import map_overlap\n\n return map_overlap(func, self, before, after, *args, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.memory_usage_per_partition__Frame.memory_usage_per_partition.return.self_map_partitions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.memory_usage_per_partition__Frame.memory_usage_per_partition.return.self_map_partitions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 756, "end_line": 777, "span_ids": ["_Frame.memory_usage_per_partition"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def memory_usage_per_partition(self, index=True, deep=False):\n \"\"\"Return the memory usage of each partition\n\n Parameters\n ----------\n index : bool, default True\n Specifies whether to include the memory usage of the index in\n returned Series.\n deep : bool, default False\n If True, introspect the data deeply by interrogating\n ``object`` dtypes for system-level memory consumption, and include\n it in the returned values.\n\n Returns\n -------\n Series\n A Series whose index is the partition number and whose values\n are the memory usage of each partition in bytes.\n \"\"\"\n return self.map_partitions(\n total_mem_usage, index=index, deep=deep\n ).clear_divisions()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.reduction__Frame.reduction._Generic_row_wise_reduc": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.reduction__Frame.reduction._Generic_row_wise_reduc", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 779, "end_line": 891, "span_ids": ["_Frame.reduction"], "tokens": 986}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @insert_meta_param_description(pad=12)\n def reduction(\n 
self,\n chunk,\n aggregate=None,\n combine=None,\n meta=no_default,\n token=None,\n split_every=None,\n chunk_kwargs=None,\n aggregate_kwargs=None,\n combine_kwargs=None,\n **kwargs,\n ):\n \"\"\"Generic row-wise reductions.\n\n Parameters\n ----------\n chunk : callable\n Function to operate on each partition. Should return a\n ``pandas.DataFrame``, ``pandas.Series``, or a scalar.\n aggregate : callable, optional\n Function to operate on the concatenated result of ``chunk``. If not\n specified, defaults to ``chunk``. Used to do the final aggregation\n in a tree reduction.\n\n The input to ``aggregate`` depends on the output of ``chunk``.\n If the output of ``chunk`` is a:\n\n - scalar: Input is a Series, with one row per partition.\n - Series: Input is a DataFrame, with one row per partition. Columns\n are the rows in the output series.\n - DataFrame: Input is a DataFrame, with one row per partition.\n Columns are the columns in the output dataframes.\n\n Should return a ``pandas.DataFrame``, ``pandas.Series``, or a\n scalar.\n combine : callable, optional\n Function to operate on intermediate concatenated results of\n ``chunk`` in a tree-reduction. If not provided, defaults to\n ``aggregate``. The input/output requirements should match that of\n ``aggregate`` described above.\n $META\n token : str, optional\n The name to use for the output keys.\n split_every : int, optional\n Group partitions into groups of this size while performing a\n tree-reduction. If set to False, no tree-reduction will be used,\n and all intermediates will be concatenated and passed to\n ``aggregate``. Default is 8.\n chunk_kwargs : dict, optional\n Keyword arguments to pass on to ``chunk`` only.\n aggregate_kwargs : dict, optional\n Keyword arguments to pass on to ``aggregate`` only.\n combine_kwargs : dict, optional\n Keyword arguments to pass on to ``combine`` only.\n kwargs :\n All remaining keywords will be passed to ``chunk``, ``combine``,\n and ``aggregate``.\n\n Examples\n --------\n >>> import pandas as pd\n >>> import dask.dataframe as dd\n >>> df = pd.DataFrame({'x': range(50), 'y': range(50, 100)})\n >>> ddf = dd.from_pandas(df, npartitions=4)\n\n Count the number of rows in a DataFrame. To do this, count the number\n of rows in each partition, then sum the results:\n\n >>> res = ddf.reduction(lambda x: x.count(),\n ... aggregate=lambda x: x.sum())\n >>> res.compute()\n x 50\n y 50\n dtype: int64\n\n Count the number of rows in a Series with elements greater than or\n equal to a value (provided via a keyword).\n\n >>> def count_greater(x, value=0):\n ... return (x >= value).sum()\n >>> res = ddf.x.reduction(count_greater, aggregate=lambda x: x.sum(),\n ... chunk_kwargs={'value': 25})\n >>> res.compute()\n 25\n\n Aggregate both the sum and count of a Series at the same time:\n\n >>> def sum_and_count(x):\n ... return pd.Series({'count': x.count(), 'sum': x.sum()},\n ... index=['count', 'sum'])\n >>> res = ddf.x.reduction(sum_and_count, aggregate=lambda x: x.sum())\n >>> res.compute()\n count 50\n sum 1225\n dtype: int64\n\n Doing the same, but for a DataFrame. Here ``chunk`` returns a\n DataFrame, meaning the input to ``aggregate`` is a DataFrame with an\n index with non-unique entries for both 'x' and 'y'. We groupby the\n index, and sum each group to get the final result.\n\n >>> def sum_and_count(x):\n ... return pd.DataFrame({'count': x.count(), 'sum': x.sum()},\n ... columns=['count', 'sum'])\n >>> res = ddf.reduction(sum_and_count,\n ... 
aggregate=lambda x: x.groupby(level=0).sum())\n >>> res.compute()\n count sum\n x 50 1225\n y 50 3725\n \"\"\"\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.reduction.if_aggregate_is_None___Frame.reduction.return.aca_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.reduction.if_aggregate_is_None___Frame.reduction.return.aca_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 892, "end_line": 922, "span_ids": ["_Frame.reduction"], "tokens": 267}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @insert_meta_param_description(pad=12)\n def reduction(\n self,\n chunk,\n aggregate=None,\n combine=None,\n meta=no_default,\n token=None,\n split_every=None,\n chunk_kwargs=None,\n aggregate_kwargs=None,\n combine_kwargs=None,\n **kwargs,\n ):\n if aggregate is None:\n aggregate = chunk\n\n if combine is None:\n if combine_kwargs:\n raise ValueError(\"`combine_kwargs` provided with no `combine`\")\n combine = aggregate\n combine_kwargs = aggregate_kwargs\n\n chunk_kwargs = chunk_kwargs.copy() if chunk_kwargs else {}\n chunk_kwargs[\"aca_chunk\"] = chunk\n\n combine_kwargs = combine_kwargs.copy() if combine_kwargs else {}\n combine_kwargs[\"aca_combine\"] = combine\n\n aggregate_kwargs = aggregate_kwargs.copy() if aggregate_kwargs else {}\n aggregate_kwargs[\"aca_aggregate\"] = aggregate\n\n return aca(\n self,\n chunk=_reduction_chunk,\n aggregate=_reduction_aggregate,\n combine=_reduction_combine,\n meta=meta,\n token=token,\n split_every=split_every,\n chunk_kwargs=chunk_kwargs,\n aggregate_kwargs=aggregate_kwargs,\n combine_kwargs=combine_kwargs,\n **kwargs,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.pipe__Frame.pipe.if_isinstance_func_tuple.else_.return.func_self_args_kwarg": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.pipe__Frame.pipe.if_isinstance_func_tuple.else_.return.func_self_args_kwarg", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 924, "end_line": 937, "span_ids": ["_Frame.pipe"], "tokens": 136}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def pipe(self, func, *args, **kwargs):\n # Taken from pandas:\n # https://github.com/pydata/pandas/blob/master/pandas/core/generic.py#L2698-L2707\n if isinstance(func, tuple):\n func, target = func\n if target in kwargs:\n raise ValueError(\n \"%s is both the pipe target and a keyword argument\" % target\n )\n kwargs[target] = self\n return func(*args, **kwargs)\n else:\n return func(self, *args, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.random_split__Frame.random_split.return.out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.random_split__Frame.random_split.return.out", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 939, "end_line": 989, "span_ids": ["_Frame.random_split"], "tokens": 446}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def random_split(self, frac, random_state=None, shuffle=False):\n \"\"\"Pseudorandomly split dataframe into different pieces row-wise\n\n Parameters\n ----------\n frac : list\n List of floats that should sum to one.\n random_state : int or np.random.RandomState\n If int create a new RandomState with this as the seed.\n Otherwise draw from the passed RandomState.\n shuffle : bool, default False\n If set to True, the dataframe is shuffled (within partition)\n before the split.\n\n Examples\n --------\n\n 50/50 split\n\n >>> a, b = df.random_split([0.5, 0.5]) # doctest: +SKIP\n\n 80/10/10 split, consistent random_state\n\n >>> a, b, c = df.random_split([0.8, 0.1, 0.1], random_state=123) # doctest: +SKIP\n\n See Also\n --------\n dask.DataFrame.sample\n \"\"\"\n if not np.allclose(sum(frac), 1):\n raise ValueError(\"frac should sum to 1\")\n state_data = random_state_data(self.npartitions, random_state)\n token = tokenize(self, frac, random_state)\n name = \"split-\" + token\n layer = {\n (name, i): (pd_split, (self._name, i), frac, state, shuffle)\n for i, state in enumerate(state_data)\n }\n\n out = []\n for i in range(len(frac)):\n name2 = \"split-%d-%s\" % (i, token)\n dsk2 = {\n (name2, j): (getitem, (name, j), i) for j in range(self.npartitions)\n }\n graph = HighLevelGraph.from_collections(\n name2, merge(dsk2, layer), dependencies=[self]\n )\n out_df = type(self)(graph, name2, self._meta, self.divisions)\n out.append(out_df)\n return out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.head__Frame.head.return.self__head_n_n_npartitio": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.head__Frame.head.return.self__head_n_n_npartitio", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 991, "end_line": 1006, "span_ids": ["_Frame.head"], "tokens": 177}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def head(self, n=5, npartitions=1, compute=True):\n \"\"\"First n rows of the dataset\n\n Parameters\n ----------\n n : int, optional\n The number of rows to return. Default is 5.\n npartitions : int, optional\n Elements are only taken from the first ``npartitions``, with a\n default of 1. If there are fewer than ``n`` rows in the first\n ``npartitions`` a warning will be raised and any found rows\n returned. Pass -1 to use all partitions.\n compute : bool, optional\n Whether to compute the result, default is True.\n \"\"\"\n return self._head(n=n, npartitions=npartitions, compute=compute, safe=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._head__Frame._head.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._head__Frame._head.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1008, "end_line": 1040, "span_ids": ["_Frame._head"], "tokens": 319}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def _head(self, n, npartitions, compute, safe):\n if npartitions <= -1:\n npartitions = self.npartitions\n if npartitions > self.npartitions:\n msg = \"only {} partitions, head received {}\"\n raise ValueError(msg.format(self.npartitions, npartitions))\n\n name = \"head-%d-%d-%s\" % (npartitions, n, self._name)\n if safe:\n head = safe_head\n else:\n head = M.head\n\n if npartitions > 1:\n name_p = \"head-partial-%d-%s\" % (n, self._name)\n\n dsk = {}\n for i in range(npartitions):\n dsk[(name_p, i)] = (M.head, (self._name, i), n)\n\n concat = (_concat, [(name_p, i) for i in range(npartitions)])\n dsk[(name, 0)] = (head, concat, n)\n else:\n dsk = {(name, 0): (head, (self._name, 0), n)}\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n result = new_dd_object(\n graph, name, self._meta, 
[self.divisions[0], self.divisions[npartitions]]\n )\n\n if compute:\n result = result.compute()\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.tail__Frame.loc.return._LocIndexer_self_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.tail__Frame.loc.return._LocIndexer_self_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1042, "end_line": 1066, "span_ids": ["_Frame.loc", "_Frame.tail"], "tokens": 225}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def tail(self, n=5, compute=True):\n \"\"\"Last n rows of the dataset\n\n Caveat: this only checks the last n rows of the last partition.\n \"\"\"\n name = \"tail-%d-%s\" % (n, self._name)\n dsk = {(name, 0): (M.tail, (self._name, self.npartitions - 1), n)}\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n result = new_dd_object(graph, name, self._meta, self.divisions[-2:])\n\n if compute:\n result = result.compute()\n return result\n\n @property\n def loc(self):\n \"\"\"Purely label-location based indexer for selection by label.\n\n >>> df.loc[\"b\"] # doctest: +SKIP\n >>> df.loc[\"b\":\"d\"] # doctest: +SKIP\n \"\"\"\n from .indexing import _LocIndexer\n\n return _LocIndexer(self)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._partitions__Frame._partitions.return.new_dd_object_graph_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._partitions__Frame._partitions.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1068, "end_line": 1084, "span_ids": ["_Frame._partitions"], "tokens": 204}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def _partitions(self, index):\n if not isinstance(index, tuple):\n index = (index,)\n from ..array.slicing import normalize_index\n\n index = normalize_index(index, (self.npartitions,))\n index = tuple(slice(k, k + 1) if isinstance(k, Number) else k for k in index)\n name = \"blocks-\" + tokenize(self, index)\n new_keys = 
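A runnable sketch of the head/tail caveats above, assuming a local dask install: head reads only the first npartitions unless told otherwise, and tail never looks past the last partition:

import pandas as pd
import dask.dataframe as dd

ddf = dd.from_pandas(pd.DataFrame({"x": range(10)}), npartitions=5)

print(ddf.head(2))                  # served entirely by partition 0
print(ddf.head(6, npartitions=-1))  # -1: rows may come from all partitions
print(ddf.tail(2))                  # checks only the last partition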
np.array(self.__dask_keys__(), dtype=object)[index].tolist()\n\n divisions = [self.divisions[i] for _, i in new_keys] + [\n self.divisions[new_keys[-1][1] + 1]\n ]\n dsk = {(name, i): tuple(key) for i, key in enumerate(new_keys)}\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n return new_dd_object(graph, name, self._meta, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.partitions__Frame._Note_iloc_is_implement": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.partitions__Frame._Note_iloc_is_implement", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1086, "end_line": 1107, "span_ids": ["_Frame.partitions"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @property\n def partitions(self):\n \"\"\"Slice dataframe by partitions\n\n This allows partitionwise slicing of a Dask Dataframe. You can perform normal\n Numpy-style slicing but now rather than slice elements of the array you\n slice along partitions so, for example, ``df.partitions[:5]`` produces a new\n Dask Dataframe of the first five partitions.\n\n Examples\n --------\n >>> df.partitions[0] # doctest: +SKIP\n >>> df.partitions[:3] # doctest: +SKIP\n >>> df.partitions[::10] # doctest: +SKIP\n\n Returns\n -------\n A Dask DataFrame\n \"\"\"\n return IndexCallable(self._partitions)\n\n # Note: iloc is implemented only on DataFrame", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.repartition__Frame.repartition.if_partition_size_is_not_.elif_freq_is_not_None_.return.repartition_freq_self_fr": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.repartition__Frame.repartition.if_partition_size_is_not_.elif_freq_is_not_None_.return.repartition_freq_self_fr", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1109, "end_line": 1188, "span_ids": ["_Frame.repartition"], "tokens": 644}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def repartition(\n self,\n divisions=None,\n 
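As a sketch of the partitionwise slicing above: the result is itself a Dask DataFrame, with divisions trimmed by the internal ``_partitions`` helper shown here:

import pandas as pd
import dask.dataframe as dd

ddf = dd.from_pandas(pd.DataFrame({"x": range(12)}), npartitions=4)

first_two = ddf.partitions[:2]         # partitions 0 and 1 only
print(first_two.npartitions)           # 2
print(first_two.compute().x.tolist())  # [0, 1, 2, 3, 4, 5]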
npartitions=None,\n partition_size=None,\n freq=None,\n force=False,\n ):\n \"\"\"Repartition dataframe along new divisions\n\n Parameters\n ----------\n divisions : list, optional\n List of partitions to be used. Only used if npartitions and\n partition_size aren't specified.\n For convenience if given an integer this will defer to npartitions\n and if given a string it will defer to partition_size (see below)\n npartitions : int, optional\n Number of partitions of output. Only used if partition_size\n isn't specified.\n partition_size: int or string, optional\n Max number of bytes of memory for each partition. Use numbers or\n strings like 5MB. If specified npartitions and divisions will be\n ignored.\n\n .. warning::\n\n This keyword argument triggers computation to determine\n the memory size of each partition, which may be expensive.\n\n freq : str, pd.Timedelta\n A period on which to partition timeseries data like ``'7D'`` or\n ``'12h'`` or ``pd.Timedelta(hours=12)``. Assumes a datetime index.\n force : bool, default False\n Allows the expansion of the existing divisions.\n If False then the new divisions' lower and upper bounds must be\n the same as the old divisions'.\n\n Notes\n -----\n Exactly one of `divisions`, `npartitions`, `partition_size`, or `freq`\n should be specified. A ``ValueError`` will be raised when that is\n not the case.\n\n Examples\n --------\n >>> df = df.repartition(npartitions=10) # doctest: +SKIP\n >>> df = df.repartition(divisions=[0, 5, 10, 20]) # doctest: +SKIP\n >>> df = df.repartition(freq='7d') # doctest: +SKIP\n \"\"\"\n if isinstance(divisions, int):\n npartitions = divisions\n divisions = None\n if isinstance(divisions, str):\n partition_size = divisions\n divisions = None\n if (\n sum(\n [\n partition_size is not None,\n divisions is not None,\n npartitions is not None,\n freq is not None,\n ]\n )\n != 1\n ):\n raise ValueError(\n \"Please provide exactly one of ``npartitions=``, ``freq=``, \"\n \"``divisions=``, ``partition_size=`` keyword arguments\"\n )\n\n if partition_size is not None:\n return repartition_size(self, partition_size)\n elif npartitions is not None:\n return repartition_npartitions(self, npartitions)\n elif divisions is not None:\n return repartition(self, divisions, force=force)\n elif freq is not None:\n return repartition_freq(self, freq=freq)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.shuffle__Frame.shuffle.return.dd_shuffle_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.shuffle__Frame.shuffle.return.dd_shuffle_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1190, "end_line": 1243, "span_ids": ["_Frame.shuffle"], "tokens": 393}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def shuffle(\n self,\n on,\n 
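The convenience coercion at the top of ``repartition`` can be seen in a short sketch; the "100kB" figure is arbitrary, and recall from the warning above that partition_size triggers real computation:

import pandas as pd
import dask.dataframe as dd

ddf = dd.from_pandas(pd.DataFrame({"x": range(100)}), npartitions=10)

a = ddf.repartition(4)        # int positional -> treated as npartitions=4
b = ddf.repartition("100kB")  # str positional -> treated as partition_size
print(a.npartitions)          # 4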
npartitions=None,\n max_branch=None,\n shuffle=None,\n ignore_index=False,\n compute=None,\n ):\n \"\"\"Rearrange DataFrame into new partitions\n\n Uses hashing of `on` to map rows to output partitions. After this\n operation, rows with the same value of `on` will be in the same\n partition.\n\n Parameters\n ----------\n on : str, list of str, or Series, Index, or DataFrame\n Column(s) or index to be used to map rows to output partitions\n npartitions : int, optional\n Number of partitions of output. Partition count will not be\n changed by default.\n max_branch: int, optional\n The maximum number of splits per input partition. Used within\n the staged shuffling algorithm.\n shuffle: {'disk', 'tasks'}, optional\n Either ``'disk'`` for single-node operation or ``'tasks'`` for\n distributed operation. Will be inferred by your current scheduler.\n ignore_index: bool, default False\n Ignore index during shuffle. If ``True``, performance may improve,\n but index values will not be preserved.\n compute: bool\n Whether or not to trigger an immediate computation. Defaults to False.\n\n Notes\n -----\n This does not preserve a meaningful index/partitioning scheme. This\n is not deterministic if done in parallel.\n\n Examples\n --------\n >>> df = df.shuffle(df.columns[0]) # doctest: +SKIP\n \"\"\"\n from .shuffle import shuffle as dd_shuffle\n\n return dd_shuffle(\n self,\n on,\n npartitions=npartitions,\n max_branch=max_branch,\n shuffle=shuffle,\n ignore_index=ignore_index,\n compute=compute,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.fillna__Frame.fillna.return.parts_map_overlap_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.fillna__Frame.fillna.return.parts_map_overlap_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1245, "end_line": 1308, "span_ids": ["_Frame.fillna"], "tokens": 499}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def fillna(self, value=None, method=None, limit=None, axis=None):\n axis = self._validate_axis(axis)\n if method is None and limit is not None:\n raise NotImplementedError(\"fillna with set limit and method=None\")\n if isinstance(value, _Frame):\n test_value = value._meta_nonempty.values[0]\n elif isinstance(value, Scalar):\n test_value = value._meta_nonempty\n else:\n test_value = value\n meta = self._meta_nonempty.fillna(\n value=test_value, method=method, limit=limit, axis=axis\n )\n\n if axis == 1 or method is None:\n # Control whether or not dask's partition alignment happens.\n # We don't want for a pandas Series.\n # We do want it for a dask Series\n if is_series_like(value) and not is_dask_collection(value):\n args = ()\n kwargs = {\"value\": value}\n else:\n args = (value,)\n kwargs = {}\n return 
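A sketch of the hash-partitioning behaviour described above: after shuffling on "k", all rows sharing a key land in the same output partition, so per-partition groupbys become safe (partition order itself carries no meaning):

import pandas as pd
import dask.dataframe as dd

ddf = dd.from_pandas(
    pd.DataFrame({"k": [1, 2, 1, 3, 2, 3], "v": range(6)}), npartitions=3
)

shuffled = ddf.shuffle("k", npartitions=3)
# Each key's rows are now co-located, so this groupby sees every row per key.
print(shuffled.map_partitions(lambda p: p.groupby("k").v.sum()).compute())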
self.map_partitions(\n M.fillna,\n *args,\n method=method,\n limit=limit,\n axis=axis,\n meta=meta,\n enforce_metadata=False,\n **kwargs,\n )\n\n if method in (\"pad\", \"ffill\"):\n method = \"ffill\"\n skip_check = 0\n before, after = 1 if limit is None else limit, 0\n else:\n method = \"bfill\"\n skip_check = self.npartitions - 1\n before, after = 0, 1 if limit is None else limit\n\n if limit is None:\n name = \"fillna-chunk-\" + tokenize(self, method)\n dsk = {\n (name, i): (\n methods.fillna_check,\n (self._name, i),\n method,\n i != skip_check,\n )\n for i in range(self.npartitions)\n }\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n parts = new_dd_object(graph, name, meta, self.divisions)\n else:\n parts = self\n\n return parts.map_overlap(\n M.fillna, before, after, method=method, limit=limit, meta=meta\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.ffill__Frame.sample.return.new_dd_object_graph_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.ffill__Frame.sample.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1310, "end_line": 1365, "span_ids": ["_Frame.sample", "_Frame.bfill", "_Frame.ffill"], "tokens": 443}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def ffill(self, axis=None, limit=None):\n return self.fillna(method=\"ffill\", limit=limit, axis=axis)\n\n @derived_from(pd.DataFrame)\n def bfill(self, axis=None, limit=None):\n return self.fillna(method=\"bfill\", limit=limit, axis=axis)\n\n def sample(self, n=None, frac=None, replace=False, random_state=None):\n \"\"\"Random sample of items\n\n Parameters\n ----------\n n : int, optional\n Number of items to return is not supported by dask. Use frac\n instead.\n frac : float, optional\n Fraction of axis items to return.\n replace : boolean, optional\n Sample with or without replacement. Default = False.\n random_state : int or ``np.random.RandomState``\n If int we create a new RandomState with this as the seed\n Otherwise we draw from the passed RandomState\n\n See Also\n --------\n DataFrame.random_split\n pandas.DataFrame.sample\n \"\"\"\n if n is not None:\n msg = (\n \"sample does not support the number of sampled items \"\n \"parameter, 'n'. 
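To illustrate the map_overlap-based filling above, a small sketch assuming the default limit=None, so one row of overlap in each direction is enough for fills to cross partition boundaries:

import pandas as pd
import numpy as np
import dask.dataframe as dd

s = pd.Series([1.0, np.nan, np.nan, 4.0, np.nan, 6.0])
ds = dd.from_pandas(s, npartitions=3)

# ffill/bfill delegate to fillna(method=...), shown above.
print(ds.ffill().compute().tolist())  # [1.0, 1.0, 1.0, 4.0, 4.0, 6.0]
print(ds.bfill().compute().tolist())  # [1.0, 4.0, 4.0, 4.0, 6.0, 6.0]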
Please use the 'frac' parameter instead.\"\n )\n if isinstance(n, Number) and 0 <= n <= 1:\n warnings.warn(msg)\n frac = n\n else:\n raise ValueError(msg)\n\n if frac is None:\n raise ValueError(\"frac must not be None\")\n\n if random_state is None:\n random_state = np.random.RandomState()\n\n name = \"sample-\" + tokenize(self, frac, replace, random_state)\n\n state_data = random_state_data(self.npartitions, random_state)\n dsk = {\n (name, i): (methods.sample, (self._name, i), state, frac, replace)\n for i, state in enumerate(state_data)\n }\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n return new_dd_object(graph, name, self._meta, self.divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.replace__Frame.to_dask_array.return.arr": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.replace__Frame.to_dask_array.return.arr", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1378, "end_line": 1422, "span_ids": ["_Frame.to_dask_array", "_Frame.replace"], "tokens": 315}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def replace(self, to_replace=None, value=None, regex=False):\n return self.map_partitions(\n M.replace,\n to_replace=to_replace,\n value=value,\n regex=regex,\n enforce_metadata=False,\n )\n\n def to_dask_array(self, lengths=None, meta=None):\n \"\"\"Convert a dask DataFrame to a dask array.\n\n Parameters\n ----------\n lengths : bool or Sequence of ints, optional\n How to determine the chunk sizes for the output array.\n By default, the output array will have unknown chunk lengths\n along the first axis, which can cause some later operations\n to fail.\n\n * True : immediately compute the length of each partition\n * Sequence : a sequence of integers to use for the chunk sizes\n on the first axis. 
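Because the task names above are tokenized together with random_state, sampling is reproducible for a fixed seed, as this sketch shows:

import pandas as pd
import dask.dataframe as dd

ddf = dd.from_pandas(pd.DataFrame({"x": range(1000)}), npartitions=4)

s1 = ddf.sample(frac=0.1, random_state=42)
s2 = ddf.sample(frac=0.1, random_state=42)
assert s1.compute().equals(s2.compute())  # same seed -> same rows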
These values are *not* validated for\n correctness, beyond ensuring that the number of items\n matches the number of partitions.\n meta : object, optional\n An optional `meta` parameter can be passed for dask to override the\n default metadata on the underlying dask array.\n\n Returns\n -------\n \"\"\"\n if lengths is True:\n lengths = tuple(self.map_partitions(len, enforce_metadata=False).compute())\n\n arr = self.values\n\n chunks = self._validate_chunks(arr, lengths)\n arr._chunks = chunks\n\n if meta is not None:\n arr._meta = meta\n\n return arr", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.to_hdf__Frame.to_sql.return.to_sql_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.to_hdf__Frame.to_sql.return.to_sql_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1407, "end_line": 1449, "span_ids": ["_Frame.to_sql", "_Frame.to_hdf", "_Frame.to_csv"], "tokens": 273}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def to_hdf(self, path_or_buf, key, mode=\"a\", append=False, **kwargs):\n \"\"\" See dd.to_hdf docstring for more information \"\"\"\n from .io import to_hdf\n\n return to_hdf(self, path_or_buf, key, mode, append, **kwargs)\n\n def to_csv(self, filename, **kwargs):\n \"\"\" See dd.to_csv docstring for more information \"\"\"\n from .io import to_csv\n\n return to_csv(self, filename, **kwargs)\n\n def to_sql(\n self,\n name: str,\n uri: str,\n schema=None,\n if_exists: str = \"fail\",\n index: bool = True,\n index_label=None,\n chunksize=None,\n dtype=None,\n method=None,\n compute=True,\n parallel=False,\n ):\n \"\"\" See dd.to_sql docstring for more information \"\"\"\n from .io import to_sql\n\n return to_sql(\n self,\n name=name,\n uri=uri,\n schema=schema,\n if_exists=if_exists,\n index=index,\n index_label=index_label,\n chunksize=chunksize,\n dtype=dtype,\n method=method,\n compute=compute,\n parallel=parallel,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.to_json__Frame._get_binary_operator.if_inv_.else_.return.lambda_self_other_elemw": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.to_json__Frame._get_binary_operator.if_inv_.else_.return.lambda_self_other_elemw", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1451, "end_line": 1491, "span_ids": ["_Frame.to_delayed", 
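A sketch of the lengths= behaviour documented above: by default the first-axis chunks are unknown (NaN), while lengths=True computes each partition's length up front:

import pandas as pd
import dask.dataframe as dd

ddf = dd.from_pandas(
    pd.DataFrame({"x": range(10), "y": range(10)}), npartitions=2
)

print(ddf.to_dask_array().chunks)              # ((nan, nan), (2,))
print(ddf.to_dask_array(lengths=True).chunks)  # ((5, 5), (2,))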
"_Frame._get_unary_operator", "_Frame.to_json", "_Frame._get_binary_operator"], "tokens": 327}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def to_json(self, filename, *args, **kwargs):\n \"\"\" See dd.to_json docstring for more information \"\"\"\n from .io import to_json\n\n return to_json(self, filename, *args, **kwargs)\n\n def to_delayed(self, optimize_graph=True):\n \"\"\"Convert into a list of ``dask.delayed`` objects, one per partition.\n\n Parameters\n ----------\n optimize_graph : bool, optional\n If True [default], the graph is optimized before converting into\n ``dask.delayed`` objects.\n\n Examples\n --------\n >>> partitions = df.to_delayed() # doctest: +SKIP\n\n See Also\n --------\n dask.dataframe.from_delayed\n \"\"\"\n keys = self.__dask_keys__()\n graph = self.__dask_graph__()\n if optimize_graph:\n graph = self.__dask_optimize__(graph, self.__dask_keys__())\n name = \"delayed-\" + self._name\n graph = HighLevelGraph.from_collections(name, graph, dependencies=())\n return [Delayed(k, graph) for k in keys]\n\n @classmethod\n def _get_unary_operator(cls, op):\n return lambda self: elemwise(op, self)\n\n @classmethod\n def _get_binary_operator(cls, op, inv=False):\n if inv:\n return lambda self, other: elemwise(op, other, self)\n else:\n return lambda self, other: elemwise(op, self, other)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.rolling__Frame.rolling.return.Rolling_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.rolling__Frame.rolling.return.Rolling_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1493, "end_line": 1542, "span_ids": ["_Frame.rolling"], "tokens": 373}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def rolling(self, window, min_periods=None, center=False, win_type=None, axis=0):\n \"\"\"Provides rolling transformations.\n\n Parameters\n ----------\n window : int, str, offset\n Size of the moving window. This is the number of observations used\n for calculating the statistic. When not using a ``DatetimeIndex``,\n the window size must not be so large as to span more than one\n adjacent partition. If using an offset or offset alias like '5D',\n the data must have a ``DatetimeIndex``\n\n .. 
versionchanged:: 0.15.0\n\n Now accepts offsets and string offset aliases\n\n min_periods : int, default None\n Minimum number of observations in window required to have a value\n (otherwise result is NA).\n center : boolean, default False\n Set the labels at the center of the window.\n win_type : string, default None\n Provide a window type. The recognized window types are identical\n to pandas.\n axis : int, default 0\n\n Returns\n -------\n a Rolling object on which to call a method to compute a statistic\n \"\"\"\n from dask.dataframe.rolling import Rolling\n\n if isinstance(window, Integral):\n if window < 0:\n raise ValueError(\"window must be >= 0\")\n\n if min_periods is not None:\n if not isinstance(min_periods, Integral):\n raise ValueError(\"min_periods must be an integer\")\n if min_periods < 0:\n raise ValueError(\"min_periods must be >= 0\")\n\n return Rolling(\n self,\n window=window,\n min_periods=min_periods,\n center=center,\n win_type=win_type,\n axis=axis,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.diff__Frame.diff.return.self_map_overlap_M_diff_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.diff__Frame.diff.return.self_map_overlap_M_diff_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1544, "end_line": 1565, "span_ids": ["_Frame.diff"], "tokens": 223}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def diff(self, periods=1, axis=0):\n \"\"\"\n .. note::\n\n Pandas currently uses an ``object``-dtype column to represent\n boolean data with missing values. This can cause issues for\n boolean-specific operations, like ``|``. 
To enable boolean-\n specific operations, at the cost of metadata that doesn't match\n pandas, use ``.astype(bool)`` after the ``diff``.\n \"\"\"\n axis = self._validate_axis(axis)\n if not isinstance(periods, Integral):\n raise TypeError(\"periods must be an integer\")\n\n if axis == 1:\n return self.map_partitions(\n M.diff, token=\"diff\", periods=periods, axis=1, enforce_metadata=False\n )\n\n before, after = (periods, 0) if periods > 0 else (0, -periods)\n return self.map_overlap(M.diff, before, after, token=\"diff\", periods=periods)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.shift__Frame.shift.return.maybe_shift_divisions_out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.shift__Frame.shift.return.maybe_shift_divisions_out", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1567, "end_line": 1600, "span_ids": ["_Frame.shift"], "tokens": 249}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def shift(self, periods=1, freq=None, axis=0):\n axis = self._validate_axis(axis)\n if not isinstance(periods, Integral):\n raise TypeError(\"periods must be an integer\")\n\n if axis == 1:\n return self.map_partitions(\n M.shift,\n token=\"shift\",\n periods=periods,\n freq=freq,\n axis=1,\n enforce_metadata=False,\n )\n\n if freq is None:\n before, after = (periods, 0) if periods > 0 else (0, -periods)\n return self.map_overlap(\n M.shift, before, after, token=\"shift\", periods=periods\n )\n\n # Let pandas error on invalid arguments\n meta = self._meta_nonempty.shift(periods, freq=freq)\n out = self.map_partitions(\n M.shift,\n token=\"shift\",\n periods=periods,\n freq=freq,\n meta=meta,\n enforce_metadata=False,\n transform_divisions=False,\n )\n return maybe_shift_divisions(out, periods, freq=freq)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._reduction_agg__Frame._reduction_agg.if_axis_1_.else_.return.handle_out_out_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._reduction_agg__Frame._reduction_agg.if_axis_1_.else_.return.handle_out_out_result_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1602, "end_line": 1625, "span_ids": ["_Frame._reduction_agg"], "tokens": 203}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
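The before/after arithmetic shared by diff and shift above maps directly onto observable results; a sketch (positive periods pull rows from the previous partition, negative ones from the next):

import pandas as pd
import dask.dataframe as dd

ds = dd.from_pandas(pd.Series(range(10), dtype="float"), npartitions=3)

print(ds.diff(2).compute().tolist())    # [nan, nan, 2.0, 2.0, ..., 2.0]
print(ds.shift(-1).compute().tolist())  # [1.0, 2.0, ..., 9.0, nan]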
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def _reduction_agg(self, name, axis=None, skipna=True, split_every=False, out=None):\n axis = self._validate_axis(axis)\n\n meta = getattr(self._meta_nonempty, name)(axis=axis, skipna=skipna)\n token = self._token_prefix + name\n\n method = getattr(M, name)\n if axis == 1:\n result = self.map_partitions(\n method, meta=meta, token=token, skipna=skipna, axis=axis\n )\n return handle_out(out, result)\n else:\n result = self.reduction(\n method,\n meta=meta,\n token=token,\n skipna=skipna,\n axis=axis,\n split_every=split_every,\n )\n if isinstance(self, DataFrame):\n result.divisions = (self.columns.min(), self.columns.max())\n return handle_out(out, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.abs__Frame.any.return.self__reduction_agg_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.abs__Frame.any.return.self__reduction_agg_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1627, "end_line": 1643, "span_ids": ["_Frame.all", "_Frame.abs", "_Frame.any"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def abs(self):\n _raise_if_object_series(self, \"abs\")\n meta = self._meta_nonempty.abs()\n return self.map_partitions(M.abs, meta=meta, enforce_metadata=False)\n\n @derived_from(pd.DataFrame)\n def all(self, axis=None, skipna=True, split_every=False, out=None):\n return self._reduction_agg(\n \"all\", axis=axis, skipna=skipna, split_every=split_every, out=out\n )\n\n @derived_from(pd.DataFrame)\n def any(self, axis=None, skipna=True, split_every=False, out=None):\n return self._reduction_agg(\n \"any\", axis=axis, skipna=skipna, split_every=split_every, out=out\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.sum__Frame.sum.if_min_count_.else_.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.sum__Frame.sum.if_min_count_.else_.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1645, "end_line": 1667, "span_ids": ["_Frame.sum"], "tokens": 163}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def sum(\n self,\n axis=None,\n skipna=True,\n split_every=False,\n dtype=None,\n out=None,\n min_count=None,\n ):\n result = self._reduction_agg(\n \"sum\", axis=axis, skipna=skipna, split_every=split_every, out=out\n )\n if min_count:\n cond = self.notnull().sum(axis=axis) >= min_count\n if is_series_like(cond):\n return result.where(cond, other=np.NaN)\n else:\n return _scalar_binary(\n lambda x, y: result if x is y else np.NaN, cond, True\n )\n else:\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.prod__Frame.prod.if_min_count_.else_.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.prod__Frame.prod.if_min_count_.else_.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1669, "end_line": 1691, "span_ids": ["_Frame.prod"], "tokens": 163}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def prod(\n self,\n axis=None,\n skipna=True,\n split_every=False,\n dtype=None,\n out=None,\n min_count=None,\n ):\n result = self._reduction_agg(\n \"prod\", axis=axis, skipna=skipna, split_every=split_every, out=out\n )\n if min_count:\n cond = self.notnull().sum(axis=axis) >= min_count\n if is_series_like(cond):\n return result.where(cond, other=np.NaN)\n else:\n return _scalar_binary(\n lambda x, y: result if x is y else np.NaN, cond, True\n )\n else:\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.max__Frame.min.return.self__reduction_agg_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.max__Frame.min.return.self__reduction_agg_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1693, "end_line": 1703, "span_ids": ["_Frame.min", "_Frame.max"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def max(self, axis=None, skipna=True, split_every=False, out=None):\n return self._reduction_agg(\n \"max\", axis=axis, skipna=skipna, split_every=split_every, out=out\n )\n\n @derived_from(pd.DataFrame)\n def min(self, axis=None, skipna=True, split_every=False, out=None):\n return self._reduction_agg(\n \"min\", axis=axis, skipna=skipna, split_every=split_every, out=out\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.idxmax__Frame.idxmax.if_axis_1_.else_.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.idxmax__Frame.idxmax.if_axis_1_.else_.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1705, "end_line": 1736, "span_ids": ["_Frame.idxmax"], "tokens": 238}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def idxmax(self, axis=None, skipna=True, split_every=False):\n fn = \"idxmax\"\n axis = self._validate_axis(axis)\n meta = self._meta_nonempty.idxmax(axis=axis, skipna=skipna)\n if axis == 1:\n return map_partitions(\n M.idxmax,\n self,\n meta=meta,\n token=self._token_prefix + fn,\n skipna=skipna,\n axis=axis,\n enforce_metadata=False,\n )\n else:\n scalar = not is_series_like(meta)\n result = aca(\n [self],\n chunk=idxmaxmin_chunk,\n aggregate=idxmaxmin_agg,\n combine=idxmaxmin_combine,\n meta=meta,\n aggregate_kwargs={\"scalar\": scalar},\n token=self._token_prefix + fn,\n split_every=split_every,\n skipna=skipna,\n fn=fn,\n )\n if isinstance(self, DataFrame):\n result.divisions = (min(self.columns), max(self.columns))\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.idxmin__Frame.idxmin.if_axis_1_.else_.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.idxmin__Frame.idxmin.if_axis_1_.else_.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1738, "end_line": 1769, "span_ids": ["_Frame.idxmin"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", 
"tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def idxmin(self, axis=None, skipna=True, split_every=False):\n fn = \"idxmin\"\n axis = self._validate_axis(axis)\n meta = self._meta_nonempty.idxmax(axis=axis)\n if axis == 1:\n return map_partitions(\n M.idxmin,\n self,\n meta=meta,\n token=self._token_prefix + fn,\n skipna=skipna,\n axis=axis,\n enforce_metadata=False,\n )\n else:\n scalar = not is_series_like(meta)\n result = aca(\n [self],\n chunk=idxmaxmin_chunk,\n aggregate=idxmaxmin_agg,\n combine=idxmaxmin_combine,\n meta=meta,\n aggregate_kwargs={\"scalar\": scalar},\n token=self._token_prefix + fn,\n split_every=split_every,\n skipna=skipna,\n fn=fn,\n )\n if isinstance(self, DataFrame):\n result.divisions = (min(self.columns), max(self.columns))\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.count__Frame.mode.return.mode_series": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.count__Frame.mode.return.mode_series", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1771, "end_line": 1805, "span_ids": ["_Frame.mode", "_Frame.count"], "tokens": 272}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def count(self, axis=None, split_every=False):\n axis = self._validate_axis(axis)\n token = self._token_prefix + \"count\"\n if axis == 1:\n meta = self._meta_nonempty.count(axis=axis)\n return self.map_partitions(\n M.count, meta=meta, token=token, axis=axis, enforce_metadata=False\n )\n else:\n meta = self._meta_nonempty.count()\n\n # Need the astype(int) for empty dataframes, which sum to float dtype\n result = self.reduction(\n M.count,\n aggregate=_count_aggregate,\n meta=meta,\n token=token,\n split_every=split_every,\n )\n if isinstance(self, DataFrame):\n result.divisions = (self.columns.min(), self.columns.max())\n return result\n\n @derived_from(pd.DataFrame)\n def mode(self, dropna=True, split_every=False):\n mode_series = self.reduction(\n chunk=M.value_counts,\n combine=M.sum,\n aggregate=_mode_aggregate,\n split_every=split_every,\n chunk_kwargs={\"dropna\": dropna},\n aggregate_kwargs={\"dropna\": dropna},\n )\n return mode_series", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.mean__Frame.mean.if_axis_1_.else_.return.handle_out_out_result_": {"__data__": 
{"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.mean__Frame.mean.if_axis_1_.else_.return.handle_out_out_result_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1807, "end_line": 1838, "span_ids": ["_Frame.mean"], "tokens": 267}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def mean(self, axis=None, skipna=True, split_every=False, dtype=None, out=None):\n axis = self._validate_axis(axis)\n _raise_if_object_series(self, \"mean\")\n meta = self._meta_nonempty.mean(axis=axis, skipna=skipna)\n if axis == 1:\n result = map_partitions(\n M.mean,\n self,\n meta=meta,\n token=self._token_prefix + \"mean\",\n axis=axis,\n skipna=skipna,\n enforce_metadata=False,\n )\n return handle_out(out, result)\n else:\n num = self._get_numeric_data()\n s = num.sum(skipna=skipna, split_every=split_every)\n n = num.count(split_every=split_every)\n name = self._token_prefix + \"mean-%s\" % tokenize(self, axis, skipna)\n result = map_partitions(\n methods.mean_aggregate,\n s,\n n,\n token=name,\n meta=meta,\n enforce_metadata=False,\n )\n if isinstance(self, DataFrame):\n result.divisions = (self.columns.min(), self.columns.max())\n return handle_out(out, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.var__Frame.var.if_axis_1_.else_.return.handle_out_out_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.var__Frame.var.if_axis_1_.else_.return.handle_out_out_result_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1840, "end_line": 1879, "span_ids": ["_Frame.var"], "tokens": 361}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def var(\n self, axis=None, skipna=True, ddof=1, split_every=False, dtype=None, out=None\n ):\n axis = self._validate_axis(axis)\n _raise_if_object_series(self, \"var\")\n meta = self._meta_nonempty.var(axis=axis, skipna=skipna)\n if axis == 1:\n result = map_partitions(\n M.var,\n self,\n meta=meta,\n token=self._token_prefix + \"var\",\n axis=axis,\n skipna=skipna,\n ddof=ddof,\n enforce_metadata=False,\n )\n return handle_out(out, result)\n else:\n if self.ndim == 1:\n result = self._var_1d(self, skipna, ddof, split_every)\n return handle_out(out, result)\n\n count_timedeltas = len(\n 
self._meta_nonempty.select_dtypes(include=[np.timedelta64]).columns\n )\n\n # pandas 1.0+ does not implement var on timedelta\n\n if not PANDAS_GT_100 and count_timedeltas == len(self._meta.columns):\n result = self._var_timedeltas(skipna, ddof, split_every)\n elif not PANDAS_GT_100 and count_timedeltas > 0:\n result = self._var_mixed(skipna, ddof, split_every)\n else:\n result = self._var_numeric(skipna, ddof, split_every)\n\n if isinstance(self, DataFrame):\n result.divisions = (self.columns.min(), self.columns.max())\n return handle_out(out, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._var_numeric__Frame._var_numeric.return.new_dd_object_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._var_numeric__Frame._var_numeric.return.new_dd_object_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1881, "end_line": 1904, "span_ids": ["_Frame._var_numeric"], "tokens": 271}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def _var_numeric(self, skipna=True, ddof=1, split_every=False):\n num = self.select_dtypes(include=[\"number\", \"bool\"], exclude=[np.timedelta64])\n\n values_dtype = num.values.dtype\n array_values = num.values\n\n if not np.issubdtype(values_dtype, np.number):\n array_values = num.values.astype(\"f8\")\n\n var = da.nanvar if skipna or skipna is None else da.var\n array_var = var(array_values, axis=0, ddof=ddof, split_every=split_every)\n\n name = self._token_prefix + \"var-numeric\" + tokenize(num, split_every)\n cols = num._meta.columns if is_dataframe_like(num) else None\n\n var_shape = num._meta_nonempty.values.var(axis=0).shape\n array_var_name = (array_var._name,) + (0,) * len(var_shape)\n\n layer = {(name, 0): (methods.wrap_var_reduction, array_var_name, cols)}\n graph = HighLevelGraph.from_collections(name, layer, dependencies=[array_var])\n\n return new_dd_object(\n graph, name, num._meta_nonempty.var(), divisions=[None, None]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._var_timedeltas__Frame._var_timedeltas.return.new_dd_object_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._var_timedeltas__Frame._var_timedeltas.return.new_dd_object_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1906, "end_line": 1932, "span_ids": ["_Frame._var_timedeltas"], "tokens": 224}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def _var_timedeltas(self, skipna=True, ddof=1, split_every=False):\n timedeltas = self.select_dtypes(include=[np.timedelta64])\n\n var_timedeltas = [\n self._var_1d(timedeltas[col_idx], skipna, ddof, split_every)\n for col_idx in timedeltas._meta.columns\n ]\n var_timedelta_names = [(v._name, 0) for v in var_timedeltas]\n\n name = (\n self._token_prefix + \"var-timedeltas-\" + tokenize(timedeltas, split_every)\n )\n\n layer = {\n (name, 0): (\n methods.wrap_var_reduction,\n var_timedelta_names,\n timedeltas._meta.columns,\n )\n }\n graph = HighLevelGraph.from_collections(\n name, layer, dependencies=var_timedeltas\n )\n\n return new_dd_object(\n graph, name, timedeltas._meta_nonempty.var(), divisions=[None, None]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._var_mixed__Frame._var_mixed.return.new_dd_object_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._var_mixed__Frame._var_mixed.return.new_dd_object_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1934, "end_line": 1956, "span_ids": ["_Frame._var_mixed"], "tokens": 207}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def _var_mixed(self, skipna=True, ddof=1, split_every=False):\n data = self.select_dtypes(include=[\"number\", \"bool\", np.timedelta64])\n\n timedelta_vars = self._var_timedeltas(skipna, ddof, split_every)\n numeric_vars = self._var_numeric(skipna, ddof, split_every)\n\n name = self._token_prefix + \"var-mixed-\" + tokenize(data, split_every)\n\n layer = {\n (name, 0): (\n methods.var_mixed_concat,\n (numeric_vars._name, 0),\n (timedelta_vars._name, 0),\n data._meta.columns,\n )\n }\n\n graph = HighLevelGraph.from_collections(\n name, layer, dependencies=[numeric_vars, timedelta_vars]\n )\n return new_dd_object(\n graph, name, self._meta_nonempty.var(), divisions=[None, None]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._var_1d__Frame._var_1d.return.new_dd_object_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._var_1d__Frame._var_1d.return.new_dd_object_", "embedding": 
null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1958, "end_line": 1986, "span_ids": ["_Frame._var_1d"], "tokens": 292}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def _var_1d(self, column, skipna=True, ddof=1, split_every=False):\n is_timedelta = is_timedelta64_dtype(column._meta)\n\n if is_timedelta:\n if not skipna:\n is_nan = column.isna()\n column = column.astype(\"i8\")\n column = column.mask(is_nan)\n else:\n column = column.dropna().astype(\"i8\")\n\n if PANDAS_VERSION >= \"0.24.0\":\n if pd.Int64Dtype.is_dtype(column._meta_nonempty):\n column = column.astype(\"f8\")\n\n if not np.issubdtype(column.dtype, np.number):\n column = column.astype(\"f8\")\n\n name = self._token_prefix + \"var-1d-\" + tokenize(column, split_every)\n\n var = da.nanvar if skipna or skipna is None else da.var\n array_var = var(column.values, axis=0, ddof=ddof, split_every=split_every)\n\n layer = {(name, 0): (methods.wrap_var_reduction, (array_var._name,), None)}\n graph = HighLevelGraph.from_collections(name, layer, dependencies=[array_var])\n\n return new_dd_object(\n graph, name, column._meta_nonempty.var(), divisions=[None, None]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.std__Frame.std.if_axis_1_.else_.return.handle_out_out_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.std__Frame.std.if_axis_1_.else_.return.handle_out_out_result_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1988, "end_line": 2013, "span_ids": ["_Frame.std"], "tokens": 228}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def std(\n self, axis=None, skipna=True, ddof=1, split_every=False, dtype=None, out=None\n ):\n axis = self._validate_axis(axis)\n _raise_if_object_series(self, \"std\")\n meta = self._meta_nonempty.std(axis=axis, skipna=skipna)\n if axis == 1:\n result = map_partitions(\n M.std,\n self,\n meta=meta,\n token=self._token_prefix + \"std\",\n axis=axis,\n skipna=skipna,\n ddof=ddof,\n enforce_metadata=False,\n )\n return handle_out(out, result)\n else:\n v = self.var(skipna=skipna, ddof=ddof, split_every=split_every)\n name = self._token_prefix + \"std\"\n result = map_partitions(\n np.sqrt, v, meta=meta, token=name, enforce_metadata=False\n )\n return handle_out(out, result)", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.sem__Frame.sem.if_axis_1_.else_.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.sem__Frame.sem.if_axis_1_.else_.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2015, "end_line": 2041, "span_ids": ["_Frame.sem"], "tokens": 248}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def sem(self, axis=None, skipna=None, ddof=1, split_every=False):\n axis = self._validate_axis(axis)\n _raise_if_object_series(self, \"sem\")\n meta = self._meta_nonempty.sem(axis=axis, skipna=skipna, ddof=ddof)\n if axis == 1:\n return map_partitions(\n M.sem,\n self,\n meta=meta,\n token=self._token_prefix + \"sem\",\n axis=axis,\n skipna=skipna,\n ddof=ddof,\n )\n else:\n num = self._get_numeric_data()\n v = num.var(skipna=skipna, ddof=ddof, split_every=split_every)\n n = num.count(split_every=split_every)\n name = self._token_prefix + \"sem\"\n result = map_partitions(\n np.sqrt, v / n, meta=meta, token=name, enforce_metadata=False\n )\n\n if isinstance(self, DataFrame):\n result.divisions = (self.columns.min(), self.columns.max())\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.quantile__Frame.quantile.if_axis_1_.else_.if_isinstance_quantiles_0.else_.return.DataFrame_graph_keyname_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.quantile__Frame.quantile.if_axis_1_.else_.if_isinstance_quantiles_0.else_.return.DataFrame_graph_keyname_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2043, "end_line": 2095, "span_ids": ["_Frame.quantile"], "tokens": 543}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def quantile(self, q=0.5, axis=0, method=\"default\"):\n \"\"\"Approximate row-wise and precise column-wise quantiles of DataFrame\n\n Parameters\n ----------\n q : list/array of floats, default 0.5 (50%)\n Iterable of numbers ranging from 0 to 1 for the desired quantiles\n axis : {0, 1, 'index', 'columns'} (default 0)\n 0 or 'index' for 
row-wise, 1 or 'columns' for column-wise\n method : {'default', 'tdigest', 'dask'}, optional\n What method to use. By default will use dask's internal custom\n algorithm (``'dask'``). If set to ``'tdigest'`` will use tdigest\n for floats and ints and fallback to the ``'dask'`` otherwise.\n \"\"\"\n axis = self._validate_axis(axis)\n keyname = \"quantiles-concat--\" + tokenize(self, q, axis)\n\n if axis == 1:\n if isinstance(q, list):\n # Not supported, the result will have current index as columns\n raise ValueError(\"'q' must be scalar when axis=1 is specified\")\n return map_partitions(\n M.quantile,\n self,\n q,\n axis,\n token=keyname,\n enforce_metadata=False,\n meta=(q, \"f8\"),\n )\n else:\n _raise_if_object_series(self, \"quantile\")\n meta = self._meta.quantile(q, axis=axis)\n num = self._get_numeric_data()\n quantiles = tuple(quantile(self[c], q, method) for c in num.columns)\n\n qnames = [(_q._name, 0) for _q in quantiles]\n\n if isinstance(quantiles[0], Scalar):\n layer = {\n (keyname, 0): (type(meta), qnames, num.columns, None, meta.name)\n }\n graph = HighLevelGraph.from_collections(\n keyname, layer, dependencies=quantiles\n )\n divisions = (min(num.columns), max(num.columns))\n return Series(graph, keyname, meta, divisions)\n else:\n layer = {(keyname, 0): (methods.concat, qnames, 1)}\n graph = HighLevelGraph.from_collections(\n keyname, layer, dependencies=quantiles\n )\n return DataFrame(graph, keyname, meta, quantiles[0].divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.describe__Frame.describe.return.new_dd_object_graph_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.describe__Frame.describe.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2097, "end_line": 2146, "span_ids": ["_Frame.describe"], "tokens": 439}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def describe(\n self,\n split_every=False,\n percentiles=None,\n percentiles_method=\"default\",\n include=None,\n exclude=None,\n ):\n\n if self._meta.ndim == 1:\n return self._describe_1d(self, split_every, percentiles, percentiles_method)\n elif (include is None) and (exclude is None):\n data = self._meta.select_dtypes(include=[np.number, np.timedelta64])\n\n # when some numerics/timedeltas are found, by default keep them\n if len(data.columns) == 0:\n chosen_columns = self._meta.columns\n else:\n # check if there are timedelta or boolean columns\n bools_and_timedeltas = self._meta.select_dtypes(\n include=[np.timedelta64, \"bool\"]\n )\n if len(bools_and_timedeltas.columns) == 0:\n return self._describe_numeric(\n self, split_every, percentiles, percentiles_method\n )\n else:\n chosen_columns = data.columns\n elif include == 
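The `quantile` chunk above distinguishes the two axes: column-wise quantiles are computed per column and concatenated, while `axis=1` maps pandas' `quantile` over partitions and therefore only accepts a scalar `q`. A usage sketch (illustrative data):

```python
import pandas as pd
import dask.dataframe as dd

pdf = pd.DataFrame({"a": range(100), "b": range(100, 200)})
ddf = dd.from_pandas(pdf, npartitions=4)

# Column-wise quantiles are approximate; a list of q values yields a
# DataFrame indexed by q.
print(ddf.quantile([0.25, 0.5, 0.75]).compute())

# axis=1 requires a scalar q; a list would push the row index into the
# columns, which is why the method raises ValueError for list input.
print(ddf.quantile(0.5, axis=1).head())
```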
\"all\":\n if exclude is not None:\n msg = \"exclude must be None when include is 'all'\"\n raise ValueError(msg)\n chosen_columns = self._meta.columns\n else:\n chosen_columns = self._meta.select_dtypes(include=include, exclude=exclude)\n\n stats = [\n self._describe_1d(\n self[col_idx], split_every, percentiles, percentiles_method\n )\n for col_idx in chosen_columns\n ]\n stats_names = [(s._name, 0) for s in stats]\n\n name = \"describe--\" + tokenize(self, split_every)\n layer = {(name, 0): (methods.describe_aggregate, stats_names)}\n graph = HighLevelGraph.from_collections(name, layer, dependencies=stats)\n meta = self._meta_nonempty.describe(include=include, exclude=exclude)\n return new_dd_object(graph, name, meta, divisions=[None, None])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._describe_1d__Frame._describe_1d.if_is_bool_dtype_data__me.else_.return.self__describe_nonnumeric": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._describe_1d__Frame._describe_1d.if_is_bool_dtype_data__me.else_.return.self__describe_nonnumeric", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2148, "end_line": 2169, "span_ids": ["_Frame._describe_1d"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def _describe_1d(\n self, data, split_every=False, percentiles=None, percentiles_method=\"default\"\n ):\n if is_bool_dtype(data._meta):\n return self._describe_nonnumeric_1d(data, split_every=split_every)\n elif is_numeric_dtype(data._meta):\n return self._describe_numeric(\n data,\n split_every=split_every,\n percentiles=percentiles,\n percentiles_method=percentiles_method,\n )\n elif is_timedelta64_dtype(data._meta):\n return self._describe_numeric(\n data.dropna().astype(\"i8\"),\n split_every=split_every,\n percentiles=percentiles,\n percentiles_method=percentiles_method,\n is_timedelta_column=True,\n )\n else:\n return self._describe_nonnumeric_1d(data, split_every=split_every)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._describe_numeric__Frame._describe_numeric.return.new_dd_object_graph_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._describe_numeric__Frame._describe_numeric.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2171, "end_line": 2218, "span_ids": 
["_Frame._describe_numeric"], "tokens": 404}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def _describe_numeric(\n self,\n data,\n split_every=False,\n percentiles=None,\n percentiles_method=\"default\",\n is_timedelta_column=False,\n ):\n\n num = data._get_numeric_data()\n\n if data.ndim == 2 and len(num.columns) == 0:\n raise ValueError(\"DataFrame contains only non-numeric data.\")\n elif data.ndim == 1 and data.dtype == \"object\":\n raise ValueError(\"Cannot compute ``describe`` on object dtype.\")\n if percentiles is None:\n percentiles = [0.25, 0.5, 0.75]\n else:\n # always include the the 50%tle to calculate the median\n # unique removes duplicates and sorts quantiles\n percentiles = np.array(percentiles)\n percentiles = np.append(percentiles, 0.5)\n percentiles = np.unique(percentiles)\n percentiles = list(percentiles)\n stats = [\n num.count(split_every=split_every),\n num.mean(split_every=split_every),\n num.std(split_every=split_every),\n num.min(split_every=split_every),\n num.quantile(percentiles, method=percentiles_method),\n num.max(split_every=split_every),\n ]\n stats_names = [(s._name, 0) for s in stats]\n\n colname = data._meta.name if is_series_like(data._meta) else None\n\n name = \"describe-numeric--\" + tokenize(num, split_every)\n layer = {\n (name, 0): (\n methods.describe_numeric_aggregate,\n stats_names,\n colname,\n is_timedelta_column,\n )\n }\n graph = HighLevelGraph.from_collections(name, layer, dependencies=stats)\n meta = num._meta_nonempty.describe()\n return new_dd_object(graph, name, meta, divisions=[None, None])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._describe_nonnumeric_1d__Frame._describe_nonnumeric_1d.return.new_dd_object_graph_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._describe_nonnumeric_1d__Frame._describe_nonnumeric_1d.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2220, "end_line": 2248, "span_ids": ["_Frame._describe_nonnumeric_1d"], "tokens": 294}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def _describe_nonnumeric_1d(self, data, split_every=False):\n vcounts = data.value_counts(split_every=split_every)\n count_nonzero = vcounts[vcounts != 0]\n count_unique = count_nonzero.size\n\n stats = [\n # nunique\n count_unique,\n # count\n data.count(split_every=split_every),\n # most common value\n vcounts._head(1, npartitions=1, compute=False, 
safe=False),\n ]\n\n if is_datetime64_any_dtype(data._meta):\n min_ts = data.dropna().astype(\"i8\").min(split_every=split_every)\n max_ts = data.dropna().astype(\"i8\").max(split_every=split_every)\n stats.extend([min_ts, max_ts])\n\n stats_names = [(s._name, 0) for s in stats]\n colname = data._meta.name\n\n name = \"describe-nonnumeric-1d--\" + tokenize(data, split_every)\n layer = {\n (name, 0): (methods.describe_nonnumeric_aggregate, stats_names, colname)\n }\n graph = HighLevelGraph.from_collections(name, layer, dependencies=stats)\n meta = data._meta_nonempty.describe()\n return new_dd_object(graph, name, meta, divisions=[None, None])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._cum_agg__Frame._cum_agg.if_axis_1_.else_.return.handle_out_out_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._cum_agg__Frame._cum_agg.if_axis_1_.else_.return.handle_out_out_result_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2250, "end_line": 2302, "span_ids": ["_Frame._cum_agg"], "tokens": 494}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def _cum_agg(\n self, op_name, chunk, aggregate, axis, skipna=True, chunk_kwargs=None, out=None\n ):\n \"\"\" Wrapper for cumulative operation \"\"\"\n\n axis = self._validate_axis(axis)\n\n if axis == 1:\n name = \"{0}{1}(axis=1)\".format(self._token_prefix, op_name)\n result = self.map_partitions(chunk, token=name, **chunk_kwargs)\n return handle_out(out, result)\n else:\n # cumulate each partitions\n name1 = \"{0}{1}-map\".format(self._token_prefix, op_name)\n cumpart = map_partitions(\n chunk, self, token=name1, meta=self, **chunk_kwargs\n )\n\n name2 = \"{0}{1}-take-last\".format(self._token_prefix, op_name)\n cumlast = map_partitions(\n _take_last,\n cumpart,\n skipna,\n meta=pd.Series([], dtype=\"float\"),\n token=name2,\n )\n\n suffix = tokenize(self)\n name = \"{0}{1}-{2}\".format(self._token_prefix, op_name, suffix)\n cname = \"{0}{1}-cum-last-{2}\".format(self._token_prefix, op_name, suffix)\n\n # aggregate cumulated partisions and its previous last element\n layer = {}\n layer[(name, 0)] = (cumpart._name, 0)\n\n for i in range(1, self.npartitions):\n # store each cumulative step to graph to reduce computation\n if i == 1:\n layer[(cname, i)] = (cumlast._name, i - 1)\n else:\n # aggregate with previous cumulation results\n layer[(cname, i)] = (\n methods._cum_aggregate_apply,\n aggregate,\n (cname, i - 1),\n (cumlast._name, i - 1),\n )\n layer[(name, i)] = (aggregate, (cumpart._name, i), (cname, i))\n graph = HighLevelGraph.from_collections(\n name, layer, dependencies=[cumpart, cumlast]\n )\n result = new_dd_object(graph, name, chunk(self._meta), self.divisions)\n return handle_out(out, result)", "start_char_idx": null, 
"end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.cumsum__Frame.isna.if_hasattr_pd_isna_.else_.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.cumsum__Frame.isna.if_hasattr_pd_isna_.else_.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2304, "end_line": 2379, "span_ids": ["_Frame.where", "_Frame.isnull", "_Frame.cumsum", "_Frame.cummin", "_Frame.isna", "_Frame.cumprod", "_Frame.cummax", "_Frame.mask", "_Frame.notnull"], "tokens": 555}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def cumsum(self, axis=None, skipna=True, dtype=None, out=None):\n return self._cum_agg(\n \"cumsum\",\n chunk=M.cumsum,\n aggregate=operator.add,\n axis=axis,\n skipna=skipna,\n chunk_kwargs=dict(axis=axis, skipna=skipna),\n out=out,\n )\n\n @derived_from(pd.DataFrame)\n def cumprod(self, axis=None, skipna=True, dtype=None, out=None):\n return self._cum_agg(\n \"cumprod\",\n chunk=M.cumprod,\n aggregate=operator.mul,\n axis=axis,\n skipna=skipna,\n chunk_kwargs=dict(axis=axis, skipna=skipna),\n out=out,\n )\n\n @derived_from(pd.DataFrame)\n def cummax(self, axis=None, skipna=True, out=None):\n return self._cum_agg(\n \"cummax\",\n chunk=M.cummax,\n aggregate=methods.cummax_aggregate,\n axis=axis,\n skipna=skipna,\n chunk_kwargs=dict(axis=axis, skipna=skipna),\n out=out,\n )\n\n @derived_from(pd.DataFrame)\n def cummin(self, axis=None, skipna=True, out=None):\n return self._cum_agg(\n \"cummin\",\n chunk=M.cummin,\n aggregate=methods.cummin_aggregate,\n axis=axis,\n skipna=skipna,\n chunk_kwargs=dict(axis=axis, skipna=skipna),\n out=out,\n )\n\n @derived_from(pd.DataFrame)\n def where(self, cond, other=np.nan):\n # cond and other may be dask instance,\n # passing map_partitions via keyword will not be aligned\n return map_partitions(M.where, self, cond, other, enforce_metadata=False)\n\n @derived_from(pd.DataFrame)\n def mask(self, cond, other=np.nan):\n return map_partitions(M.mask, self, cond, other, enforce_metadata=False)\n\n @derived_from(pd.DataFrame)\n def notnull(self):\n return self.map_partitions(M.notnull, enforce_metadata=False)\n\n @derived_from(pd.DataFrame)\n def isnull(self):\n return self.map_partitions(M.isnull, enforce_metadata=False)\n\n @derived_from(pd.DataFrame)\n def isna(self):\n if hasattr(pd, \"isna\"):\n return self.map_partitions(M.isna, enforce_metadata=False)\n else:\n raise NotImplementedError(\n \"Need more recent version of Pandas \"\n \"to support isna. 
\"\n \"Please use isnull instead.\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.isin__Frame.isin.return.self_map_partitions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.isin__Frame.isin.return.self_map_partitions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2381, "end_line": 2396, "span_ids": ["_Frame.isin"], "tokens": 168}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def isin(self, values):\n if is_dataframe_like(self._meta):\n # DataFrame.isin does weird alignment stuff\n bad_types = (_Frame, pd.Series, pd.DataFrame)\n else:\n bad_types = (_Frame,)\n if isinstance(values, bad_types):\n raise NotImplementedError(\"Passing a %r to `isin`\" % typename(type(values)))\n meta = self._meta_nonempty.isin(values)\n # We wrap values in a delayed for two reasons:\n # - avoid serializing data in every task\n # - avoid cost of traversal of large list in optimizations\n return self.map_partitions(\n M.isin, delayed(values), meta=meta, enforce_metadata=False\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.astype__Frame.append.return.concat_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.astype__Frame.append.return.concat_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2398, "end_line": 2433, "span_ids": ["_Frame.astype", "_Frame.append"], "tokens": 327}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def astype(self, dtype):\n # XXX: Pandas will segfault for empty dataframes when setting\n # categorical dtypes. This operation isn't allowed currently anyway. 
We\n # get the metadata with a non-empty frame to throw the error instead of\n # segfaulting.\n if is_dataframe_like(self._meta) and is_categorical_dtype(dtype):\n meta = self._meta_nonempty.astype(dtype)\n else:\n meta = self._meta.astype(dtype)\n if hasattr(dtype, \"items\"):\n set_unknown = [\n k\n for k, v in dtype.items()\n if is_categorical_dtype(v) and getattr(v, \"categories\", None) is None\n ]\n meta = clear_known_categories(meta, cols=set_unknown)\n elif is_categorical_dtype(dtype) and getattr(dtype, \"categories\", None) is None:\n meta = clear_known_categories(meta)\n return self.map_partitions(\n M.astype, dtype=dtype, meta=meta, enforce_metadata=False\n )\n\n @derived_from(pd.Series)\n def append(self, other, interleave_partitions=False):\n # because DataFrame.append will override the method,\n # wrap by pd.Series.append docstring\n from .multi import concat\n\n if isinstance(other, (list, dict)):\n msg = \"append doesn't support list or dict input\"\n raise NotImplementedError(msg)\n\n return concat(\n [self, other], join=\"outer\", interleave_partitions=interleave_partitions\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.align__Frame.align.return.result1_result2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.align__Frame.align.return.result1_result2", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2435, "end_line": 2467, "span_ids": ["_Frame.align"], "tokens": 290}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def align(self, other, join=\"outer\", axis=None, fill_value=None):\n meta1, meta2 = _emulate(\n M.align, self, other, join, axis=axis, fill_value=fill_value\n )\n aligned = self.map_partitions(\n M.align,\n other,\n join=join,\n axis=axis,\n fill_value=fill_value,\n enforce_metadata=False,\n )\n\n token = tokenize(self, other, join, axis, fill_value)\n\n name1 = \"align1-\" + token\n dsk1 = {\n (name1, i): (getitem, key, 0)\n for i, key in enumerate(aligned.__dask_keys__())\n }\n dsk1.update(aligned.dask)\n result1 = new_dd_object(dsk1, name1, meta1, aligned.divisions)\n\n name2 = \"align2-\" + token\n dsk2 = {\n (name2, i): (getitem, key, 1)\n for i, key in enumerate(aligned.__dask_keys__())\n }\n dsk2.update(aligned.dask)\n result2 = new_dd_object(dsk2, name2, meta2, aligned.divisions)\n\n return result1, result2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.combine__Frame.resample.return.Resampler_self_rule_clo": {"__data__": {"id_": 
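Per the `align` chunk above, dask runs pandas' `align` partition-wise and then splits the returned 2-tuple into two collections using `getitem` tasks that share one underlying graph. A minimal sketch (single-partition inputs for simplicity):

```python
import pandas as pd
import dask.dataframe as dd

a = dd.from_pandas(pd.Series([1, 2, 3], index=[0, 1, 2]), npartitions=1)
b = dd.from_pandas(pd.Series([10, 20], index=[1, 2]), npartitions=1)

# Both results come from the same aligned partitions; getitem tasks pick
# element 0 and element 1 of each partition's (left, right) tuple.
left, right = a.align(b, join="outer", fill_value=0)
print(left.compute().tolist(), right.compute().tolist())
```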
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.combine__Frame.resample.return.Resampler_self_rule_clo", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2469, "end_line": 2488, "span_ids": ["_Frame.resample", "_Frame.combine", "_Frame._bind_operator_method", "_Frame.combine_first"], "tokens": 170}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def combine(self, other, func, fill_value=None, overwrite=True):\n return self.map_partitions(\n M.combine, other, func, fill_value=fill_value, overwrite=overwrite\n )\n\n @derived_from(pd.DataFrame)\n def combine_first(self, other):\n return self.map_partitions(M.combine_first, other)\n\n @classmethod\n def _bind_operator_method(cls, name, op, original=pd.DataFrame):\n \"\"\" bind operator method like DataFrame.add to this class \"\"\"\n raise NotImplementedError\n\n @derived_from(pd.DataFrame)\n def resample(self, rule, closed=None, label=None):\n from .tseries.resample import Resampler\n\n return Resampler(self, rule, closed=closed, label=label)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.first__Frame.first.return.new_dd_object_graph_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.first__Frame.first.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2490, "end_line": 2526, "span_ids": ["_Frame.first"], "tokens": 301}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def first(self, offset):\n # Let pandas error on bad args\n self._meta_nonempty.first(offset)\n\n if not self.known_divisions:\n raise ValueError(\"`first` is not implemented for unknown divisions\")\n\n offset = pd.tseries.frequencies.to_offset(offset)\n date = self.divisions[0] + offset\n end = self.loc._get_partitions(date)\n\n if PANDAS_GT_100:\n is_anchored = offset.is_anchored()\n else:\n is_anchored = offset.isAnchored()\n\n include_right = is_anchored or not hasattr(offset, \"delta\")\n\n if end == self.npartitions - 1:\n divs = self.divisions\n else:\n divs = self.divisions[: end + 1] + (date,)\n\n name = \"first-\" + tokenize(self, offset)\n dsk = {(name, i): (self._name, i) for i in range(end)}\n dsk[(name, end)] = (\n methods.boundary_slice,\n (self._name, 
end),\n None,\n date,\n include_right,\n True,\n \"loc\",\n )\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n return new_dd_object(graph, name, self, divs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.last__Frame.last.return.new_dd_object_graph_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.last__Frame.last.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2528, "end_line": 2560, "span_ids": ["_Frame.last"], "tokens": 262}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def last(self, offset):\n # Let pandas error on bad args\n self._meta_nonempty.first(offset)\n\n if not self.known_divisions:\n raise ValueError(\"`last` is not implemented for unknown divisions\")\n\n offset = pd.tseries.frequencies.to_offset(offset)\n date = self.divisions[-1] - offset\n start = self.loc._get_partitions(date)\n\n if start == 0:\n divs = self.divisions\n else:\n divs = (date,) + self.divisions[start + 1 :]\n\n name = \"last-\" + tokenize(self, offset)\n dsk = {\n (name, i + 1): (self._name, j + 1)\n for i, j in enumerate(range(start, self.npartitions))\n }\n dsk[(name, 0)] = (\n methods.boundary_slice,\n (self._name, start),\n date,\n None,\n True,\n False,\n \"loc\",\n )\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n return new_dd_object(graph, name, self, divs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.nunique_approx__Frame.nunique_approx.return.aca_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.nunique_approx__Frame.nunique_approx.return.aca_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2562, "end_line": 2590, "span_ids": ["_Frame.nunique_approx"], "tokens": 205}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def nunique_approx(self, split_every=None):\n \"\"\"Approximate number of unique rows.\n\n This method uses the HyperLogLog algorithm for 
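The `first`/`last` chunks above require known divisions: they locate the partition containing `divisions[0] + offset` (or `divisions[-1] - offset`), boundary-slice just that partition, and forward all earlier (or later) partitions untouched. A sketch on a small time-indexed frame:

```python
import pandas as pd
import dask.dataframe as dd

idx = pd.date_range("2020-01-01", periods=10, freq="D")
ddf = dd.from_pandas(pd.DataFrame({"x": range(10)}, index=idx), npartitions=2)

# Only the boundary partition is sliced; the rest are passed through as-is.
print(ddf.first("3D").compute())
print(ddf.last("3D").compute())
```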
cardinality\n estimation to compute the approximate number of unique rows.\n The approximate error is 0.406%.\n\n Parameters\n ----------\n split_every : int, optional\n Group partitions into groups of this size while performing a\n tree-reduction. If set to False, no tree-reduction will be used.\n Default is 8.\n\n Returns\n -------\n a float representing the approximate number of elements\n \"\"\"\n from . import hyperloglog # here to avoid circular import issues\n\n return aca(\n [self],\n chunk=hyperloglog.compute_hll_array,\n combine=hyperloglog.reduce_state,\n aggregate=hyperloglog.estimate_count,\n split_every=split_every,\n b=16,\n meta=float,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.values__Frame._validate_chunks.return.arr__chunks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.values__Frame._validate_chunks.return.arr__chunks", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2592, "end_line": 2624, "span_ids": ["_Frame.values", "_Frame._validate_chunks"], "tokens": 238}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @property\n def values(self):\n \"\"\"Return a dask.array of the values of this dataframe\n\n Warning: This creates a dask.array without precise shape information.\n Operations that depend on shape information, like slicing or reshaping,\n will not work.\n \"\"\"\n return self.map_partitions(methods.values)\n\n def _validate_chunks(self, arr, lengths):\n from dask.array.core import normalize_chunks\n\n if isinstance(lengths, Sequence):\n lengths = tuple(lengths)\n\n if len(lengths) != self.npartitions:\n raise ValueError(\n \"The number of items in 'lengths' does not match \"\n \"the number of partitions. 
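The `nunique_approx` chunk above builds a HyperLogLog sketch per partition (with the precision parameter `b=16` shown in the call) and merges the sketches in a tree-reduction, returning a float estimate. A quick sketch:

```python
import pandas as pd
import dask.dataframe as dd

s = dd.from_pandas(pd.Series(range(10_000)) % 1_000, npartitions=8)

# Each partition contributes a fixed-size HLL register array; merging them
# is cheap, and the estimate lands within roughly 0.4% of the true count.
print(s.nunique_approx().compute())   # approximately 1000.0
```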
\"\n \"{} != {}\".format(len(lengths), self.npartitions)\n )\n\n if self.ndim == 1:\n chunks = normalize_chunks((lengths,))\n else:\n chunks = normalize_chunks((lengths, (len(self.columns),)))\n\n return chunks\n elif lengths is not None:\n raise ValueError(\"Unexpected value for 'lengths': '{}'\".format(lengths))\n\n return arr._chunks", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._is_index_level_reference__raise_if_object_series.if_isinstance_x_Series_.raise_ValueError_s_no": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._is_index_level_reference__raise_if_object_series.if_isinstance_x_Series_.raise_ValueError_s_no", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2626, "end_line": 2657, "span_ids": ["_raise_if_object_series", "_Frame._contains_index_name", "_Frame._is_index_level_reference"], "tokens": 278}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def _is_index_level_reference(self, key):\n \"\"\"\n Test whether a key is an index level reference\n\n To be considered an index level reference, `key` must match the index name\n and must NOT match the name of any column (if a dataframe).\n \"\"\"\n return (\n self.index.name is not None\n and not is_dask_collection(key)\n and (np.isscalar(key) or isinstance(key, tuple))\n and key == self.index.name\n and key not in getattr(self, \"columns\", ())\n )\n\n def _contains_index_name(self, columns_or_index):\n \"\"\"\n Test whether the input contains a reference to the index of the DataFrame/Series\n \"\"\"\n if isinstance(columns_or_index, list):\n return any(self._is_index_level_reference(n) for n in columns_or_index)\n else:\n return self._is_index_level_reference(columns_or_index)\n\n\ndef _raise_if_object_series(x, funcname):\n \"\"\"\n Utility function to raise an error if an object column does not support\n a certain operation like `mean`.\n \"\"\"\n if isinstance(x, Series) and hasattr(x, \"dtype\") and x.dtype == object:\n raise ValueError(\"`%s` not supported with object series\" % funcname)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series_Series._repr_data.return._repr_data_series_self__m": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series_Series._repr_data.return._repr_data_series_self__m", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2660, "end_line": 
2768, "span_ids": ["Series.shape", "Series.str", "Series._repr_data", "Series.name", "Series.dtype", "Series.__dir__", "Series.ndim", "Series.dt", "Series.cat", "Series.nbytes", "Series.__array_wrap__", "Series", "Series.name_2"], "tokens": 697}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Series(_Frame):\n \"\"\"Parallel Pandas Series\n\n Do not use this class directly. Instead use functions like\n ``dd.read_csv``, ``dd.read_parquet``, or ``dd.from_pandas``.\n\n Parameters\n ----------\n\n dsk: dict\n The dask graph to compute this Series\n _name: str\n The key prefix that specifies which keys in the dask comprise this\n particular Series\n meta: pandas.Series\n An empty ``pandas.Series`` with names, dtypes, and index matching the\n expected output.\n divisions: tuple of index values\n Values along which we partition our blocks on the index\n\n See Also\n --------\n dask.dataframe.DataFrame\n \"\"\"\n\n _partition_type = pd.Series\n _is_partition_type = staticmethod(is_series_like)\n _token_prefix = \"series-\"\n _accessors = set()\n\n def __array_wrap__(self, array, context=None):\n if isinstance(context, tuple) and len(context) > 0:\n if isinstance(context[1][0], np.ndarray) and context[1][0].shape == ():\n index = None\n else:\n index = context[1][0].index\n\n return pd.Series(array, index=index, name=self.name)\n\n @property\n def name(self):\n return self._meta.name\n\n @name.setter\n def name(self, name):\n self._meta.name = name\n renamed = _rename_dask(self, name)\n # update myself\n self.dask = renamed.dask\n self._name = renamed._name\n\n @property\n def ndim(self):\n \"\"\" Return dimensionality \"\"\"\n return 1\n\n @property\n def shape(self):\n \"\"\"\n Return a tuple representing the dimensionality of a Series.\n\n The single element of the tuple is a Delayed result.\n\n Examples\n --------\n >>> series.shape # doctest: +SKIP\n # (dd.Scalar,)\n \"\"\"\n return (self.size,)\n\n @property\n def dtype(self):\n \"\"\" Return data type \"\"\"\n return self._meta.dtype\n\n @cache_readonly\n def dt(self):\n \"\"\" Namespace of datetime methods \"\"\"\n return DatetimeAccessor(self)\n\n @cache_readonly\n def cat(self):\n return CategoricalAccessor(self)\n\n @cache_readonly\n def str(self):\n \"\"\" Namespace for string methods \"\"\"\n return StringAccessor(self)\n\n def __dir__(self):\n o = set(dir(type(self)))\n o.update(self.__dict__)\n # Remove the `cat` and `str` accessors if not available. 
We can't\n # decide this statically for the `dt` accessor, as it works on\n # datetime-like things as well.\n for accessor in [\"cat\", \"str\"]:\n if not hasattr(self._meta, accessor):\n o.remove(accessor)\n return list(o)\n\n @property\n def nbytes(self):\n \"\"\" Number of bytes \"\"\"\n return self.reduction(\n methods.nbytes, np.sum, token=\"nbytes\", meta=int, split_every=False\n )\n\n def _repr_data(self):\n return _repr_data_series(self._meta, self._repr_divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.__repr___Series.__repr__.return._Dask_klass_Structure": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.__repr___Series.__repr__.return._Dask_klass_Structure", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2770, "end_line": 2788, "span_ids": ["Series.__repr__"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Series(_Frame):\n\n def __repr__(self):\n \"\"\" have to overwrite footer \"\"\"\n if self.name is not None:\n footer = \"Name: {name}, dtype: {dtype}\".format(\n name=self.name, dtype=self.dtype\n )\n else:\n footer = \"dtype: {dtype}\".format(dtype=self.dtype)\n\n return \"\"\"Dask {klass} Structure:\n{data}\n{footer}\nDask Name: {name}, {task} tasks\"\"\".format(\n klass=self.__class__.__name__,\n data=self.to_string(),\n footer=footer,\n name=key_split(self._name),\n task=len(self.dask),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.rename_Series.rename.return.res": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.rename_Series.rename.return.res", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2790, "end_line": 2856, "span_ids": ["Series.rename"], "tokens": 558}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Series(_Frame):\n\n def rename(self, index=None, inplace=False, sorted_index=False):\n \"\"\"Alter Series index labels or name\n\n Function / dict values must be unique (1-to-1). Labels not contained in\n a dict / Series will be left as-is. 
Extra labels listed don't throw an\n error.\n\n Alternatively, change ``Series.name`` with a scalar value.\n\n Parameters\n ----------\n index : scalar, hashable sequence, dict-like or callable, optional\n If dict-like or callable, the transformation is applied to the\n index. Scalar or hashable sequence-like will alter the\n ``Series.name`` attribute.\n inplace : boolean, default False\n Whether to return a new Series or modify this one inplace.\n sorted_index : bool, default False\n If true, the output ``Series`` will have known divisions inferred\n from the input series and the transformation. Ignored for\n non-callable/dict-like ``index`` or when the input series has\n unknown divisions. Note that this may only be set to ``True`` if\n you know that the transformed index is monotonically increasing. Dask\n will check that transformed divisions are monotonic, but cannot\n check all the values between divisions, so incorrectly setting this\n can result in bugs.\n\n Returns\n -------\n renamed : Series\n\n See Also\n --------\n pandas.Series.rename\n \"\"\"\n from pandas.api.types import is_scalar, is_dict_like, is_list_like\n import dask.dataframe as dd\n\n if is_scalar(index) or (\n is_list_like(index)\n and not is_dict_like(index)\n and not isinstance(index, dd.Series)\n ):\n res = self if inplace else self.copy()\n res.name = index\n else:\n res = self.map_partitions(M.rename, index, enforce_metadata=False)\n if self.known_divisions:\n if sorted_index and (callable(index) or is_dict_like(index)):\n old = pd.Series(range(self.npartitions + 1), index=self.divisions)\n new = old.rename(index).index\n if not new.is_monotonic_increasing:\n msg = (\n \"sorted_index=True, but the transformed index \"\n \"isn't monotonic_increasing\"\n )\n raise ValueError(msg)\n res.divisions = tuple(methods.tolist(new))\n else:\n res = res.clear_divisions()\n if inplace:\n self.dask = res.dask\n self._name = res._name\n self.divisions = res.divisions\n self._meta = res._meta\n res = self\n return res", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.round_Series.quantile.return.quantile_self_q_method_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.round_Series.quantile.return.quantile_self_q_method_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2858, "end_line": 2880, "span_ids": ["Series.quantile", "Series.round", "Series.to_timestamp"], "tokens": 244}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Series(_Frame):\n\n @derived_from(pd.Series)\n def round(self, decimals=0):\n return elemwise(M.round, self, decimals)\n\n @derived_from(pd.DataFrame)\n def to_timestamp(self, freq=None, how=\"start\", axis=0):\n df = elemwise(M.to_timestamp, self, freq, how, axis)\n df.divisions = tuple(pd.Index(self.divisions).to_timestamp())\n return df\n\n def 
quantile(self, q=0.5, method=\"default\"):\n \"\"\"Approximate quantiles of Series\n\n Parameters\n ----------\n q : list/array of floats, default 0.5 (50%)\n Iterable of numbers ranging from 0 to 1 for the desired quantiles\n method : {'default', 'tdigest', 'dask'}, optional\n What method to use. By default will use dask's internal custom\n algorithm (``'dask'``). If set to ``'tdigest'`` will use tdigest\n for floats and ints and fallback to the ``'dask'`` otherwise.\n \"\"\"\n return quantile(self, q, method=method)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series._repartition_quantiles_Series.__getitem__.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series._repartition_quantiles_Series.__getitem__.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2882, "end_line": 2897, "span_ids": ["Series._repartition_quantiles", "Series.__getitem__"], "tokens": 184}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Series(_Frame):\n\n def _repartition_quantiles(self, npartitions, upsample=1.0):\n \"\"\"Approximate quantiles of Series used for repartitioning\"\"\"\n from .partitionquantiles import partition_quantiles\n\n return partition_quantiles(self, npartitions, upsample=upsample)\n\n def __getitem__(self, key):\n if isinstance(key, Series) and self.divisions == key.divisions:\n name = \"index-%s\" % tokenize(self, key)\n dsk = partitionwise_graph(operator.getitem, name, self, key)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self, key])\n return Series(graph, name, self._meta, self.divisions)\n raise NotImplementedError(\n \"Series getitem is only supported for other series objects \"\n \"with matching partition structure\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series._get_numeric_data_Series.nunique.return.self_drop_duplicates_spli": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series._get_numeric_data_Series.nunique.return.self_drop_duplicates_spli", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2916, "end_line": 2981, "span_ids": ["Series.explode", "Series._validate_axis", "Series.__iter__", "Series.nunique", "Series.groupby", "Series._get_numeric_data", "Series.unique", "Series.iteritems", "Series.count", "Series.mode"], "tokens": 467}, "excluded_embed_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Series(_Frame):\n\n @derived_from(pd.DataFrame)\n def _get_numeric_data(self, how=\"any\", subset=None):\n return self\n\n @derived_from(pd.Series)\n def iteritems(self):\n for i in range(self.npartitions):\n s = self.get_partition(i).compute()\n for item in s.iteritems():\n yield item\n\n @derived_from(pd.Series)\n def __iter__(self):\n for i in range(self.npartitions):\n s = self.get_partition(i).compute()\n for row in s:\n yield row\n\n @classmethod\n def _validate_axis(cls, axis=0):\n if axis not in (0, \"index\", None):\n raise ValueError(\"No axis named {0}\".format(axis))\n # convert to numeric axis\n return {None: 0, \"index\": 0}.get(axis, axis)\n\n @derived_from(pd.Series)\n def groupby(self, by=None, **kwargs):\n from dask.dataframe.groupby import SeriesGroupBy\n\n return SeriesGroupBy(self, by=by, **kwargs)\n\n @derived_from(pd.Series)\n def count(self, split_every=False):\n return super().count(split_every=split_every)\n\n @derived_from(pd.Series)\n def mode(self, dropna=True, split_every=False):\n return super().mode(dropna=dropna, split_every=split_every)\n\n @derived_from(pd.Series, version=\"0.25.0\")\n def explode(self):\n meta = self._meta.explode()\n return self.map_partitions(M.explode, meta=meta, enforce_metadata=False)\n\n def unique(self, split_every=None, split_out=1):\n \"\"\"\n Return Series of unique values in the object. Includes NA values.\n\n Returns\n -------\n uniques : Series\n \"\"\"\n return aca(\n self,\n chunk=methods.unique,\n aggregate=methods.unique,\n meta=self._meta,\n token=\"unique\",\n split_every=split_every,\n series_name=self.name,\n split_out=split_out,\n )\n\n @derived_from(pd.Series)\n def nunique(self, split_every=None):\n return self.drop_duplicates(split_every=split_every).count()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.value_counts_Series.value_counts.return.aca_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.value_counts_Series.value_counts.return.aca_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2966, "end_line": 2994, "span_ids": ["Series.value_counts"], "tokens": 236}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Series(_Frame):\n\n @derived_from(pd.Series)\n def value_counts(\n self, sort=None, ascending=False, dropna=None, split_every=None, split_out=1\n ):\n \"\"\"\n Note: dropna is only supported in pandas >= 1.1.0, in which case it defaults to\n True.\n \"\"\"\n kwargs = {\"sort\": sort, \"ascending\": ascending}\n if dropna is not None:\n if not PANDAS_GT_110:\n raise 
NotImplementedError(\n \"dropna is not a valid argument for dask.dataframe.value_counts \"\n f\"if pandas < 1.1.0. Pandas version is {pd.__version__}\"\n )\n kwargs[\"dropna\"] = dropna\n\n return aca(\n self,\n chunk=M.value_counts,\n aggregate=methods.value_counts_aggregate,\n combine=methods.value_counts_combine,\n meta=self._meta.value_counts(),\n token=\"value-counts\",\n split_every=split_every,\n split_out=split_out,\n split_out_setup=split_out_on_index,\n **kwargs,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.map_Series.map.return.type_self_graph_name_m": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.map_Series.map.return.type_self_graph_name_m", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3025, "end_line": 3051, "span_ids": ["Series.map"], "tokens": 257}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Series(_Frame):\n\n @insert_meta_param_description(pad=12)\n @derived_from(pd.Series)\n def map(self, arg, na_action=None, meta=no_default):\n if is_series_like(arg) and is_dask_collection(arg):\n return series_map(self, arg)\n if not (\n isinstance(arg, dict)\n or callable(arg)\n or is_series_like(arg)\n and not is_dask_collection(arg)\n ):\n raise TypeError(\n \"arg must be pandas.Series, dict or callable.\"\n \" Got {0}\".format(type(arg))\n )\n name = \"map-\" + tokenize(self, arg, na_action)\n dsk = {\n (name, i): (M.map, k, arg, na_action)\n for i, k in enumerate(self.__dask_keys__())\n }\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n if meta is no_default:\n meta = _emulate(M.map, self, arg, na_action=na_action, udf=True)\n else:\n meta = make_meta(meta, index=getattr(make_meta(self), \"index\", None))\n\n return type(self)(graph, name, meta, self.divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.dropna_Series.to_string.return.self__repr_data_to_stri": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.dropna_Series.to_string.return.self__repr_data_to_stri", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3070, "end_line": 3130, "span_ids": ["Series.clip", "Series.combine", "Series.to_bag", "Series.align", "Series.squeeze", "Series.to_string", "Series.to_frame", "Series.between", "Series.combine_first", "Series.clip_upper", "Series.clip_lower", "Series.dropna"], "tokens": 453}, "excluded_embed_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Series(_Frame):\n\n @derived_from(pd.Series)\n def dropna(self):\n return self.map_partitions(M.dropna, enforce_metadata=False)\n\n @derived_from(pd.Series)\n def between(self, left, right, inclusive=True):\n return self.map_partitions(\n M.between, left=left, right=right, inclusive=inclusive\n )\n\n @derived_from(pd.Series)\n def clip(self, lower=None, upper=None, out=None):\n if out is not None:\n raise ValueError(\"'out' must be None\")\n # np.clip may pass out\n return self.map_partitions(\n M.clip, lower=lower, upper=upper, enforce_metadata=False\n )\n\n @derived_from(pd.Series)\n def clip_lower(self, threshold):\n return self.map_partitions(\n M.clip_lower, threshold=threshold, enforce_metadata=False\n )\n\n @derived_from(pd.Series)\n def clip_upper(self, threshold):\n return self.map_partitions(\n M.clip_upper, threshold=threshold, enforce_metadata=False\n )\n\n @derived_from(pd.Series)\n def align(self, other, join=\"outer\", axis=None, fill_value=None):\n return super().align(other, join=join, axis=axis, fill_value=fill_value)\n\n @derived_from(pd.Series)\n def combine(self, other, func, fill_value=None):\n return self.map_partitions(M.combine, other, func, fill_value=fill_value)\n\n @derived_from(pd.Series)\n def squeeze(self):\n return self\n\n @derived_from(pd.Series)\n def combine_first(self, other):\n return self.map_partitions(M.combine_first, other)\n\n def to_bag(self, index=False):\n \"\"\" Create a Dask Bag from a Series \"\"\"\n from .io import to_bag\n\n return to_bag(self, index)\n\n @derived_from(pd.Series)\n def to_frame(self, name=None):\n return self.map_partitions(M.to_frame, name, meta=self._meta.to_frame(name))\n\n @derived_from(pd.Series)\n def to_string(self, max_rows=5):\n # option_context doesn't affect\n return self._repr_data().to_string(max_rows=max_rows)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series._bind_operator_method_Series._bind_operator_method.setattr_cls_name_derive": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series._bind_operator_method_Series._bind_operator_method.setattr_cls_name_derive", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3117, "end_line": 3131, "span_ids": ["Series._bind_operator_method"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Series(_Frame):\n\n @classmethod\n def _bind_operator_method(cls, name, op, original=pd.Series):\n \"\"\" bind operator method like Series.add to this class \"\"\"\n\n def meth(self, other, level=None, fill_value=None, 
axis=0):\n if level is not None:\n raise NotImplementedError(\"level must be None\")\n axis = self._validate_axis(axis)\n meta = _emulate(op, self, other, axis=axis, fill_value=fill_value)\n return map_partitions(\n op, self, other, meta=meta, axis=axis, fill_value=fill_value\n )\n\n meth.__name__ = name\n setattr(cls, name, derived_from(original)(meth))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series._bind_comparison_method_Series._bind_comparison_method.setattr_cls_name_derive": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series._bind_comparison_method_Series._bind_comparison_method.setattr_cls_name_derive", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3133, "end_line": 3148, "span_ids": ["Series._bind_comparison_method"], "tokens": 152}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Series(_Frame):\n\n @classmethod\n def _bind_comparison_method(cls, name, comparison, original=pd.Series):\n \"\"\" bind comparison method like Series.eq to this class \"\"\"\n\n def meth(self, other, level=None, fill_value=None, axis=0):\n if level is not None:\n raise NotImplementedError(\"level must be None\")\n axis = self._validate_axis(axis)\n if fill_value is None:\n return elemwise(comparison, self, other, axis=axis)\n else:\n op = partial(comparison, fill_value=fill_value)\n return elemwise(op, self, other, axis=axis)\n\n meth.__name__ = name\n setattr(cls, name, derived_from(original)(meth))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.apply_Series.apply.return.map_partitions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.apply_Series.apply.return.map_partitions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3150, "end_line": 3219, "span_ids": ["Series.apply"], "tokens": 548}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Series(_Frame):\n\n @insert_meta_param_description(pad=12)\n def apply(self, func, convert_dtype=True, meta=no_default, args=(), **kwds):\n \"\"\"Parallel version of pandas.Series.apply\n\n Parameters\n ----------\n func : function\n Function to apply\n 
convert_dtype : boolean, default True\n Try to find better dtype for elementwise function results.\n If False, leave as dtype=object.\n $META\n args : tuple\n Positional arguments to pass to function in addition to the value.\n\n Additional keyword arguments will be passed as keywords to the function.\n\n Returns\n -------\n applied : Series or DataFrame if func returns a Series.\n\n Examples\n --------\n >>> import dask.dataframe as dd\n >>> s = pd.Series(range(5), name='x')\n >>> ds = dd.from_pandas(s, npartitions=2)\n\n Apply a function elementwise across the Series, passing in extra\n arguments in ``args`` and ``kwargs``:\n\n >>> def myadd(x, a, b=1):\n ... return x + a + b\n >>> res = ds.apply(myadd, args=(2,), b=1.5) # doctest: +SKIP\n\n By default, dask tries to infer the output metadata by running your\n provided function on some fake data. This works well in many cases, but\n can sometimes be expensive, or even fail. To avoid this, you can\n manually specify the output metadata with the ``meta`` keyword. This\n can be specified in many forms, for more information see\n ``dask.dataframe.utils.make_meta``.\n\n Here we specify the output is a Series with name ``'x'``, and dtype\n ``float64``:\n\n >>> res = ds.apply(myadd, args=(2,), b=1.5, meta=('x', 'f8'))\n\n In the case where the metadata doesn't change, you can also pass in\n the object itself directly:\n\n >>> res = ds.apply(lambda x: x + 1, meta=ds)\n\n See Also\n --------\n dask.Series.map_partitions\n \"\"\"\n if meta is no_default:\n meta = _emulate(\n M.apply,\n self._meta_nonempty,\n func,\n convert_dtype=convert_dtype,\n args=args,\n udf=True,\n **kwds,\n )\n warnings.warn(meta_warning(meta))\n\n return map_partitions(\n M.apply, self, func, convert_dtype, args, meta=meta, **kwds\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.cov_Series.corr.return.cov_corr_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.cov_Series.corr.return.cov_corr_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3221, "end_line": 3241, "span_ids": ["Series.corr", "Series.cov"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Series(_Frame):\n\n @derived_from(pd.Series)\n def cov(self, other, min_periods=None, split_every=False):\n from .multi import concat\n\n if not isinstance(other, Series):\n raise TypeError(\"other must be a dask.dataframe.Series\")\n df = concat([self, other], axis=1)\n return cov_corr(df, min_periods, scalar=True, split_every=split_every)\n\n @derived_from(pd.Series)\n def corr(self, other, method=\"pearson\", min_periods=None, split_every=False):\n from .multi import concat\n\n if not isinstance(other, Series):\n raise TypeError(\"other must be a dask.dataframe.Series\")\n if method != \"pearson\":\n raise NotImplementedError(\"Only Pearson 
correlation has been implemented\")\n df = concat([self, other], axis=1)\n return cov_corr(\n df, min_periods, corr=True, scalar=True, split_every=split_every\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.autocorr_Series.__rdivmod__.return.res1_res2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.autocorr_Series.__rdivmod__.return.res1_res2", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3243, "end_line": 3264, "span_ids": ["Series.__divmod__", "Series.__rdivmod__", "Series.autocorr", "Series.memory_usage"], "tokens": 190}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Series(_Frame):\n\n @derived_from(pd.Series)\n def autocorr(self, lag=1, split_every=False):\n if not isinstance(lag, Integral):\n raise TypeError(\"lag must be an integer\")\n return self.corr(self if lag == 0 else self.shift(lag), split_every=split_every)\n\n @derived_from(pd.Series)\n def memory_usage(self, index=True, deep=False):\n result = self.map_partitions(\n M.memory_usage, index=index, deep=deep, enforce_metadata=False\n )\n return delayed(sum)(result.to_delayed())\n\n def __divmod__(self, other):\n res1 = self // other\n res2 = self % other\n return res1, res2\n\n def __rdivmod__(self, other):\n res1 = other // self\n res2 = other % self\n return res1, res2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index_Index.__array_wrap__.return.pd_Index_array_name_self": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index_Index.__array_wrap__.return.pd_Index_array_name_self", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3282, "end_line": 3344, "span_ids": ["Index.index", "Index.__getattr__", "Index.__dir__", "Index.__array_wrap__", "Index"], "tokens": 362}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Index(Series):\n\n _partition_type = pd.Index\n _is_partition_type = staticmethod(is_index_like)\n _token_prefix = \"index-\"\n _accessors = set()\n\n _dt_attributes = {\n \"nanosecond\",\n \"microsecond\",\n \"millisecond\",\n \"dayofyear\",\n \"minute\",\n \"hour\",\n \"day\",\n \"dayofweek\",\n \"second\",\n 
\"week\",\n \"weekday\",\n \"weekofyear\",\n \"month\",\n \"quarter\",\n \"year\",\n }\n\n _cat_attributes = {\n \"known\",\n \"as_known\",\n \"as_unknown\",\n \"add_categories\",\n \"categories\",\n \"remove_categories\",\n \"reorder_categories\",\n \"as_ordered\",\n \"codes\",\n \"remove_unused_categories\",\n \"set_categories\",\n \"as_unordered\",\n \"ordered\",\n \"rename_categories\",\n }\n\n def __getattr__(self, key):\n if is_categorical_dtype(self.dtype) and key in self._cat_attributes:\n return getattr(self.cat, key)\n elif key in self._dt_attributes:\n return getattr(self.dt, key)\n raise AttributeError(\"'Index' object has no attribute %r\" % key)\n\n def __dir__(self):\n out = super().__dir__()\n out.extend(self._dt_attributes)\n if is_categorical_dtype(self.dtype):\n out.extend(self._cat_attributes)\n return out\n\n @property\n def index(self):\n msg = \"'{0}' object has no attribute 'index'\"\n raise AttributeError(msg.format(self.__class__.__name__))\n\n def __array_wrap__(self, array, context=None):\n return pd.Index(array, name=self.name)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index.head_Index.head.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index.head_Index.head.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3331, "end_line": 3344, "span_ids": ["Index.head"], "tokens": 136}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Index(Series):\n\n def head(self, n=5, compute=True):\n \"\"\"First n items of the Index.\n\n Caveat, this only checks the first partition.\n \"\"\"\n name = \"head-%d-%s\" % (n, self._name)\n dsk = {(name, 0): (operator.getitem, (self._name, 0), slice(0, n))}\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n\n result = new_dd_object(graph, name, self._meta, self.divisions[:2])\n\n if compute:\n result = result.compute()\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index.max_Index.count.return.self_reduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index.max_Index.count.return.self_reduction_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3346, "end_line": 3371, "span_ids": ["Index.max", "Index.min", "Index.count"], "tokens": 156}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Index(Series):\n\n @derived_from(pd.Index)\n def max(self, split_every=False):\n return self.reduction(\n M.max,\n meta=self._meta_nonempty.max(),\n token=self._token_prefix + \"max\",\n split_every=split_every,\n )\n\n @derived_from(pd.Index)\n def min(self, split_every=False):\n return self.reduction(\n M.min,\n meta=self._meta_nonempty.min(),\n token=self._token_prefix + \"min\",\n split_every=split_every,\n )\n\n def count(self, split_every=False):\n return self.reduction(\n methods.index_count,\n np.sum,\n token=\"index-count\",\n meta=int,\n split_every=split_every,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index.shift_Index.shift.return.maybe_shift_divisions_out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index.shift_Index.shift.return.maybe_shift_divisions_out", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3373, "end_line": 3395, "span_ids": ["Index.shift"], "tokens": 195}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Index(Series):\n\n @derived_from(pd.Index)\n def shift(self, periods=1, freq=None):\n if isinstance(self._meta, pd.PeriodIndex):\n if freq is not None:\n raise ValueError(\"PeriodIndex doesn't accept `freq` argument\")\n meta = self._meta_nonempty.shift(periods)\n out = self.map_partitions(\n M.shift, periods, meta=meta, token=\"shift\", transform_divisions=False\n )\n else:\n # Pandas will raise for other index types that don't implement shift\n meta = self._meta_nonempty.shift(periods, freq=freq)\n out = self.map_partitions(\n M.shift,\n periods,\n token=\"shift\",\n meta=meta,\n freq=freq,\n transform_divisions=False,\n )\n if freq is None:\n freq = meta.freq\n return maybe_shift_divisions(out, periods, freq=freq)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index.to_series_Index.to_frame.if_PANDAS_VERSION_0_2.else_.if_name_is_not_None_.else_.return.self_map_partitions_M_to_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index.to_series_Index.to_frame.if_PANDAS_VERSION_0_2.else_.if_name_is_not_None_.else_.return.self_map_partitions_M_to_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3397, "end_line": 3417, "span_ids": 
["Index.to_series", "Index.to_frame"], "tokens": 177}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Index(Series):\n\n @derived_from(pd.Index)\n def to_series(self):\n return self.map_partitions(M.to_series, meta=self._meta.to_series())\n\n @derived_from(pd.Index, ua_args=[\"index\"])\n def to_frame(self, index=True, name=None):\n if not index:\n raise NotImplementedError()\n\n if PANDAS_VERSION >= \"0.24.0\":\n return self.map_partitions(\n M.to_frame, index, name, meta=self._meta.to_frame(index, name)\n )\n else:\n if name is not None:\n raise ValueError(\n \"The 'name' keyword was added in pandas 0.24.0. \"\n \"Your version of pandas is '{}'.\".format(PANDAS_VERSION)\n )\n else:\n return self.map_partitions(M.to_frame, meta=self._meta.to_frame())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index.map_Index.map.return.applied": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index.map_Index.map.return.applied", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3434, "end_line": 3452, "span_ids": ["Index.map"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Index(Series):\n\n @insert_meta_param_description(pad=12)\n @derived_from(pd.Index)\n def map(self, arg, na_action=None, meta=no_default, is_monotonic=False):\n \"\"\"\n Note that this method clears any known divisions.\n\n If your mapping function is monotonically increasing then use `is_monotonic`\n to apply the maping function to the old divisions and assign the new\n divisions to the output.\n\n \"\"\"\n applied = super().map(arg, na_action=na_action, meta=meta)\n if is_monotonic and self.known_divisions:\n applied.divisions = tuple(\n pd.Series(self.divisions).map(arg, na_action=na_action)\n )\n else:\n applied = applied.clear_divisions()\n return applied", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame_DataFrame.columns_2.self.dask.renamed_dask": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame_DataFrame.columns_2.self.dask.renamed_dask", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3440, "end_line": 3484, 
"span_ids": ["DataFrame.__array_wrap__", "DataFrame", "DataFrame.columns_2", "DataFrame.columns"], "tokens": 328}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n \"\"\"\n Parallel Pandas DataFrame\n\n Do not use this class directly. Instead use functions like\n ``dd.read_csv``, ``dd.read_parquet``, or ``dd.from_pandas``.\n\n Parameters\n ----------\n dsk: dict\n The dask graph to compute this DataFrame\n name: str\n The key prefix that specifies which keys in the dask comprise this\n particular DataFrame\n meta: pandas.DataFrame\n An empty ``pandas.DataFrame`` with names, dtypes, and index matching\n the expected output.\n divisions: tuple of index values\n Values along which we partition our blocks on the index\n \"\"\"\n\n _partition_type = pd.DataFrame\n _is_partition_type = staticmethod(is_dataframe_like)\n _token_prefix = \"dataframe-\"\n _accessors = set()\n\n def __array_wrap__(self, array, context=None):\n if isinstance(context, tuple) and len(context) > 0:\n if isinstance(context[1][0], np.ndarray) and context[1][0].shape == ():\n index = None\n else:\n index = context[1][0].index\n\n return pd.DataFrame(array, index=index, columns=self.columns)\n\n @property\n def columns(self):\n return self._meta.columns\n\n @columns.setter\n def columns(self, columns):\n renamed = _rename_dask(self, columns)\n self._meta = renamed._meta\n self._name = renamed._name\n self.dask = renamed.dask", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.iloc_DataFrame.iloc.return._iLocIndexer_self_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.iloc_DataFrame.iloc.return._iLocIndexer_self_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3486, "end_line": 3502, "span_ids": ["DataFrame.iloc"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n @property\n def iloc(self):\n \"\"\"Purely integer-location based indexing for selection by position.\n\n Only indexing the column positions is supported. 
Trying to select\n row positions will raise a ValueError.\n\n See :ref:`dataframe.indexing` for more.\n\n Examples\n --------\n >>> df.iloc[:, [2, 0, 1]] # doctest: +SKIP\n \"\"\"\n from .indexing import _iLocIndexer\n\n # For dataframes with unique column names, this will be transformed into a __getitem__ call\n return _iLocIndexer(self)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.__len___DataFrame.empty.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.__len___DataFrame.empty.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3504, "end_line": 3522, "span_ids": ["DataFrame.__contains__", "DataFrame.__len__", "DataFrame.empty"], "tokens": 138}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n def __len__(self):\n try:\n s = self.iloc[:, 0]\n except IndexError:\n return super().__len__()\n else:\n return len(s)\n\n def __contains__(self, key):\n return key in self._meta\n\n @property\n def empty(self):\n raise NotImplementedError(\n \"Checking whether a Dask DataFrame has any rows may be expensive. \"\n \"However, checking the number of columns is fast. 
\"\n \"Depending on which of these results you need, use either \"\n \"`len(df.index) == 0` or `len(df.columns) == 0`\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.__getitem___DataFrame.__getitem__.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.__getitem___DataFrame.__getitem__.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3539, "end_line": 3590, "span_ids": ["DataFrame.__getitem__"], "tokens": 519}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n def __getitem__(self, key):\n name = \"getitem-%s\" % tokenize(self, key)\n if np.isscalar(key) or isinstance(key, (tuple, str)):\n\n if isinstance(self._meta.index, (pd.DatetimeIndex, pd.PeriodIndex)):\n if key not in self._meta.columns:\n return self.loc[key]\n\n # error is raised from pandas\n meta = self._meta[_extract_meta(key)]\n dsk = partitionwise_graph(operator.getitem, name, self, key)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n return new_dd_object(graph, name, meta, self.divisions)\n elif isinstance(key, slice):\n from pandas.api.types import is_float_dtype\n\n is_integer_slice = any(\n isinstance(i, Integral) for i in (key.start, key.step, key.stop)\n )\n # Slicing with integer labels is always iloc based except for a\n # float indexer for some reason\n if is_integer_slice and not is_float_dtype(self.index.dtype):\n # NOTE: this always fails currently, as iloc is mostly\n # unsupported, but we call it anyway here for future-proofing\n # and error-attribution purposes\n return self.iloc[key]\n else:\n return self.loc[key]\n\n if isinstance(key, (np.ndarray, list)) or (\n not is_dask_collection(key) and (is_series_like(key) or is_index_like(key))\n ):\n # error is raised from pandas\n meta = self._meta[_extract_meta(key)]\n\n dsk = partitionwise_graph(operator.getitem, name, self, key)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n return new_dd_object(graph, name, meta, self.divisions)\n if isinstance(key, Series):\n # do not perform dummy calculation, as columns will not be changed.\n #\n if self.divisions != key.divisions:\n from .multi import _maybe_align_partitions\n\n self, key = _maybe_align_partitions([self, key])\n dsk = partitionwise_graph(operator.getitem, name, self, key)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self, key])\n return new_dd_object(graph, name, self, self.divisions)\n if isinstance(key, DataFrame):\n return self.where(key, np.nan)\n\n raise NotImplementedError(key)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.__setitem___DataFrame.__setitem__.self.divisions.df_divisions": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.__setitem___DataFrame.__setitem__.self.divisions.df_divisions", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3592, "end_line": 3609, "span_ids": ["DataFrame.__setitem__"], "tokens": 188}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n def __setitem__(self, key, value):\n if isinstance(key, (tuple, list)) and isinstance(value, DataFrame):\n df = self.assign(**{k: value[c] for k, c in zip(key, value.columns)})\n\n elif isinstance(key, pd.Index) and not isinstance(value, DataFrame):\n key = list(key)\n df = self.assign(**{k: value for k in key})\n elif is_dataframe_like(key) or isinstance(key, DataFrame):\n df = self.where(~key, value)\n elif not isinstance(key, str):\n raise NotImplementedError(f\"Item assignment with {type(key)} not supported\")\n else:\n df = self.assign(**{key: value})\n\n self.dask = df.dask\n self._name = df._name\n self._meta = df._meta\n self.divisions = df.divisions", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.__delitem___DataFrame.ndim.return.2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.__delitem___DataFrame.ndim.return.2", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3589, "end_line": 3627, "span_ids": ["DataFrame.__dir__", "DataFrame.__delitem__", "DataFrame._ipython_key_completions_", "DataFrame.__setattr__", "DataFrame.ndim", "DataFrame.__iter__", "DataFrame.__getattr__"], "tokens": 260}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n def __delitem__(self, key):\n result = self.drop([key], axis=1)\n self.dask = result.dask\n self._name = result._name\n self._meta = result._meta\n\n def __setattr__(self, key, value):\n try:\n columns = object.__getattribute__(self, \"_meta\").columns\n except AttributeError:\n columns = ()\n\n if key in columns:\n self[key] = value\n else:\n object.__setattr__(self, key, value)\n\n def __getattr__(self, key):\n if key in self.columns:\n return self[key]\n else:\n raise AttributeError(\"'DataFrame' object has no attribute %r\" % key)\n\n def __dir__(self):\n o = set(dir(type(self)))\n 
o.update(self.__dict__)\n o.update(c for c in self.columns if (isinstance(c, str) and c.isidentifier()))\n return list(o)\n\n def __iter__(self):\n return iter(self._meta)\n\n def _ipython_key_completions_(self):\n return methods.tolist(self.columns)\n\n @property\n def ndim(self):\n \"\"\" Return dimensionality \"\"\"\n return 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.shape_DataFrame.shape.return._row_size_col_size_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.shape_DataFrame.shape.return._row_size_col_size_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3629, "end_line": 3646, "span_ids": ["DataFrame.shape"], "tokens": 152}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n @property\n def shape(self):\n \"\"\"\n Return a tuple representing the dimensionality of the DataFrame.\n\n The number of rows is a Delayed result. The number of columns\n is a concrete integer.\n\n Examples\n --------\n >>> df.shape # doctest: +SKIP\n (Delayed('int-07f06075-5ecc-4d77-817e-63c69a9188a8'), 2)\n \"\"\"\n col_size = len(self.columns)\n if col_size == 0:\n return (self.index.shape[0], 0)\n row_size = delayed(int)(self.size / col_size)\n return (row_size, col_size)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.dtypes_DataFrame.select_dtypes.return.self_list_cs_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.dtypes_DataFrame.select_dtypes.return.self_list_cs_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3648, "end_line": 3664, "span_ids": ["DataFrame.get_ftype_counts", "DataFrame.get_dtype_counts", "DataFrame.select_dtypes", "DataFrame.dtypes"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n @property\n def dtypes(self):\n \"\"\" Return data types \"\"\"\n return self._meta.dtypes\n\n @derived_from(pd.DataFrame)\n def get_dtype_counts(self):\n return self._meta.get_dtype_counts()\n\n @derived_from(pd.DataFrame)\n def get_ftype_counts(self):\n return self._meta.get_ftype_counts()\n\n 
@derived_from(pd.DataFrame)\n def select_dtypes(self, include=None, exclude=None):\n cs = self._meta.select_dtypes(include=include, exclude=exclude).columns\n return self[list(cs)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.set_index_DataFrame.set_index._Set_the_DataFrame_inde": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.set_index_DataFrame.set_index._Set_the_DataFrame_inde", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3688, "end_line": 3768, "span_ids": ["DataFrame.set_index"], "tokens": 889}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n def set_index(\n self,\n other,\n drop=True,\n sorted=False,\n npartitions=None,\n divisions=None,\n inplace=False,\n **kwargs,\n ):\n \"\"\"Set the DataFrame index (row labels) using an existing column.\n\n This realigns the dataset to be sorted by a new column. This can have a\n significant impact on performance, because joins, groupbys, lookups, etc.\n are all much faster on that column. However, this performance increase\n comes with a cost, sorting a parallel dataset requires expensive shuffles.\n Often we ``set_index`` once directly after data ingest and filtering and\n then perform many cheap computations off of the sorted dataset.\n\n This function operates exactly like ``pandas.set_index`` except with\n different performance costs (dask dataframe ``set_index`` is much more expensive).\n Under normal operation this function does an initial pass over the index column\n to compute approximate quantiles to serve as future divisions. It then passes\n over the data a second time, splitting up each input partition into several\n pieces and sharing those pieces to all of the output partitions now in\n sorted order.\n\n In some cases we can alleviate those costs, for example if your dataset is\n sorted already then we can avoid making many small pieces or if you know\n good values to split the new index column then we can avoid the initial\n pass over the data. For example if your new index is a datetime index and\n your data is already sorted by day then this entire operation can be done\n for free. You can control these options with the following parameters.\n\n Parameters\n ----------\n other: string or Dask Series\n drop: boolean, default True\n Delete column to be used as the new index.\n sorted: bool, optional\n If the index column is already sorted in increasing order.\n Defaults to False\n npartitions: int, None, or 'auto'\n The ideal number of output partitions. If None, use the same as\n the input. 
If 'auto' then decide by memory use.\n divisions: list, optional\n Known values on which to separate index values of the partitions.\n See https://docs.dask.org/en/latest/dataframe-design.html#partitions\n Defaults to computing this with a single pass over the data. Note\n that if ``sorted=True``, specified divisions are assumed to match\n the existing partitions in the data. If ``sorted=False``, you should\n leave divisions empty and call ``repartition`` after ``set_index``.\n inplace: bool, optional\n Modifying the DataFrame in place is not supported by Dask.\n Defaults to False.\n shuffle: string, 'disk' or 'tasks', optional\n Either ``'disk'`` for single-node operation or ``'tasks'`` for\n distributed operation. Will be inferred by your current scheduler.\n compute: bool, default False\n Whether or not to trigger an immediate computation. Defaults to False.\n Note that even if you set ``compute=False``, an immediate computation\n will still be triggered if ``divisions`` is ``None``.\n partition_size: int, optional\n Desired size of each partition in bytes.\n Only used when ``npartitions='auto'``\n\n Examples\n --------\n >>> df2 = df.set_index('x') # doctest: +SKIP\n >>> df2 = df.set_index(d.x) # doctest: +SKIP\n >>> df2 = df.set_index(d.timestamp, sorted=True) # doctest: +SKIP\n\n A common case is when we have a datetime column that we know to be\n sorted and is cleanly divided by day. We can set this index for free\n by specifying both that the column is pre-sorted and the particular\n divisions along which it is separated.\n\n >>> import pandas as pd\n >>> divisions = pd.date_range('2000', '2010', freq='1D')\n >>> df2 = df.set_index('timestamp', sorted=True, divisions=divisions) # doctest: +SKIP\n \"\"\"\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.set_index.if_inplace__DataFrame.set_index.if_pre_sorted_.else_.return.set_index_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.set_index.if_inplace__DataFrame.set_index.if_pre_sorted_.else_.return.set_index_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3746, "end_line": 3770, "span_ids": ["DataFrame.set_index"], "tokens": 170}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n def set_index(\n self,\n other,\n drop=True,\n sorted=False,\n npartitions=None,\n divisions=None,\n inplace=False,\n **kwargs,\n ):\n if inplace:\n raise NotImplementedError(\"The inplace= keyword is not supported\")\n pre_sorted = sorted\n del sorted\n\n if divisions is not None:\n check_divisions(divisions)\n\n if pre_sorted:\n from .shuffle import set_sorted_index\n\n return set_sorted_index(\n self, other, drop=drop, divisions=divisions, **kwargs\n )\n else:\n from .shuffle import set_index\n\n return set_index(\n self,\n other,\n drop=drop,\n 
npartitions=npartitions,\n divisions=divisions,\n **kwargs,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.pop_DataFrame.categorize.return.categorize_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.pop_DataFrame.categorize.return.categorize_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3772, "end_line": 3816, "span_ids": ["DataFrame.nlargest", "DataFrame.pop", "DataFrame.categorize", "DataFrame.groupby", "DataFrame.nsmallest"], "tokens": 303}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n @derived_from(pd.DataFrame)\n def pop(self, item):\n out = self[item]\n del self[item]\n return out\n\n @derived_from(pd.DataFrame)\n def nlargest(self, n=5, columns=None, split_every=None):\n token = \"dataframe-nlargest\"\n return aca(\n self,\n chunk=M.nlargest,\n aggregate=M.nlargest,\n meta=self._meta,\n token=token,\n split_every=split_every,\n n=n,\n columns=columns,\n )\n\n @derived_from(pd.DataFrame)\n def nsmallest(self, n=5, columns=None, split_every=None):\n token = \"dataframe-nsmallest\"\n return aca(\n self,\n chunk=M.nsmallest,\n aggregate=M.nsmallest,\n meta=self._meta,\n token=token,\n split_every=split_every,\n n=n,\n columns=columns,\n )\n\n @derived_from(pd.DataFrame)\n def groupby(self, by=None, **kwargs):\n from dask.dataframe.groupby import DataFrameGroupBy\n\n return DataFrameGroupBy(self, by=by, **kwargs)\n\n @wraps(categorize)\n def categorize(self, columns=None, index=None, split_every=None, **kwargs):\n return categorize(\n self, columns=columns, index=index, split_every=split_every, **kwargs\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.assign_DataFrame.assign.return.elemwise_methods_assign_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.assign_DataFrame.assign.return.elemwise_methods_assign_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3818, "end_line": 3853, "span_ids": ["DataFrame.assign"], "tokens": 279}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n @derived_from(pd.DataFrame)\n 
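To make the two ``set_index`` code paths above concrete, here is a small sketch contrasting the general shuffling path with the cheap pre-sorted path; the data and names are illustrative, not from the source:

import pandas as pd
import dask.dataframe as dd

df = pd.DataFrame(
    {"timestamp": pd.date_range("2000-01-01", periods=100, freq="H"),
     "value": range(100)}
)
ddf = dd.from_pandas(df, npartitions=4)

shuffled = ddf.set_index("timestamp")            # general path: full shuffle
cheap = ddf.set_index("timestamp", sorted=True)  # pre-sorted path: no shuffle
print(cheap.known_divisions)                     # expected True: divisions were computed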
def assign(self, **kwargs):\n for k, v in kwargs.items():\n if not (\n isinstance(v, Scalar)\n or is_series_like(v)\n or callable(v)\n or pd.api.types.is_scalar(v)\n or is_index_like(v)\n or isinstance(v, Array)\n ):\n raise TypeError(\n \"Column assignment doesn't support type \"\n \"{0}\".format(typename(type(v)))\n )\n if callable(v):\n kwargs[k] = v(self)\n\n if isinstance(v, Array):\n from .io import from_dask_array\n\n if len(v.shape) > 1:\n raise ValueError(\"Array assignment only supports 1-D arrays\")\n if v.npartitions != self.npartitions:\n raise ValueError(\n \"Number of partitions do not match ({0} != {1})\".format(\n v.npartitions, self.npartitions\n )\n )\n kwargs[k] = from_dask_array(v, index=self.index, meta=self._meta)\n\n pairs = list(sum(kwargs.items(), ()))\n\n # Figure out columns of the output\n df2 = self._meta_nonempty.assign(**_extract_meta(kwargs, nonempty=True))\n return elemwise(methods.assign, self, *pairs, meta=df2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.rename_DataFrame.query.return.self_map_partitions_M_que": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.rename_DataFrame.query.return.self_map_partitions_M_que", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3855, "end_line": 3880, "span_ids": ["DataFrame.rename", "DataFrame.query"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n @derived_from(pd.DataFrame, ua_args=[\"index\"])\n def rename(self, index=None, columns=None):\n if index is not None:\n raise ValueError(\"Cannot rename index.\")\n\n # *args here is index, columns but columns arg is already used\n return self.map_partitions(M.rename, None, columns=columns)\n\n def query(self, expr, **kwargs):\n \"\"\"Filter dataframe with complex expression\n\n Blocked version of pd.DataFrame.query\n\n This is like the sequential version except that this will also happen\n in many threads. This may conflict with ``numexpr`` which will use\n multiple threads itself. 
We recommend that you set numexpr to use a\n single thread\n\n import numexpr\n numexpr.set_num_threads(1)\n\n See also\n --------\n pandas.DataFrame.query\n \"\"\"\n return self.map_partitions(M.query, expr, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.eval_DataFrame.clip_upper.return.self_map_partitions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.eval_DataFrame.clip_upper.return.self_map_partitions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3882, "end_line": 3917, "span_ids": ["DataFrame.dropna", "DataFrame.clip", "DataFrame.eval", "DataFrame.clip_upper", "DataFrame.clip_lower"], "tokens": 291}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n @derived_from(pd.DataFrame)\n def eval(self, expr, inplace=None, **kwargs):\n if inplace is None:\n inplace = False\n if \"=\" in expr and inplace in (True, None):\n raise NotImplementedError(\n \"Inplace eval not supported. Please use inplace=False\"\n )\n meta = self._meta.eval(expr, inplace=inplace, **kwargs)\n return self.map_partitions(M.eval, expr, meta=meta, inplace=inplace, **kwargs)\n\n @derived_from(pd.DataFrame)\n def dropna(self, how=\"any\", subset=None, thresh=None):\n return self.map_partitions(\n M.dropna, how=how, subset=subset, thresh=thresh, enforce_metadata=False\n )\n\n @derived_from(pd.DataFrame)\n def clip(self, lower=None, upper=None, out=None):\n if out is not None:\n raise ValueError(\"'out' must be None\")\n return self.map_partitions(\n M.clip, lower=lower, upper=upper, enforce_metadata=False\n )\n\n @derived_from(pd.DataFrame)\n def clip_lower(self, threshold):\n return self.map_partitions(\n M.clip_lower, threshold=threshold, enforce_metadata=False\n )\n\n @derived_from(pd.DataFrame)\n def clip_upper(self, threshold):\n return self.map_partitions(\n M.clip_upper, threshold=threshold, enforce_metadata=False\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.squeeze_DataFrame.squeeze.if_axis_in_None_1_.elif_axis_not_in_0_1_N.raise_ValueError_No_axis": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.squeeze_DataFrame.squeeze.if_axis_in_None_1_.elif_axis_not_in_0_1_N.raise_ValueError_No_axis", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3919, "end_line": 3933, "span_ids": ["DataFrame.squeeze"], "tokens": 119}, 
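A short sketch of the blocked ``query`` described above (illustrative data; extra keywords are forwarded to ``pandas.DataFrame.query`` on each partition):

import pandas as pd
import dask.dataframe as dd

ddf = dd.from_pandas(
    pd.DataFrame({"x": [1, 2, 3, 4], "y": [10, 20, 30, 40]}), npartitions=2
)
# Each partition runs pandas' query independently; no shuffle is needed
print(ddf.query("x > 2 and y < 40").compute())  # only the row x=3, y=30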
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n @derived_from(pd.DataFrame)\n def squeeze(self, axis=None):\n if axis in [None, 1]:\n if len(self.columns) == 1:\n return self[self.columns[0]]\n else:\n return self\n\n elif axis == 0:\n raise NotImplementedError(\n \"{0} does not support squeeze along axis 0\".format(type(self))\n )\n\n elif axis not in [0, 1, None]:\n raise ValueError(\"No axis {0} for object type {1}\".format(axis, type(self)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.to_timestamp_DataFrame.drop.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.to_timestamp_DataFrame.drop.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3935, "end_line": 3997, "span_ids": ["DataFrame.explode", "DataFrame._get_numeric_data", "DataFrame.to_parquet", "DataFrame.drop", "DataFrame._validate_axis", "DataFrame.to_bag", "DataFrame.to_timestamp", "DataFrame.to_string"], "tokens": 558}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n @derived_from(pd.DataFrame)\n def to_timestamp(self, freq=None, how=\"start\", axis=0):\n df = elemwise(M.to_timestamp, self, freq, how, axis)\n df.divisions = tuple(pd.Index(self.divisions).to_timestamp())\n return df\n\n @derived_from(pd.DataFrame, version=\"0.25.0\")\n def explode(self, column):\n meta = self._meta.explode(column)\n return self.map_partitions(M.explode, column, meta=meta, enforce_metadata=False)\n\n def to_bag(self, index=False):\n \"\"\"Convert to a dask Bag of tuples of each row.\n\n Parameters\n ----------\n index : bool, optional\n If True, the index is included as the first element of each tuple.\n Default is False.\n \"\"\"\n from .io import to_bag\n\n return to_bag(self, index)\n\n def to_parquet(self, path, *args, **kwargs):\n \"\"\" See dd.to_parquet docstring for more information \"\"\"\n from .io import to_parquet\n\n return to_parquet(self, path, *args, **kwargs)\n\n @derived_from(pd.DataFrame)\n def to_string(self, max_rows=5):\n # option_context doesn't affect\n return self._repr_data().to_string(max_rows=max_rows, show_dimensions=False)\n\n def _get_numeric_data(self, how=\"any\", subset=None):\n # calculate columns to avoid unnecessary calculation\n numerics = self._meta._get_numeric_data()\n\n if len(numerics.columns) < len(self.columns):\n name = self._token_prefix + \"-get_numeric_data\"\n return self.map_partitions(M._get_numeric_data, meta=numerics, token=name)\n else:\n # use myself 
if all numerics\n return self\n\n @classmethod\n def _validate_axis(cls, axis=0):\n if axis not in (0, 1, \"index\", \"columns\", None):\n raise ValueError(\"No axis named {0}\".format(axis))\n # convert to numeric axis\n return {None: 0, \"index\": 0, \"columns\": 1}.get(axis, axis)\n\n @derived_from(pd.DataFrame)\n def drop(self, labels=None, axis=0, columns=None, errors=\"raise\"):\n axis = self._validate_axis(axis)\n if (axis == 1) or (columns is not None):\n return self.map_partitions(\n drop_by_shallow_copy, columns or labels, errors=errors\n )\n raise NotImplementedError(\n \"Drop currently only works for axis=1 or when columns is not None\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.merge_DataFrame.merge.return.merge_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.merge_DataFrame.merge.return.merge_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3999, "end_line": 4108, "span_ids": ["DataFrame.merge"], "tokens": 1033}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n def merge(\n self,\n right,\n how=\"inner\",\n on=None,\n left_on=None,\n right_on=None,\n left_index=False,\n right_index=False,\n suffixes=(\"_x\", \"_y\"),\n indicator=False,\n npartitions=None,\n shuffle=None,\n ):\n \"\"\"Merge the DataFrame with another DataFrame\n\n This will merge the two datasets, either on the indices, a certain column\n in each dataset or the index in one dataset and the column in another.\n\n Parameters\n ----------\n right: dask.dataframe.DataFrame\n how : {'left', 'right', 'outer', 'inner'}, default: 'inner'\n How to handle the operation of the two objects:\n\n - left: use calling frame's index (or column if on is specified)\n - right: use other frame's index\n - outer: form union of calling frame's index (or column if on is\n specified) with other frame's index, and sort it\n lexicographically\n - inner: form intersection of calling frame's index (or column if\n on is specified) with other frame's index, preserving the order\n of the calling frame's\n\n on : label or list\n Column or index level names to join on. These must be found in both\n DataFrames. If on is None and not merging on indexes then this\n defaults to the intersection of the columns in both DataFrames.\n left_on : label or list, or array-like\n Column to join on in the left DataFrame. Unlike in pandas,\n arrays and lists are only supported if their length is 1.\n right_on : label or list, or array-like\n Column to join on in the right DataFrame. 
Unlike in pandas,\n arrays and lists are only supported if their length is 1.\n left_index : boolean, default False\n Use the index from the left DataFrame as the join key.\n right_index : boolean, default False\n Use the index from the right DataFrame as the join key.\n suffixes : 2-length sequence (tuple, list, ...)\n Suffix to apply to overlapping column names in the left and\n right side, respectively\n indicator : boolean or string, default False\n If True, adds a column to output DataFrame called \"_merge\" with\n information on the source of each row. If string, column with\n information on source of each row will be added to output DataFrame,\n and column will be named value of string. Information column is\n Categorical-type and takes on a value of \"left_only\" for observations\n whose merge key only appears in `left` DataFrame, \"right_only\" for\n observations whose merge key only appears in `right` DataFrame,\n and \"both\" if the observation\u2019s merge key is found in both.\n npartitions: int or None, optional\n The ideal number of output partitions. This is only utilised when\n performing a hash_join (merging on columns only). If ``None`` then\n ``npartitions = max(lhs.npartitions, rhs.npartitions)``.\n Default is ``None``.\n shuffle: {'disk', 'tasks'}, optional\n Either ``'disk'`` for single-node operation or ``'tasks'`` for\n distributed operation. Will be inferred by your current scheduler.\n\n Notes\n -----\n\n There are three ways to join dataframes:\n\n 1. Joining on indices. In this case the divisions are\n aligned using the function ``dask.dataframe.multi.align_partitions``.\n Afterwards, each partition is merged with the pandas merge function.\n\n 2. Joining one on index and one on column. In this case the divisions of\n dataframe merged by index (:math:`d_i`) are used to divide the column\n merged dataframe (:math:`d_c`) using\n ``dask.dataframe.multi.rearrange_by_divisions``. In this case the\n merged dataframe (:math:`d_m`) has the exact same divisions\n as (:math:`d_i`). This can lead to issues if you merge multiple rows from\n (:math:`d_c`) to one row in (:math:`d_i`).\n\n 3. Joining both on columns. 
In this case a hash join is performed using\n ``dask.dataframe.multi.hash_join``.\n\n \"\"\"\n\n if not is_dataframe_like(right):\n raise ValueError(\"right must be DataFrame\")\n\n from .multi import merge\n\n return merge(\n self,\n right,\n how=how,\n on=on,\n left_on=left_on,\n right_on=right_on,\n left_index=left_index,\n right_index=right_index,\n suffixes=suffixes,\n npartitions=npartitions,\n indicator=indicator,\n shuffle=shuffle,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.join_DataFrame.join.return.merge_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.join_DataFrame.join.return.merge_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4133, "end_line": 4160, "span_ids": ["DataFrame.join"], "tokens": 144}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n @derived_from(pd.DataFrame) # doctest: +SKIP\n def join(\n self,\n other,\n on=None,\n how=\"left\",\n lsuffix=\"\",\n rsuffix=\"\",\n npartitions=None,\n shuffle=None,\n ):\n\n if not is_dataframe_like(other):\n raise ValueError(\"other must be DataFrame\")\n\n from .multi import merge\n\n return merge(\n self,\n other,\n how=how,\n left_index=on is None,\n right_index=True,\n left_on=on,\n suffixes=(lsuffix, rsuffix),\n npartitions=npartitions,\n shuffle=shuffle,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.append_DataFrame.items.for_col_idx_label_in_enu.yield_label_self_iloc_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.append_DataFrame.items.for_col_idx_label_in_enu.yield_label_self_iloc_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4162, "end_line": 4191, "span_ids": ["DataFrame.append", "DataFrame.items", "DataFrame.iterrows", "DataFrame.itertuples"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n @derived_from(pd.DataFrame)\n def append(self, other, interleave_partitions=False):\n if isinstance(other, Series):\n msg = (\n \"Unable to append a dd.Series to a dd.DataFrame. \"\n \"Use a pd.Series to append as a row.\"\n )\n raise 
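Of the three join strategies listed in the Notes above, a column-on-column merge takes the hash-join path. A minimal sketch (frames and column names are illustrative):

import pandas as pd
import dask.dataframe as dd

left = dd.from_pandas(pd.DataFrame({"k": [1, 2, 3], "a": [10, 20, 30]}), npartitions=2)
right = dd.from_pandas(pd.DataFrame({"k": [2, 3, 4], "b": [200, 300, 400]}), npartitions=2)

# Joining both sides on a column -> dask.dataframe.multi.hash_join under the hood
print(left.merge(right, on="k", how="inner").compute())  # rows for k=2 and k=3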
ValueError(msg)\n elif is_series_like(other):\n other = other.to_frame().T\n return super().append(other, interleave_partitions=interleave_partitions)\n\n @derived_from(pd.DataFrame)\n def iterrows(self):\n for i in range(self.npartitions):\n df = self.get_partition(i).compute()\n for row in df.iterrows():\n yield row\n\n @derived_from(pd.DataFrame)\n def itertuples(self, index=True, name=\"Pandas\"):\n for i in range(self.npartitions):\n df = self.get_partition(i).compute()\n for row in df.itertuples(index=index, name=name):\n yield row\n\n @derived_from(pd.DataFrame)\n def items(self):\n for col_idx, label in enumerate(self.columns):\n yield label, self.iloc[:, col_idx]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame._bind_operator_method_DataFrame._bind_comparison_method.setattr_cls_name_derive": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame._bind_operator_method_DataFrame._bind_comparison_method.setattr_cls_name_derive", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4172, "end_line": 4232, "span_ids": ["DataFrame._bind_operator_method", "DataFrame._bind_comparison_method"], "tokens": 470}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n @classmethod\n def _bind_operator_method(cls, name, op, original=pd.DataFrame):\n \"\"\" bind operator method like DataFrame.add to this class \"\"\"\n\n # name must be explicitly passed for div method whose name is truediv\n\n def meth(self, other, axis=\"columns\", level=None, fill_value=None):\n if level is not None:\n raise NotImplementedError(\"level must be None\")\n\n axis = self._validate_axis(axis)\n\n if axis in (1, \"columns\"):\n # When axis=1 and other is a series, `other` is transposed\n # and the operator is applied broadcast across rows. This\n # isn't supported with dd.Series.\n if isinstance(other, Series):\n msg = \"Unable to {0} dd.Series with axis=1\".format(name)\n raise ValueError(msg)\n elif is_series_like(other):\n # Special case for pd.Series to avoid unwanted partitioning\n # of other. 
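The row iterators above materialize one partition at a time rather than the whole frame; a small illustrative loop:

import pandas as pd
import dask.dataframe as dd

ddf = dd.from_pandas(pd.DataFrame({"x": [1, 2, 3]}), npartitions=3)
# Each get_partition(i).compute() pulls a single partition into memory
for row in ddf.itertuples(index=False):
    print(row.x)  # 1, 2, 3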
We pass it in as a kwarg to prevent this.\n meta = _emulate(\n op, self, other=other, axis=axis, fill_value=fill_value\n )\n return map_partitions(\n op,\n self,\n other=other,\n meta=meta,\n axis=axis,\n fill_value=fill_value,\n enforce_metadata=False,\n )\n\n meta = _emulate(op, self, other, axis=axis, fill_value=fill_value)\n return map_partitions(\n op,\n self,\n other,\n meta=meta,\n axis=axis,\n fill_value=fill_value,\n enforce_metadata=False,\n )\n\n meth.__name__ = name\n setattr(cls, name, derived_from(original)(meth))\n\n @classmethod\n def _bind_comparison_method(cls, name, comparison, original=pd.DataFrame):\n \"\"\" bind comparison method like DataFrame.eq to this class \"\"\"\n\n def meth(self, other, axis=\"columns\", level=None):\n if level is not None:\n raise NotImplementedError(\"level must be None\")\n axis = self._validate_axis(axis)\n return elemwise(comparison, self, other, axis=axis)\n\n meth.__name__ = name\n setattr(cls, name, derived_from(original)(meth))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.apply_DataFrame.apply.return.map_partitions_M_apply_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.apply_DataFrame.apply.return.map_partitions_M_apply_s", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4255, "end_line": 4353, "span_ids": ["DataFrame.apply"], "tokens": 803}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n @insert_meta_param_description(pad=12)\n def apply(\n self,\n func,\n axis=0,\n broadcast=None,\n raw=False,\n reduce=None,\n args=(),\n meta=no_default,\n **kwds,\n ):\n \"\"\"Parallel version of pandas.DataFrame.apply\n\n This mimics the pandas version except for the following:\n\n 1. Only ``axis=1`` is supported (and must be specified explicitly).\n 2. The user should provide output metadata via the `meta` keyword.\n\n Parameters\n ----------\n func : function\n Function to apply to each column/row\n axis : {0 or 'index', 1 or 'columns'}, default 0\n - 0 or 'index': apply function to each column (NOT SUPPORTED)\n - 1 or 'columns': apply function to each row\n $META\n args : tuple\n Positional arguments to pass to function in addition to the array/series\n\n Additional keyword arguments will be passed as keywords to the function\n\n Returns\n -------\n applied : Series or DataFrame\n\n Examples\n --------\n >>> import pandas as pd\n >>> import dask.dataframe as dd\n >>> df = pd.DataFrame({'x': [1, 2, 3, 4, 5],\n ... 'y': [1., 2., 3., 4., 5.]})\n >>> ddf = dd.from_pandas(df, npartitions=2)\n\n Apply a function to row-wise passing in extra arguments in ``args`` and\n ``kwargs``:\n\n >>> def myadd(row, a, b=1):\n ... 
return row.sum() + a + b\n >>> res = ddf.apply(myadd, axis=1, args=(2,), b=1.5) # doctest: +SKIP\n\n By default, dask tries to infer the output metadata by running your\n provided function on some fake data. This works well in many cases, but\n can sometimes be expensive, or even fail. To avoid this, you can\n manually specify the output metadata with the ``meta`` keyword. This\n can be specified in many forms, for more information see\n ``dask.dataframe.utils.make_meta``.\n\n Here we specify the output is a Series with name ``'x'``, and dtype\n ``float64``:\n\n >>> res = ddf.apply(myadd, axis=1, args=(2,), b=1.5, meta=('x', 'f8'))\n\n In the case where the metadata doesn't change, you can also pass in\n the object itself directly:\n\n >>> res = ddf.apply(lambda row: row + 1, axis=1, meta=ddf)\n\n See Also\n --------\n dask.DataFrame.map_partitions\n \"\"\"\n\n axis = self._validate_axis(axis)\n pandas_kwargs = {\"axis\": axis, \"raw\": raw}\n\n if PANDAS_VERSION >= \"0.23.0\":\n kwds.setdefault(\"result_type\", None)\n\n if not PANDAS_GT_100:\n pandas_kwargs[\"broadcast\"] = broadcast\n pandas_kwargs[\"reduce\"] = None\n\n kwds.update(pandas_kwargs)\n\n if axis == 0:\n msg = (\n \"dd.DataFrame.apply only supports axis=1\\n\"\n \" Try: df.apply(func, axis=1)\"\n )\n raise NotImplementedError(msg)\n\n if meta is no_default:\n meta = _emulate(\n M.apply, self._meta_nonempty, func, args=args, udf=True, **kwds\n )\n warnings.warn(meta_warning(meta))\n\n return map_partitions(M.apply, self, func, args=args, meta=meta, **kwds)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.applymap_DataFrame.corr.return.cov_corr_self_min_period": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.applymap_DataFrame.corr.return.cov_corr_self_min_period", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4333, "end_line": 4379, "span_ids": ["DataFrame.round", "DataFrame.applymap", "DataFrame.mode", "DataFrame.corr", "DataFrame.cov"], "tokens": 381}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n @derived_from(pd.DataFrame)\n def applymap(self, func, meta=\"__no_default__\"):\n return elemwise(M.applymap, self, func, meta=meta)\n\n @derived_from(pd.DataFrame)\n def round(self, decimals=0):\n return elemwise(M.round, self, decimals)\n\n @derived_from(pd.DataFrame)\n def mode(self, dropna=True, split_every=False):\n mode_series_list = []\n for col_index in range(len(self.columns)):\n col_series = self.iloc[:, col_index]\n mode_series = Series.mode(\n col_series, dropna=dropna, split_every=split_every\n )\n mode_series.name = col_series.name\n mode_series_list.append(mode_series)\n\n name = \"concat-\" + tokenize(*mode_series_list)\n\n dsk = {\n (name, 0): (\n apply,\n methods.concat,\n [[(df._name, 0) for df in mode_series_list]],\n 
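Following the ``apply`` docstring above, a compact sketch with an explicit ``meta`` so no sample computation is needed (names are illustrative):

import pandas as pd
import dask.dataframe as dd

df = pd.DataFrame({"x": [1, 2, 3, 4, 5], "y": [1.0, 2.0, 3.0, 4.0, 5.0]})
ddf = dd.from_pandas(df, npartitions=2)

# meta=(name, dtype) declares the output: a float64 Series named "total"
res = ddf.apply(lambda row: row.sum(), axis=1, meta=("total", "f8"))
print(res.compute().tolist())  # [2.0, 4.0, 6.0, 8.0, 10.0]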
{\"axis\": 1},\n )\n }\n\n meta = methods.concat([df._meta for df in mode_series_list], axis=1)\n graph = HighLevelGraph.from_collections(\n name, dsk, dependencies=mode_series_list\n )\n ddf = new_dd_object(graph, name, meta, divisions=(None, None))\n\n return ddf\n\n @derived_from(pd.DataFrame)\n def cov(self, min_periods=None, split_every=False):\n return cov_corr(self, min_periods, split_every=split_every)\n\n @derived_from(pd.DataFrame)\n def corr(self, method=\"pearson\", min_periods=None, split_every=False):\n if method != \"pearson\":\n raise NotImplementedError(\"Only Pearson correlation has been implemented\")\n return cov_corr(self, min_periods, True, split_every=split_every)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.info_DataFrame.info.put_lines_buf_lines_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.info_DataFrame.info.put_lines_buf_lines_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4381, "end_line": 4465, "span_ids": ["DataFrame.info"], "tokens": 591}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n def info(self, buf=None, verbose=False, memory_usage=False):\n \"\"\"\n Concise summary of a Dask DataFrame.\n \"\"\"\n\n if buf is None:\n import sys\n\n buf = sys.stdout\n\n lines = [str(type(self))]\n\n if len(self.columns) == 0:\n lines.append(\"Index: 0 entries\")\n lines.append(\"Empty %s\" % type(self).__name__)\n put_lines(buf, lines)\n return\n\n # Group and execute the required computations\n computations = {}\n if verbose:\n computations.update({\"index\": self.index, \"count\": self.count()})\n if memory_usage:\n computations.update(\n {\"memory_usage\": self.map_partitions(M.memory_usage, index=True)}\n )\n computations = dict(\n zip(computations.keys(), da.compute(*computations.values()))\n )\n\n if verbose:\n import textwrap\n\n index = computations[\"index\"]\n counts = computations[\"count\"]\n lines.append(index_summary(index))\n lines.append(\"Data columns (total {} columns):\".format(len(self.columns)))\n\n from pandas.io.formats.printing import pprint_thing\n\n space = max([len(pprint_thing(k)) for k in self.columns]) + 1\n column_width = max(space, 7)\n\n header = (\n textwrap.dedent(\n \"\"\"\\\n # {{column:<{column_width}}} Non-Null Count Dtype\n --- {{underl:<{column_width}}} -------------- -----\"\"\"\n )\n .format(column_width=column_width)\n .format(column=\"Column\", underl=\"------\")\n )\n column_template = textwrap.dedent(\n \"\"\"\\\n {{i:^3}} {{name:<{column_width}}} {{count}} non-null {{dtype}}\"\"\".format(\n column_width=column_width\n )\n )\n column_info = [\n column_template.format(\n i=pprint_thing(i),\n name=pprint_thing(name),\n count=pprint_thing(count),\n dtype=pprint_thing(dtype),\n )\n for i, (name, count, dtype) in enumerate(\n 
zip(self.columns, counts, self.dtypes)\n )\n ]\n lines.extend(header.split(\"\\n\"))\n else:\n column_info = [index_summary(self.columns, name=\"Columns\")]\n\n lines.extend(column_info)\n dtype_counts = [\n \"%s(%d)\" % k\n for k in sorted(self.dtypes.value_counts().iteritems(), key=str)\n ]\n lines.append(\"dtypes: {}\".format(\", \".join(dtype_counts)))\n\n if memory_usage:\n memory_int = computations[\"memory_usage\"].sum()\n lines.append(\"memory usage: {}\\n\".format(memory_repr(memory_int)))\n\n put_lines(buf, lines)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.memory_usage_DataFrame.pivot_table.return.pivot_table_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.memory_usage_DataFrame.pivot_table.return.pivot_table_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4467, "end_line": 4497, "span_ids": ["DataFrame.memory_usage", "DataFrame.pivot_table"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n @derived_from(pd.DataFrame)\n def memory_usage(self, index=True, deep=False):\n result = self.map_partitions(M.memory_usage, index=index, deep=deep)\n result = result.groupby(result.index).sum()\n return result\n\n def pivot_table(self, index=None, columns=None, values=None, aggfunc=\"mean\"):\n \"\"\"\n Create a spreadsheet-style pivot table as a DataFrame. 
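A short usage sketch for ``info`` as defined above; ``verbose`` and ``memory_usage`` each add computations that the method groups into a single ``compute`` call:

import pandas as pd
import dask.dataframe as dd

ddf = dd.from_pandas(pd.DataFrame({"x": [1, 2, 3], "y": list("abc")}), npartitions=2)
# Computes counts, index summary, and per-partition memory together
ddf.info(verbose=True, memory_usage=True)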
Target ``columns``\n must have category dtype to infer result's ``columns``.\n ``index``, ``columns``, ``values`` and ``aggfunc`` must be all scalar.\n\n Parameters\n ----------\n values : scalar\n column to aggregate\n index : scalar\n column to be index\n columns : scalar\n column to be columns\n aggfunc : {'mean', 'sum', 'count'}, default 'mean'\n\n Returns\n -------\n table : DataFrame\n \"\"\"\n from .reshape import pivot_table\n\n return pivot_table(\n self, index=index, columns=columns, values=values, aggfunc=aggfunc\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.melt_DataFrame.melt.return.melt_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.melt_DataFrame.melt.return.melt_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4499, "end_line": 4551, "span_ids": ["DataFrame.melt"], "tokens": 363}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n def melt(\n self,\n id_vars=None,\n value_vars=None,\n var_name=None,\n value_name=\"value\",\n col_level=None,\n ):\n \"\"\"\n Unpivots a DataFrame from wide format to long format,\n optionally leaving identifier variables set.\n\n This function is useful to massage a DataFrame into a format where\n one or more columns are identifier variables (``id_vars``), while\n all other columns, considered measured variables (``value_vars``),\n are \"unpivoted\" to the row axis, leaving just two non-identifier\n columns, 'variable' and 'value'.\n\n Parameters\n ----------\n frame : DataFrame\n id_vars : tuple, list, or ndarray, optional\n Column(s) to use as identifier variables.\n value_vars : tuple, list, or ndarray, optional\n Column(s) to unpivot. If not specified, uses all columns that\n are not set as `id_vars`.\n var_name : scalar\n Name to use for the 'variable' column. 
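Per the docstring above, the ``columns`` argument must be categorical so the output columns can be inferred without computing; an illustrative sketch:

import pandas as pd
import dask.dataframe as dd

df = pd.DataFrame(
    {"i": [0, 0, 1, 1],
     "c": pd.Categorical(["a", "b", "a", "b"]),  # known categories are required
     "v": [1.0, 2.0, 3.0, 4.0]}
)
ddf = dd.from_pandas(df, npartitions=2)
print(ddf.pivot_table(index="i", columns="c", values="v", aggfunc="sum").compute())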
If None it uses\n ``frame.columns.name`` or 'variable'.\n value_name : scalar, default 'value'\n Name to use for the 'value' column.\n col_level : int or string, optional\n If columns are a MultiIndex then use this level to melt.\n\n Returns\n -------\n DataFrame\n Unpivoted DataFrame.\n\n See Also\n --------\n pandas.DataFrame.melt\n \"\"\"\n from .reshape import melt\n\n return melt(\n self,\n id_vars=id_vars,\n value_vars=value_vars,\n var_name=var_name,\n value_name=value_name,\n col_level=col_level,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.to_records_DataFrame._repr_html_.return.self__HTML_FMT_format_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.to_records_DataFrame._repr_html_.return.self__HTML_FMT_format_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4553, "end_line": 4596, "span_ids": ["DataFrame._repr_data", "DataFrame.to_records", "DataFrame:11", "DataFrame._repr_html_", "DataFrame.to_html"], "tokens": 344}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n def to_records(self, index=False, lengths=None):\n from .io import to_records\n\n if lengths is True:\n lengths = tuple(self.map_partitions(len).compute())\n\n records = to_records(self)\n\n chunks = self._validate_chunks(records, lengths)\n records._chunks = (chunks[0],)\n\n return records\n\n @derived_from(pd.DataFrame)\n def to_html(self, max_rows=5):\n # pd.Series doesn't have html repr\n data = self._repr_data().to_html(max_rows=max_rows, show_dimensions=False)\n return self._HTML_FMT.format(\n data=data, name=key_split(self._name), task=len(self.dask)\n )\n\n def _repr_data(self):\n meta = self._meta\n index = self._repr_divisions\n cols = meta.columns\n if len(cols) == 0:\n series_df = pd.DataFrame([[]] * len(index), columns=cols, index=index)\n else:\n series_df = pd.concat(\n [_repr_data_series(s, index=index) for _, s in meta.iteritems()], axis=1\n )\n return series_df\n\n _HTML_FMT = \"\"\"
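A minimal sketch of the ``melt`` wrapper above (illustrative data):

import pandas as pd
import dask.dataframe as dd

ddf = dd.from_pandas(pd.DataFrame({"id": [1, 2], "a": [10, 20], "b": [30, 40]}), npartitions=1)
# Wide -> long: one row per (id, variable) pair; columns: id, variable, value
print(ddf.melt(id_vars="id", value_vars=["a", "b"]).compute())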
<div><strong>Dask DataFrame Structure:</strong></div>\n{data}\n<div>Dask Name: {name}, {task} tasks</div>
\"\"\"\n\n def _repr_html_(self):\n data = self._repr_data().to_html(\n max_rows=5, show_dimensions=False, notebook=True\n )\n return self._HTML_FMT.format(\n data=data, name=key_split(self._name), task=len(self.dask)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame._select_columns_or_index_DataFrame._is_column_label_reference.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame._select_columns_or_index_DataFrame._is_column_label_reference.return._", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4598, "end_line": 4642, "span_ids": ["DataFrame._is_column_label_reference", "DataFrame._select_columns_or_index"], "tokens": 282}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n def _select_columns_or_index(self, columns_or_index):\n \"\"\"\n Parameters\n ----------\n columns_or_index\n Column or index name, or a list of these\n\n Returns\n -------\n dd.DataFrame\n Dask DataFrame with columns corresponding to each column or\n index level in columns_or_index. If included, the column\n corresponding to the index level is named _index\n \"\"\"\n\n # Ensure columns_or_index is a list\n columns_or_index = (\n columns_or_index\n if isinstance(columns_or_index, list)\n else [columns_or_index]\n )\n\n column_names = [\n n for n in columns_or_index if self._is_column_label_reference(n)\n ]\n\n selected_df = self[column_names]\n if self._contains_index_name(columns_or_index):\n # Index name was included\n selected_df = selected_df.assign(_index=self.index)\n\n return selected_df\n\n def _is_column_label_reference(self, key):\n \"\"\"\n Test whether a key is a column label reference\n\n To be considered a column label reference, `key` must match the name of at\n least one column.\n \"\"\"\n return (\n not is_dask_collection(key)\n and (np.isscalar(key) or isinstance(key, tuple))\n and key in self.columns\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__bind_operators_is_broadcastable.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__bind_operators_is_broadcastable.return._", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4645, "end_line": 4716, "span_ids": ["impl:9", "DataFrame._is_column_label_reference", "is_broadcastable"], "tokens": 380}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# bind operators\nfor op in [\n operator.abs,\n operator.add,\n operator.and_,\n operator.eq,\n operator.gt,\n operator.ge,\n operator.inv,\n operator.lt,\n operator.le,\n operator.mod,\n operator.mul,\n operator.ne,\n operator.neg,\n operator.or_,\n operator.pow,\n operator.sub,\n operator.truediv,\n operator.floordiv,\n operator.xor,\n]:\n _Frame._bind_operator(op)\n Scalar._bind_operator(op)\n\nfor name in [\n \"add\",\n \"sub\",\n \"mul\",\n \"div\",\n \"divide\",\n \"truediv\",\n \"floordiv\",\n \"mod\",\n \"pow\",\n \"radd\",\n \"rsub\",\n \"rmul\",\n \"rdiv\",\n \"rtruediv\",\n \"rfloordiv\",\n \"rmod\",\n \"rpow\",\n]:\n meth = getattr(pd.DataFrame, name)\n DataFrame._bind_operator_method(name, meth)\n\n meth = getattr(pd.Series, name)\n Series._bind_operator_method(name, meth)\n\nfor name in [\"lt\", \"gt\", \"le\", \"ge\", \"ne\", \"eq\"]:\n meth = getattr(pd.DataFrame, name)\n DataFrame._bind_comparison_method(name, meth)\n\n meth = getattr(pd.Series, name)\n Series._bind_comparison_method(name, meth)\n\n\ndef is_broadcastable(dfs, s):\n \"\"\"\n This Series is broadcastable against another dataframe in the sequence\n \"\"\"\n return (\n isinstance(s, Series)\n and s.npartitions == 1\n and s.known_divisions\n and any(\n s.divisions == (df.columns.min(), df.columns.max())\n for df in dfs\n if isinstance(df, DataFrame)\n )\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_elemwise_elemwise.graph.HighLevelGraph_from_colle": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_elemwise_elemwise.graph.HighLevelGraph_from_colle", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4719, "end_line": 4800, "span_ids": ["elemwise"], "tokens": 718}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def elemwise(op, *args, **kwargs):\n \"\"\"Elementwise operation for Dask dataframes\n\n Parameters\n ----------\n op: callable\n Function to apply across input dataframes\n *args: DataFrames, Series, Scalars, Arrays,\n The arguments of the operation\n **kwrags: scalars\n meta: pd.DataFrame, pd.Series (optional)\n Valid metadata for the operation. Will evaluate on a small piece of\n data if not provided.\n transform_divisions: boolean\n If the input is a ``dask.dataframe.Index`` we normally will also apply\n the function onto the divisions and apply those transformed divisions\n to the output. 
You can pass ``transform_divisions=False`` to override\n this behavior\n\n Examples\n --------\n >>> elemwise(operator.add, df.x, df.y) # doctest: +SKIP\n \"\"\"\n meta = kwargs.pop(\"meta\", no_default)\n out = kwargs.pop(\"out\", None)\n transform_divisions = kwargs.pop(\"transform_divisions\", True)\n\n _name = funcname(op) + \"-\" + tokenize(op, *args, **kwargs)\n\n args = _maybe_from_pandas(args)\n\n from .multi import _maybe_align_partitions\n\n args = _maybe_align_partitions(args)\n dasks = [arg for arg in args if isinstance(arg, (_Frame, Scalar, Array))]\n dfs = [df for df in dasks if isinstance(df, _Frame)]\n\n # Clean up dask arrays if present\n deps = dasks.copy()\n for i, a in enumerate(dasks):\n if not isinstance(a, Array):\n continue\n # Ensure that they have similar-ish chunk structure\n if not all(not a.chunks or len(a.chunks[0]) == df.npartitions for df in dfs):\n msg = (\n \"When combining dask arrays with dataframes they must \"\n \"match chunking exactly. Operation: %s\" % funcname(op)\n )\n raise ValueError(msg)\n # Rechunk to have a single chunk along all other axes\n if a.ndim > 1:\n a = a.rechunk({i + 1: d for i, d in enumerate(a.shape[1:])})\n dasks[i] = a\n\n divisions = dfs[0].divisions\n if transform_divisions and isinstance(dfs[0], Index) and len(dfs) == 1:\n try:\n divisions = op(\n *[pd.Index(arg.divisions) if arg is dfs[0] else arg for arg in args],\n **kwargs,\n )\n if isinstance(divisions, pd.Index):\n divisions = methods.tolist(divisions)\n except Exception:\n pass\n else:\n if not valid_divisions(divisions):\n divisions = [None] * (dfs[0].npartitions + 1)\n\n _is_broadcastable = partial(is_broadcastable, dfs)\n dfs = list(remove(_is_broadcastable, dfs))\n\n other = [\n (i, arg)\n for i, arg in enumerate(args)\n if not isinstance(arg, (_Frame, Scalar, Array))\n ]\n\n # adjust the key length of Scalar\n dsk = partitionwise_graph(op, _name, *args, **kwargs)\n\n graph = HighLevelGraph.from_collections(_name, dsk, dependencies=deps)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_elemwise.if_meta_is_no_default__elemwise.return.handle_out_out_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_elemwise.if_meta_is_no_default__elemwise.return.handle_out_out_result_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4802, "end_line": 4820, "span_ids": ["elemwise"], "tokens": 196}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def elemwise(op, *args, **kwargs):\n # ... 
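Since ``elemwise`` is importable from ``dask.dataframe.core``, the docstring example can be run along these lines (a sketch; data is illustrative):

import operator
import pandas as pd
import dask.dataframe as dd
from dask.dataframe.core import elemwise

ddf = dd.from_pandas(pd.DataFrame({"x": [1, 2, 3], "y": [10, 20, 30]}), npartitions=2)
# Applies operator.add partition-by-partition after aligning the inputs
print(elemwise(operator.add, ddf.x, ddf.y).compute().tolist())  # [11, 22, 33]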
other code\n\n if meta is no_default:\n if len(dfs) >= 2 and not all(hasattr(d, \"npartitions\") for d in dasks):\n # should not occur in current funcs\n msg = \"elemwise with 2 or more DataFrames and Scalar is not supported\"\n raise NotImplementedError(msg)\n # For broadcastable series, use no rows.\n parts = [\n d._meta\n if _is_broadcastable(d)\n else np.empty((), dtype=d.dtype)\n if isinstance(d, Array)\n else d._meta_nonempty\n for d in dasks\n ]\n with raise_on_meta_error(funcname(op)):\n meta = partial_by_order(*parts, function=op, other=other)\n\n result = new_dd_object(graph, _name, meta, divisions)\n return handle_out(out, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_handle_out_handle_out.if_isinstance_out_Serie.else_.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_handle_out_handle_out.if_isinstance_out_Serie.else_.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4823, "end_line": 4865, "span_ids": ["handle_out"], "tokens": 321}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def handle_out(out, result):\n \"\"\"Handle out parameters\n\n If out is a dask.DataFrame, dask.Series or dask.Scalar then\n this overwrites the contents of it with the result\n \"\"\"\n if isinstance(out, tuple):\n if len(out) == 1:\n out = out[0]\n elif len(out) > 1:\n raise NotImplementedError(\"The out parameter is not fully supported\")\n else:\n out = None\n\n if out is not None and type(out) != type(result):\n raise TypeError(\n \"Mismatched types between result and out parameter. \"\n \"out=%s, result=%s\" % (str(type(out)), str(type(result)))\n )\n\n if isinstance(out, DataFrame):\n if len(out.columns) != len(result.columns):\n raise ValueError(\n \"Mismatched columns count between result and out parameter. 
\"\n \"out=%s, result=%s\" % (str(len(out.columns)), str(len(result.columns)))\n )\n\n if isinstance(out, (Series, DataFrame, Scalar)):\n out._meta = result._meta\n out._name = result._name\n out.dask = result.dask\n\n if not isinstance(out, Scalar):\n out.divisions = result.divisions\n elif out is not None:\n msg = (\n \"The out parameter is not fully supported.\"\n \" Received type %s, expected %s \"\n % (typename(type(out)), typename(type(result)))\n )\n raise NotImplementedError(msg)\n else:\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__maybe_from_pandas_split_out_on_cols.return.df_cols_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__maybe_from_pandas_split_out_on_cols.return.df_cols_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4868, "end_line": 4909, "span_ids": ["split_evenly", "split_out_on_index", "hash_shard", "split_out_on_cols", "_maybe_from_pandas"], "tokens": 297}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _maybe_from_pandas(dfs):\n from .io import from_pandas\n\n dfs = [\n from_pandas(df, 1)\n if (is_series_like(df) or is_dataframe_like(df)) and not is_dask_collection(df)\n else df\n for df in dfs\n ]\n return dfs\n\n\ndef hash_shard(\n df, nparts, split_out_setup=None, split_out_setup_kwargs=None, ignore_index=False\n):\n if split_out_setup:\n h = split_out_setup(df, **(split_out_setup_kwargs or {}))\n else:\n h = df\n\n h = hash_object_dispatch(h, index=False)\n if is_series_like(h):\n h = h.values\n np.mod(h, nparts, out=h)\n return group_split_dispatch(df, h, nparts, ignore_index=ignore_index)\n\n\ndef split_evenly(df, k):\n \"\"\" Split dataframe into k roughly equal parts \"\"\"\n divisions = np.linspace(0, len(df), k + 1).astype(int)\n return {i: df.iloc[divisions[i] : divisions[i + 1]] for i in range(k)}\n\n\ndef split_out_on_index(df):\n h = df.index\n if isinstance(h, pd.MultiIndex):\n h = pd.DataFrame([], index=h).reset_index()\n return h\n\n\ndef split_out_on_cols(df, cols=None):\n return df[cols]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_apply_concat_apply_apply_concat_apply.npartitions_2.npartitions_pop_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_apply_concat_apply_apply_concat_apply.npartitions_2.npartitions_pop_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4912, "end_line": 5011, "span_ids": ["apply_concat_apply"], 
"tokens": 760}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@insert_meta_param_description\ndef apply_concat_apply(\n args,\n chunk=None,\n aggregate=None,\n combine=None,\n meta=no_default,\n token=None,\n chunk_kwargs=None,\n aggregate_kwargs=None,\n combine_kwargs=None,\n split_every=None,\n split_out=None,\n split_out_setup=None,\n split_out_setup_kwargs=None,\n sort=None,\n ignore_index=False,\n **kwargs,\n):\n \"\"\"Apply a function to blocks, then concat, then apply again\n\n Parameters\n ----------\n args :\n Positional arguments for the `chunk` function. All `dask.dataframe`\n objects should be partitioned and indexed equivalently.\n chunk : function [block-per-arg] -> block\n Function to operate on each block of data\n aggregate : function concatenated-block -> block\n Function to operate on the concatenated result of chunk\n combine : function concatenated-block -> block, optional\n Function to operate on intermediate concatenated results of chunk\n in a tree-reduction. If not provided, defaults to aggregate.\n $META\n token : str, optional\n The name to use for the output keys.\n chunk_kwargs : dict, optional\n Keywords for the chunk function only.\n aggregate_kwargs : dict, optional\n Keywords for the aggregate function only.\n combine_kwargs : dict, optional\n Keywords for the combine function only.\n split_every : int, optional\n Group partitions into groups of this size while performing a\n tree-reduction. If set to False, no tree-reduction will be used,\n and all intermediates will be concatenated and passed to ``aggregate``.\n Default is 8.\n split_out : int, optional\n Number of output partitions. Split occurs after first chunk reduction.\n split_out_setup : callable, optional\n If provided, this function is called on each chunk before performing\n the hash-split. It should return a pandas object, where each row\n (excluding the index) is hashed. If not provided, the chunk is hashed\n as is.\n split_out_setup_kwargs : dict, optional\n Keywords for the `split_out_setup` function only.\n sort : bool, default None\n If allowed, sort the keys of the output aggregation.\n ignore_index : bool, default False\n If True, do not preserve index values throughout ACA operations.\n kwargs :\n All remaining keywords will be passed to ``chunk``, ``aggregate``, and\n ``combine``.\n\n Examples\n --------\n >>> def chunk(a_block, b_block):\n ... pass\n\n >>> def agg(df):\n ... 
pass\n\n >>> apply_concat_apply([a, b], chunk=chunk, aggregate=agg) # doctest: +SKIP\n \"\"\"\n if chunk_kwargs is None:\n chunk_kwargs = dict()\n if aggregate_kwargs is None:\n aggregate_kwargs = dict()\n chunk_kwargs.update(kwargs)\n aggregate_kwargs.update(kwargs)\n\n if combine is None:\n if combine_kwargs:\n raise ValueError(\"`combine_kwargs` provided with no `combine`\")\n combine = aggregate\n combine_kwargs = aggregate_kwargs\n else:\n if combine_kwargs is None:\n combine_kwargs = dict()\n combine_kwargs.update(kwargs)\n\n if not isinstance(args, (tuple, list)):\n args = [args]\n\n dfs = [arg for arg in args if isinstance(arg, _Frame)]\n\n npartitions = set(arg.npartitions for arg in dfs)\n if len(npartitions) > 1:\n raise ValueError(\"All arguments must have same number of partitions\")\n npartitions = npartitions.pop()\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_apply_concat_apply.if_split_every_is_None__apply_concat_apply._Aggregate": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_apply_concat_apply.if_split_every_is_None__apply_concat_apply._Aggregate", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5035, "end_line": 5121, "span_ids": ["apply_concat_apply"], "tokens": 762}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@insert_meta_param_description\ndef apply_concat_apply(\n args,\n chunk=None,\n aggregate=None,\n combine=None,\n meta=no_default,\n token=None,\n chunk_kwargs=None,\n aggregate_kwargs=None,\n combine_kwargs=None,\n split_every=None,\n split_out=None,\n split_out_setup=None,\n split_out_setup_kwargs=None,\n sort=None,\n ignore_index=False,\n **kwargs,\n):\n # ... 
other code\n\n if split_every is None:\n split_every = 8\n elif split_every is False:\n split_every = npartitions\n elif split_every < 2 or not isinstance(split_every, Integral):\n raise ValueError(\"split_every must be an integer >= 2\")\n\n token_key = tokenize(\n token or (chunk, aggregate),\n meta,\n args,\n chunk_kwargs,\n aggregate_kwargs,\n combine_kwargs,\n split_every,\n split_out,\n split_out_setup,\n split_out_setup_kwargs,\n )\n\n # Chunk\n a = \"{0}-chunk-{1}\".format(token or funcname(chunk), token_key)\n if len(args) == 1 and isinstance(args[0], _Frame) and not chunk_kwargs:\n dsk = {\n (a, 0, i, 0): (chunk, key) for i, key in enumerate(args[0].__dask_keys__())\n }\n else:\n dsk = {\n (a, 0, i, 0): (\n apply,\n chunk,\n [(x._name, i) if isinstance(x, _Frame) else x for x in args],\n chunk_kwargs,\n )\n for i in range(npartitions)\n }\n\n # Split\n if split_out and split_out > 1:\n split_prefix = \"split-%s\" % token_key\n shard_prefix = \"shard-%s\" % token_key\n for i in range(npartitions):\n dsk[(split_prefix, i)] = (\n hash_shard,\n (a, 0, i, 0),\n split_out,\n split_out_setup,\n split_out_setup_kwargs,\n ignore_index,\n )\n for j in range(split_out):\n dsk[(shard_prefix, 0, i, j)] = (getitem, (split_prefix, i), j)\n a = shard_prefix\n else:\n split_out = 1\n\n # Combine\n b = \"{0}-combine-{1}\".format(token or funcname(combine), token_key)\n k = npartitions\n depth = 0\n while k > split_every:\n for part_i, inds in enumerate(partition_all(split_every, range(k))):\n for j in range(split_out):\n conc = (_concat, [(a, depth, i, j) for i in inds], ignore_index)\n if combine_kwargs:\n dsk[(b, depth + 1, part_i, j)] = (\n apply,\n combine,\n [conc],\n combine_kwargs,\n )\n else:\n dsk[(b, depth + 1, part_i, j)] = (combine, conc)\n k = part_i + 1\n a = b\n depth += 1\n\n if sort is not None:\n if sort and split_out > 1:\n raise NotImplementedError(\n \"Cannot guarantee sorted keys for `split_out>1`.\"\n \" Try using split_out=1, or grouping with sort=False.\"\n )\n aggregate_kwargs = aggregate_kwargs or {}\n aggregate_kwargs[\"sort\"] = sort\n\n # Aggregate\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_apply_concat_apply.for_j_in_range_split_out__apply_concat_apply.return.new_dd_object_graph_b_m": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_apply_concat_apply.for_j_in_range_split_out__apply_concat_apply.return.new_dd_object_graph_b_m", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5100, "end_line": 5121, "span_ids": ["apply_concat_apply"], "tokens": 319}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@insert_meta_param_description\ndef apply_concat_apply(\n args,\n chunk=None,\n aggregate=None,\n combine=None,\n meta=no_default,\n token=None,\n chunk_kwargs=None,\n aggregate_kwargs=None,\n combine_kwargs=None,\n split_every=None,\n split_out=None,\n split_out_setup=None,\n split_out_setup_kwargs=None,\n sort=None,\n ignore_index=False,\n **kwargs,\n):\n # ... other code\n for j in range(split_out):\n b = \"{0}-agg-{1}\".format(token or funcname(aggregate), token_key)\n conc = (_concat, [(a, depth, i, j) for i in range(k)], ignore_index)\n if aggregate_kwargs:\n dsk[(b, j)] = (apply, aggregate, [conc], aggregate_kwargs)\n else:\n dsk[(b, j)] = (aggregate, conc)\n\n if meta is no_default:\n meta_chunk = _emulate(chunk, *args, udf=True, **chunk_kwargs)\n meta = _emulate(\n aggregate, _concat([meta_chunk], ignore_index), udf=True, **aggregate_kwargs\n )\n meta = make_meta(\n meta, index=(getattr(make_meta(dfs[0]), \"index\", None) if dfs else None)\n )\n\n graph = HighLevelGraph.from_collections(b, dsk, dependencies=dfs)\n\n divisions = [None] * (split_out + 1)\n\n return new_dd_object(graph, b, meta, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_aca__emulate.with_raise_on_meta_error_.return.func__extract_meta_args_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_aca__emulate.with_raise_on_meta_error_.return.func__extract_meta_args_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5124, "end_line": 5156, "span_ids": ["_emulate", "impl:26", "_extract_meta"], "tokens": 273}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "aca = 
apply_concat_apply\n\n\ndef _extract_meta(x, nonempty=False):\n \"\"\"\n Extract internal cache data (``_meta``) from dd.DataFrame / dd.Series\n \"\"\"\n if isinstance(x, (Scalar, _Frame)):\n return x._meta_nonempty if nonempty else x._meta\n elif isinstance(x, list):\n return [_extract_meta(_x, nonempty) for _x in x]\n elif isinstance(x, tuple):\n return tuple([_extract_meta(_x, nonempty) for _x in x])\n elif isinstance(x, dict):\n res = {}\n for k in x:\n res[k] = _extract_meta(x[k], nonempty)\n return res\n elif isinstance(x, Delayed):\n raise ValueError(\n \"Cannot infer dataframe metadata with a `dask.delayed` argument\"\n )\n else:\n return x\n\n\ndef _emulate(func, *args, **kwargs):\n \"\"\"\n Apply a function using args / kwargs. If arguments contain dd.DataFrame /\n dd.Series, using internal cache (``_meta``) for calculation\n \"\"\"\n with raise_on_meta_error(funcname(func), udf=kwargs.pop(\"udf\", False)):\n return func(*_extract_meta(args, True), **_extract_meta(kwargs, True))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_map_partitions_map_partitions.divisions.dfs_0_divisions": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_map_partitions_map_partitions.divisions.dfs_0_divisions", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5159, "end_line": 5265, "span_ids": ["map_partitions"], "tokens": 791}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@insert_meta_param_description\ndef map_partitions(\n func,\n *args,\n meta=no_default,\n enforce_metadata=True,\n transform_divisions=True,\n **kwargs,\n):\n \"\"\"Apply Python function on each DataFrame partition.\n\n Parameters\n ----------\n func : function\n Function applied to each partition.\n args, kwargs :\n Arguments and keywords to pass to the function. At least one of the\n args should be a Dask.dataframe. Arguments and keywords may contain\n ``Scalar``, ``Delayed`` or regular python objects. 
DataFrame-like args\n (both dask and pandas) will be repartitioned to align (if necessary)\n before applying the function.\n enforce_metadata : bool\n Whether or not to enforce the structure of the metadata at runtime.\n This will rename and reorder columns for each partition,\n and will raise an error if this doesn't work or types don't match.\n $META\n \"\"\"\n name = kwargs.pop(\"token\", None)\n\n assert callable(func)\n if name is not None:\n token = tokenize(meta, *args, **kwargs)\n else:\n name = funcname(func)\n token = tokenize(func, meta, *args, **kwargs)\n name = \"{0}-{1}\".format(name, token)\n\n from .multi import _maybe_align_partitions\n\n args = _maybe_from_pandas(args)\n args = _maybe_align_partitions(args)\n dfs = [df for df in args if isinstance(df, _Frame)]\n meta_index = getattr(make_meta(dfs[0]), \"index\", None) if dfs else None\n\n if meta is no_default:\n # Use non-normalized kwargs here, as we want the real values (not\n # delayed values)\n meta = _emulate(func, *args, udf=True, **kwargs)\n else:\n meta = make_meta(meta, index=meta_index)\n\n if all(isinstance(arg, Scalar) for arg in args):\n layer = {\n (name, 0): (apply, func, (tuple, [(arg._name, 0) for arg in args]), kwargs)\n }\n graph = HighLevelGraph.from_collections(name, layer, dependencies=args)\n return Scalar(graph, name, meta)\n elif not (has_parallel_type(meta) or is_arraylike(meta) and meta.shape):\n # If `meta` is not a pandas object, the concatenated results will be a\n # different type\n meta = make_meta(_concat([meta]), index=meta_index)\n\n # Ensure meta is empty series\n meta = make_meta(meta)\n\n args2 = []\n dependencies = []\n for arg in args:\n if isinstance(arg, _Frame):\n args2.append(arg)\n dependencies.append(arg)\n continue\n arg = normalize_arg(arg)\n arg2, collections = unpack_collections(arg)\n if collections:\n args2.append(arg2)\n dependencies.extend(collections)\n else:\n args2.append(arg)\n\n kwargs3 = {}\n simple = True\n for k, v in kwargs.items():\n v = normalize_arg(v)\n v, collections = unpack_collections(v)\n dependencies.extend(collections)\n kwargs3[k] = v\n if collections:\n simple = False\n\n if enforce_metadata:\n dsk = partitionwise_graph(\n apply_and_enforce,\n name,\n *args2,\n dependencies=dependencies,\n _func=func,\n _meta=meta,\n **kwargs3,\n )\n else:\n kwargs4 = kwargs if simple else kwargs3\n dsk = partitionwise_graph(\n func, name, *args2, **kwargs4, dependencies=dependencies\n )\n\n divisions = dfs[0].divisions\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_map_partitions.if_transform_divisions_an_map_partitions.return.new_dd_object_graph_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_map_partitions.if_transform_divisions_an_map_partitions.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5266, "end_line": 5280, "span_ids": ["map_partitions"], "tokens": 187}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@insert_meta_param_description\ndef map_partitions(\n func,\n *args,\n meta=no_default,\n enforce_metadata=True,\n transform_divisions=True,\n **kwargs,\n):\n # ... other code\n if transform_divisions and isinstance(dfs[0], Index) and len(dfs) == 1:\n try:\n divisions = func(\n *[pd.Index(a.divisions) if a is dfs[0] else a for a in args], **kwargs\n )\n if isinstance(divisions, pd.Index):\n divisions = methods.tolist(divisions)\n except Exception:\n pass\n else:\n if not valid_divisions(divisions):\n divisions = [None] * (dfs[0].npartitions + 1)\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)\n return new_dd_object(graph, name, meta, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_apply_and_enforce_apply_and_enforce.return.df": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_apply_and_enforce_apply_and_enforce.return.df", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5283, "end_line": 5299, "span_ids": ["apply_and_enforce"], "tokens": 135}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def apply_and_enforce(*args, **kwargs):\n \"\"\"Apply a function, and enforce the output to match meta\n\n Ensures the output has the same columns, even if empty.\"\"\"\n func = kwargs.pop(\"_func\")\n meta = kwargs.pop(\"_meta\")\n df = func(*args, **kwargs)\n if is_dataframe_like(df) or is_series_like(df) or is_index_like(df):\n if not len(df):\n return meta\n if is_dataframe_like(df):\n check_matching_columns(meta, df)\n c = meta.columns\n else:\n c = meta.name\n return _rename(c, df)\n return df", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__rename__rename.return.df": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__rename__rename.return.df", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5302, "end_line": 5346, "span_ids": ["_rename"], "tokens": 303}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _rename(columns, df):\n \"\"\"\n Rename columns of pd.DataFrame or name of pd.Series.\n Not for dd.DataFrame or dd.Series.\n\n Parameters\n ----------\n columns : tuple, string, pd.DataFrame or pd.Series\n Column names, Series name or pandas instance which has the\n target column names / name.\n df : pd.DataFrame or pd.Series\n target DataFrame / Series to be renamed\n \"\"\"\n assert not isinstance(df, _Frame)\n\n if columns is no_default:\n return df\n\n if isinstance(columns, Iterator):\n columns = list(columns)\n\n if is_dataframe_like(df):\n if is_dataframe_like(columns):\n columns = columns.columns\n if not isinstance(columns, pd.Index):\n columns = pd.Index(columns)\n if (\n len(columns) == len(df.columns)\n and type(columns) is type(df.columns)\n and columns.equals(df.columns)\n ):\n # if target is identical, rename is not necessary\n return df\n # deep=False doesn't doesn't copy any data/indices, so this is cheap\n df = df.copy(deep=False)\n df.columns = columns\n return df\n elif is_series_like(df) or is_index_like(df):\n if is_series_like(columns) or is_index_like(columns):\n columns = columns.name\n if df.name == columns:\n return df\n return df.rename(columns)\n # map_partition may pass other types\n return df", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__rename_dask__rename_dask.return.new_dd_object_graph_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__rename_dask__rename_dask.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5349, "end_line": 5371, "span_ids": ["_rename_dask"], "tokens": 184}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _rename_dask(df, names):\n \"\"\"\n Destructively rename columns of dd.DataFrame or name of dd.Series.\n Not for pd.DataFrame or pd.Series.\n\n Internally used to 
overwrite dd.DataFrame.columns and dd.Series.name\n We can't use map_partition because it applies function then rename\n\n Parameters\n ----------\n df : dd.DataFrame or dd.Series\n target DataFrame / Series to be renamed\n names : tuple, string\n Column names/Series name\n \"\"\"\n\n assert isinstance(df, _Frame)\n metadata = _rename(names, df._meta)\n name = \"rename-{0}\".format(tokenize(df, metadata))\n\n dsk = partitionwise_graph(_rename, name, metadata, df)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[df])\n return new_dd_object(graph, name, metadata, df.divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_quantile_quantile.df.df_dropna_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_quantile_quantile.df.df_dropna_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5374, "end_line": 5443, "span_ids": ["quantile"], "tokens": 595}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def quantile(df, q, method=\"default\"):\n \"\"\"Approximate quantiles of Series.\n\n Parameters\n ----------\n q : list/array of floats\n Iterable of numbers ranging from 0 to 100 for the desired quantiles\n method : {'default', 'tdigest', 'dask'}, optional\n What method to use. By default will use dask's internal custom\n algorithm (``'dask'``). 
If set to ``'tdigest'`` will use tdigest for\n floats and ints and fallback to the ``'dask'`` otherwise.\n \"\"\"\n # current implementation needs q to be sorted so\n # sort if array-like, otherwise leave it alone\n q_ndarray = np.array(q)\n if q_ndarray.ndim > 0:\n q_ndarray.sort(kind=\"mergesort\")\n q = q_ndarray\n\n assert isinstance(df, Series)\n\n allowed_methods = [\"default\", \"dask\", \"tdigest\"]\n if method not in allowed_methods:\n raise ValueError(\"method can only be 'default', 'dask' or 'tdigest'\")\n\n if method == \"default\":\n internal_method = \"dask\"\n else:\n internal_method = method\n\n # currently, only Series has quantile method\n if isinstance(df, Index):\n series_typ = df._meta.to_series()._constructor\n meta = df._meta_nonempty.to_series().quantile(q)\n else:\n if is_series_like(df._meta):\n series_typ = df._meta._constructor\n else:\n series_typ = df._meta._constructor_sliced\n meta = df._meta_nonempty.quantile(q)\n\n if is_series_like(meta):\n # Index.quantile(list-like) must be pd.Series, not pd.Index\n df_name = df.name\n finalize_tsk = lambda tsk: (series_typ, tsk, q, None, df_name)\n return_type = Series\n else:\n finalize_tsk = lambda tsk: (getitem, tsk, 0)\n return_type = Scalar\n q = [q]\n\n # pandas uses quantile in [0, 1]\n # numpy / everyone else uses [0, 100]\n qs = np.asarray(q) * 100\n token = tokenize(df, qs)\n\n if len(qs) == 0:\n name = \"quantiles-\" + token\n empty_index = pd.Index([], dtype=float)\n\n return Series(\n {(name, 0): series_typ([], name=df.name, index=empty_index, dtype=\"float\")},\n name,\n df._meta,\n [None, None],\n )\n else:\n new_divisions = [np.min(q), np.max(q)]\n\n df = df.dropna()\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_quantile.if_internal_method_td_quantile.return.return_type_graph_name2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_quantile.if_internal_method_td_quantile.return.return_type_graph_name2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5445, "end_line": 5488, "span_ids": ["quantile"], "tokens": 434}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def quantile(df, q, method=\"default\"):\n # ... 
other code\n\n if internal_method == \"tdigest\" and (\n np.issubdtype(df.dtype, np.floating) or np.issubdtype(df.dtype, np.integer)\n ):\n\n from dask.utils import import_required\n\n import_required(\n \"crick\", \"crick is a required dependency for using the t-digest method.\"\n )\n\n from dask.array.percentile import _tdigest_chunk, _percentiles_from_tdigest\n\n name = \"quantiles_tdigest-1-\" + token\n val_dsk = {\n (name, i): (_tdigest_chunk, (getattr, key, \"values\"))\n for i, key in enumerate(df.__dask_keys__())\n }\n\n name2 = \"quantiles_tdigest-2-\" + token\n merge_dsk = {\n (name2, 0): finalize_tsk((_percentiles_from_tdigest, qs, sorted(val_dsk)))\n }\n else:\n\n from dask.array.percentile import _percentile, merge_percentiles\n\n # Add 0 and 100 during calculation for more robust behavior (hopefully)\n calc_qs = np.pad(qs, 1, mode=\"constant\")\n calc_qs[-1] = 100\n name = \"quantiles-1-\" + token\n val_dsk = {\n (name, i): (_percentile, (getattr, key, \"values\"), calc_qs)\n for i, key in enumerate(df.__dask_keys__())\n }\n\n name2 = \"quantiles-2-\" + token\n merge_dsk = {\n (name2, 0): finalize_tsk(\n (merge_percentiles, qs, [calc_qs] * df.npartitions, sorted(val_dsk))\n )\n }\n dsk = merge(val_dsk, merge_dsk)\n graph = HighLevelGraph.from_collections(name2, dsk, dependencies=[df])\n return return_type(graph, name2, meta, new_divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_cov_corr_cov_corr.return.DataFrame_graph_name_me": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_cov_corr_cov_corr.return.DataFrame_graph_name_me", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5491, "end_line": 5563, "span_ids": ["cov_corr"], "tokens": 646}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def cov_corr(df, min_periods=None, corr=False, scalar=False, split_every=False):\n \"\"\"DataFrame covariance and pearson correlation.\n\n Computes pairwise covariance or correlation of columns, excluding NA/null\n values.\n\n Parameters\n ----------\n df : DataFrame\n min_periods : int, optional\n Minimum number of observations required per pair of columns\n to have a valid result.\n corr : bool, optional\n If True, compute the Pearson correlation. If False [default], compute\n the covariance.\n scalar : bool, optional\n If True, compute covariance between two variables as a scalar. Only\n valid if `df` has 2 columns. If False [default], compute the entire\n covariance/correlation matrix.\n split_every : int, optional\n Group partitions into groups of this size while performing a\n tree-reduction. 
If set to False, no tree-reduction will be used.\n Default is False.\n \"\"\"\n if min_periods is None:\n min_periods = 2\n elif min_periods < 2:\n raise ValueError(\"min_periods must be >= 2\")\n\n if split_every is False:\n split_every = df.npartitions\n elif split_every < 2 or not isinstance(split_every, Integral):\n raise ValueError(\"split_every must be an integer >= 2\")\n\n df = df._get_numeric_data()\n\n if scalar and len(df.columns) != 2:\n raise ValueError(\"scalar only valid for 2 column dataframe\")\n\n token = tokenize(df, min_periods, scalar, split_every)\n\n funcname = \"corr\" if corr else \"cov\"\n a = \"{0}-chunk-{1}\".format(funcname, df._name)\n dsk = {\n (a, i): (cov_corr_chunk, f, corr) for (i, f) in enumerate(df.__dask_keys__())\n }\n\n prefix = \"{0}-combine-{1}-\".format(funcname, df._name)\n k = df.npartitions\n b = a\n depth = 0\n while k > split_every:\n b = prefix + str(depth)\n for part_i, inds in enumerate(partition_all(split_every, range(k))):\n dsk[(b, part_i)] = (cov_corr_combine, [(a, i) for i in inds], corr)\n k = part_i + 1\n a = b\n depth += 1\n\n name = \"{0}-{1}\".format(funcname, token)\n dsk[(name, 0)] = (\n cov_corr_agg,\n [(a, i) for i in range(k)],\n df.columns,\n min_periods,\n corr,\n scalar,\n )\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[df])\n if scalar:\n return Scalar(graph, name, \"f8\")\n meta = make_meta([(c, \"f8\") for c in df.columns], index=df.columns)\n return DataFrame(graph, name, meta, (df.columns[0], df.columns[-1]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_cov_corr_chunk_cov_corr_chunk.return.out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_cov_corr_chunk_cov_corr_chunk.return.out", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5566, "end_line": 5597, "span_ids": ["cov_corr_chunk"], "tokens": 339}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def cov_corr_chunk(df, corr=False):\n \"\"\"Chunk part of a covariance or correlation computation\"\"\"\n shape = (df.shape[1], df.shape[1])\n df = df.astype(\"float64\", copy=False)\n sums = zeros_like_safe(df.values, shape=shape)\n counts = zeros_like_safe(df.values, shape=shape)\n for idx, col in enumerate(df):\n mask = df.iloc[:, idx].notnull()\n sums[idx] = df[mask].sum().values\n counts[idx] = df[mask].count().values\n cov = df.cov().values\n dtype = [(\"sum\", sums.dtype), (\"count\", counts.dtype), (\"cov\", cov.dtype)]\n if corr:\n with warnings.catch_warnings(record=True):\n warnings.simplefilter(\"always\")\n mu = (sums / counts).T\n m = zeros_like_safe(df.values, shape=shape)\n mask = df.isnull().values\n for idx, x in enumerate(df):\n # Avoid using ufunc.outer (not supported by cupy)\n mu_discrepancy = (\n np.subtract(df.iloc[:, idx].values[:, None], mu[idx][None, :]) ** 2\n )\n mu_discrepancy[mask] = 
np.nan\n m[idx] = np.nansum(mu_discrepancy, axis=0)\n m = m.T\n dtype.append((\"m\", m.dtype))\n\n out = {\"sum\": sums, \"count\": counts, \"cov\": cov * (counts - 1)}\n if corr:\n out[\"m\"] = m\n return out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_cov_corr_combine_cov_corr_combine.return.out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_cov_corr_combine_cov_corr_combine.return.out", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5600, "end_line": 5634, "span_ids": ["cov_corr_combine"], "tokens": 369}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def cov_corr_combine(data_in, corr=False):\n\n data = {\"sum\": None, \"count\": None, \"cov\": None}\n if corr:\n data[\"m\"] = None\n\n for k in data.keys():\n data[k] = [d[k] for d in data_in]\n data[k] = np.concatenate(data[k]).reshape((len(data[k]),) + data[k][0].shape)\n\n sums = np.nan_to_num(data[\"sum\"])\n counts = data[\"count\"]\n\n cum_sums = np.cumsum(sums, 0)\n cum_counts = np.cumsum(counts, 0)\n\n s1 = cum_sums[:-1]\n s2 = sums[1:]\n n1 = cum_counts[:-1]\n n2 = counts[1:]\n with np.errstate(invalid=\"ignore\"):\n d = (s2 / n2) - (s1 / n1)\n C = np.nansum(\n (n1 * n2) / (n1 + n2) * (d * d.transpose((0, 2, 1))), 0\n ) + np.nansum(data[\"cov\"], 0)\n\n out = {\"sum\": cum_sums[-1], \"count\": cum_counts[-1], \"cov\": C}\n\n if corr:\n nobs = np.where(cum_counts[-1], cum_counts[-1], np.nan)\n mu = cum_sums[-1] / nobs\n counts_na = np.where(counts, counts, np.nan)\n m = np.nansum(data[\"m\"] + counts * (sums / counts_na - mu) ** 2, axis=0)\n out[\"m\"] = m\n return out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_cov_corr_agg_cov_corr_agg.return.pd_DataFrame_mat_columns": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_cov_corr_agg_cov_corr_agg.return.pd_DataFrame_mat_columns", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5637, "end_line": 5651, "span_ids": ["cov_corr_agg"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def cov_corr_agg(data, cols, min_periods=2, corr=False, scalar=False):\n out = cov_corr_combine(data, corr)\n 
counts = out[\"count\"]\n C = out[\"cov\"]\n C[counts < min_periods] = np.nan\n if corr:\n m2 = out[\"m\"]\n den = np.sqrt(m2 * m2.T)\n else:\n den = np.where(counts, counts, np.nan) - 1\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n mat = C / den\n if scalar:\n return float(mat[0, 1])\n return pd.DataFrame(mat, columns=cols, index=cols)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_pd_split_pd_split.return._df_iloc_index_i_for_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_pd_split_pd_split.return._df_iloc_index_i_for_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5654, "end_line": 5680, "span_ids": ["pd_split"], "tokens": 282}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pd_split(df, p, random_state=None, shuffle=False):\n \"\"\"Split DataFrame into multiple pieces pseudorandomly\n\n >>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],\n ... 'b': [2, 3, 4, 5, 6, 7]})\n\n >>> a, b = pd_split(\n ... df, [0.5, 0.5], random_state=123, shuffle=True\n ... ) # roughly 50/50 split\n >>> a\n a b\n 3 4 5\n 0 1 2\n 5 6 7\n >>> b\n a b\n 1 2 3\n 4 5 6\n 2 3 4\n \"\"\"\n p = list(p)\n if shuffle:\n if not isinstance(random_state, np.random.RandomState):\n random_state = np.random.RandomState(random_state)\n df = df.sample(frac=1.0, random_state=random_state)\n index = pseudorandom(len(df), p, random_state)\n return [df.iloc[index == i] for i in range(len(p))]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__take_last_check_divisions.if_len_divisions_1_.raise_ValueError_msg_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__take_last_check_divisions.if_len_divisions_1_.raise_ValueError_msg_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5683, "end_line": 5732, "span_ids": ["_take_last", "check_divisions"], "tokens": 355}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _take_last(a, skipna=True):\n \"\"\"\n take last row (Series) of DataFrame / last value of Series\n considering NaN.\n\n Parameters\n ----------\n a : pd.DataFrame or pd.Series\n skipna : bool, default True\n Whether to exclude NaN\n\n 
\"\"\"\n\n def _last_valid(s):\n for i in range(1, min(10, len(s) + 1)):\n val = s.iloc[-i]\n if not pd.isnull(val):\n return val\n else:\n nonnull = s[s.notna()]\n if not nonnull.empty:\n return nonnull.iloc[-1]\n return None\n\n if skipna is False:\n return a.iloc[-1]\n else:\n # take last valid value excluding NaN, NaN location may be different\n # in each column\n if is_dataframe_like(a):\n # create Series from appropriate backend dataframe library\n series_typ = type(a.iloc[0:1, 0])\n if a.empty:\n return series_typ([], dtype=\"float\")\n return series_typ(\n {col: _last_valid(a[col]) for col in a.columns}, index=a.columns\n )\n else:\n return _last_valid(a)\n\n\ndef check_divisions(divisions):\n if not isinstance(divisions, (list, tuple)):\n raise ValueError(\"New division must be list or tuple\")\n divisions = list(divisions)\n if divisions != sorted(divisions):\n raise ValueError(\"New division must be sorted\")\n if len(divisions[:-1]) != len(list(unique(divisions[:-1]))):\n msg = \"New division must be unique, except for the last element\"\n raise ValueError(msg)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_repartition_divisions_repartition_divisions._left_part_of_new_divisi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_repartition_divisions_repartition_divisions._left_part_of_new_divisi", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5735, "end_line": 5808, "span_ids": ["repartition_divisions"], "tokens": 678}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def repartition_divisions(a, b, name, out1, out2, force=False):\n \"\"\"dask graph to repartition dataframe by new divisions\n\n Parameters\n ----------\n a : tuple\n old divisions\n b : tuple, list\n new divisions\n name : str\n name of old dataframe\n out1 : str\n name of temporary splits\n out2 : str\n name of new dataframe\n force : bool, default False\n Allows the expansion of the existing divisions.\n If False then the new divisions lower and upper bounds must be\n the same as the old divisions.\n\n Examples\n --------\n >>> repartition_divisions([1, 3, 7], [1, 4, 6, 7], 'a', 'b', 'c') # doctest: +SKIP\n {('b', 0): (, ('a', 0), 1, 3, False),\n ('b', 1): (, ('a', 1), 3, 4, False),\n ('b', 2): (, ('a', 1), 4, 6, False),\n ('b', 3): (, ('a', 1), 6, 7, False)\n ('c', 0): (,\n (, [('b', 0), ('b', 1)])),\n ('c', 1): ('b', 2),\n ('c', 2): ('b', 3)}\n \"\"\"\n check_divisions(b)\n\n if len(b) < 2:\n # minimum division is 2 elements, like [0, 0]\n raise ValueError(\"New division must be longer than 2 elements\")\n\n if force:\n if a[0] < b[0]:\n msg = (\n \"left side of the new division must be equal or smaller \"\n \"than old division\"\n )\n raise ValueError(msg)\n if a[-1] > b[-1]:\n msg = (\n \"right side of the new division must be equal or larger \"\n \"than old division\"\n )\n 
raise ValueError(msg)\n else:\n if a[0] != b[0]:\n msg = \"left side of old and new divisions are different\"\n raise ValueError(msg)\n if a[-1] != b[-1]:\n msg = \"right side of old and new divisions are different\"\n raise ValueError(msg)\n\n def _is_single_last_div(x):\n \"\"\"Whether last division only contains single label\"\"\"\n return len(x) >= 2 and x[-1] == x[-2]\n\n c = [a[0]]\n d = dict()\n low = a[0]\n\n i, j = 1, 1 # indices for old/new divisions\n k = 0 # index for temp divisions\n\n last_elem = _is_single_last_div(a)\n\n # process through old division\n # left part of new division can be processed in this loop\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_repartition_divisions.while_i_len_a_and_j__repartition_divisions.return.d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_repartition_divisions.while_i_len_a_and_j__repartition_divisions.return.d", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5809, "end_line": 5882, "span_ids": ["repartition_divisions"], "tokens": 795}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def repartition_divisions(a, b, name, out1, out2, force=False):\n # ... 
other code\n while i < len(a) and j < len(b):\n if a[i] < b[j]:\n # tuple is something like:\n # (methods.boundary_slice, ('from_pandas-#', 0), 3, 4, False))\n d[(out1, k)] = (methods.boundary_slice, (name, i - 1), low, a[i], False)\n low = a[i]\n i += 1\n elif a[i] > b[j]:\n d[(out1, k)] = (methods.boundary_slice, (name, i - 1), low, b[j], False)\n low = b[j]\n j += 1\n else:\n d[(out1, k)] = (methods.boundary_slice, (name, i - 1), low, b[j], False)\n low = b[j]\n if len(a) == i + 1 or a[i] < a[i + 1]:\n j += 1\n i += 1\n c.append(low)\n k += 1\n\n # right part of new division can remain\n if a[-1] < b[-1] or b[-1] == b[-2]:\n for _j in range(j, len(b)):\n # always use right-most of old division\n # because it may contain last element\n m = len(a) - 2\n d[(out1, k)] = (methods.boundary_slice, (name, m), low, b[_j], False)\n low = b[_j]\n c.append(low)\n k += 1\n else:\n # even if new division is processed through,\n # right-most element of old division can remain\n if last_elem and i < len(a):\n d[(out1, k)] = (methods.boundary_slice, (name, i - 1), a[i], a[i], False)\n k += 1\n c.append(a[-1])\n\n # replace last element of tuple with True\n d[(out1, k - 1)] = d[(out1, k - 1)][:-1] + (True,)\n\n i, j = 0, 1\n\n last_elem = _is_single_last_div(c)\n\n while j < len(b):\n tmp = []\n while c[i] < b[j]:\n tmp.append((out1, i))\n i += 1\n while (\n last_elem\n and c[i] == b[-1]\n and (b[-1] != b[-2] or j == len(b) - 1)\n and i < k\n ):\n # append if last split is not included\n tmp.append((out1, i))\n i += 1\n if len(tmp) == 0:\n # dummy slice to return empty DataFrame or Series,\n # which retain original data attributes (columns / name)\n d[(out2, j - 1)] = (methods.boundary_slice, (name, 0), a[0], a[0], False)\n elif len(tmp) == 1:\n d[(out2, j - 1)] = tmp[0]\n else:\n if not tmp:\n raise ValueError(\n \"check for duplicate partitions\\nold:\\n%s\\n\\n\"\n \"new:\\n%s\\n\\ncombined:\\n%s\" % (pformat(a), pformat(b), pformat(c))\n )\n d[(out2, j - 1)] = (methods.concat, tmp)\n j += 1\n return d", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_repartition_freq_repartition_freq.return.df_repartition_divisions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_repartition_freq_repartition_freq.return.df_repartition_divisions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5885, "end_line": 5904, "span_ids": ["repartition_freq"], "tokens": 191}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def repartition_freq(df, freq=None):\n \"\"\" Repartition a timeseries dataframe by a new frequency \"\"\"\n if not isinstance(df.divisions[0], pd.Timestamp):\n raise TypeError(\"Can only repartition on frequency for timeseries\")\n try:\n start = df.divisions[0].ceil(freq)\n except ValueError:\n start = df.divisions[0]\n divisions = methods.tolist(\n 
pd.date_range(start=start, end=df.divisions[-1], freq=freq)\n )\n if not len(divisions):\n divisions = [df.divisions[0], df.divisions[-1]]\n else:\n if divisions[-1] != df.divisions[-1]:\n divisions.append(df.divisions[-1])\n if divisions[0] != df.divisions[0]:\n divisions = [df.divisions[0]] + divisions\n\n return df.repartition(divisions=divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_repartition_size_total_mem_usage.return.mem_usage": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_repartition_size_total_mem_usage.return.mem_usage", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5907, "end_line": 5940, "span_ids": ["total_mem_usage", "repartition_size"], "tokens": 341}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def repartition_size(df, size):\n \"\"\"\n Repartition dataframe so that new partitions have approximately `size` memory usage each\n \"\"\"\n if isinstance(size, str):\n size = parse_bytes(size)\n size = int(size)\n\n mem_usages = df.map_partitions(total_mem_usage, deep=True).compute()\n\n # 1. split each partition that is larger than partition_size\n nsplits = 1 + mem_usages // size\n if np.any(nsplits > 1):\n split_name = \"repartition-split-{}-{}\".format(size, tokenize(df))\n df = _split_partitions(df, nsplits, split_name)\n # update mem_usages to account for the split partitions\n split_mem_usages = []\n for n, usage in zip(nsplits, mem_usages):\n split_mem_usages.extend([usage / n] * n)\n mem_usages = pd.Series(split_mem_usages)\n\n # 2. 
now that all partitions are less than size, concat them up to size\n assert np.all(mem_usages <= size)\n new_npartitions = list(map(len, iter_chunks(mem_usages, size)))\n new_partitions_boundaries = np.cumsum(new_npartitions)\n new_name = \"repartition-{}-{}\".format(size, tokenize(df))\n return _repartition_from_boundaries(df, new_partitions_boundaries, new_name)\n\n\ndef total_mem_usage(df, index=True, deep=False):\n mem_usage = df.memory_usage(index=index, deep=deep)\n if is_series_like(mem_usage):\n mem_usage = mem_usage.sum()\n return mem_usage", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_repartition_npartitions_repartition_npartitions.if_df_npartitions_npar.else_.if_df_known_divisions_and.else_.return._split_partitions_df_nsp": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_repartition_npartitions_repartition_npartitions.if_df_npartitions_npar.else_.if_df_known_divisions_and.else_.return._split_partitions_df_nsp", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5943, "end_line": 5992, "span_ids": ["repartition_npartitions"], "tokens": 455}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def repartition_npartitions(df, npartitions):\n \"\"\" Repartition dataframe to a smaller number of partitions \"\"\"\n new_name = \"repartition-%d-%s\" % (npartitions, tokenize(df))\n if df.npartitions == npartitions:\n return df\n elif df.npartitions > npartitions:\n npartitions_ratio = df.npartitions / npartitions\n new_partitions_boundaries = [\n int(new_partition_index * npartitions_ratio)\n for new_partition_index in range(npartitions + 1)\n ]\n return _repartition_from_boundaries(df, new_partitions_boundaries, new_name)\n else:\n original_divisions = divisions = pd.Series(df.divisions)\n if df.known_divisions and (\n np.issubdtype(divisions.dtype, np.datetime64)\n or np.issubdtype(divisions.dtype, np.number)\n ):\n if np.issubdtype(divisions.dtype, np.datetime64):\n divisions = divisions.values.astype(\"float64\")\n\n if is_series_like(divisions):\n divisions = divisions.values\n\n n = len(divisions)\n divisions = np.interp(\n x=np.linspace(0, n, npartitions + 1),\n xp=np.linspace(0, n, n),\n fp=divisions,\n )\n if np.issubdtype(original_divisions.dtype, np.datetime64):\n divisions = methods.tolist(\n pd.Series(divisions).astype(original_divisions.dtype)\n )\n elif np.issubdtype(original_divisions.dtype, np.integer):\n divisions = divisions.astype(original_divisions.dtype)\n\n if isinstance(divisions, np.ndarray):\n divisions = divisions.tolist()\n\n divisions = list(divisions)\n divisions[0] = df.divisions[0]\n divisions[-1] = df.divisions[-1]\n\n return df.repartition(divisions=divisions)\n else:\n div, mod = divmod(npartitions, df.npartitions)\n nsplits = [div] * df.npartitions\n nsplits[-1] += mod\n return _split_partitions(df, nsplits, new_name)", 
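A usage sketch for the repartitioning helpers above, via the public ``repartition`` method (the sizes and partition counts here are illustrative). Coalescing concatenates neighboring partitions (``_repartition_from_boundaries``), growing splits them (``_split_partitions``), and ``partition_size`` triggers the memory-based path in ``repartition_size``:

import pandas as pd
import dask.dataframe as dd

ddf = dd.from_pandas(pd.DataFrame({"x": range(100)}), npartitions=10)

fewer = ddf.repartition(npartitions=4)         # concat neighbors: 10 -> 4
more = fewer.repartition(npartitions=8)        # split partitions: 4 -> 8
sized = ddf.repartition(partition_size="1MB")  # target ~1MB of memory per partition
print(fewer.npartitions, more.npartitions)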
"start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__repartition_from_boundaries__repartition_from_boundaries.return.new_dd_object_graph_new_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__repartition_from_boundaries__repartition_from_boundaries.return.new_dd_object_graph_new_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5995, "end_line": 6009, "span_ids": ["_repartition_from_boundaries"], "tokens": 203}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _repartition_from_boundaries(df, new_partitions_boundaries, new_name):\n if not isinstance(new_partitions_boundaries, list):\n new_partitions_boundaries = list(new_partitions_boundaries)\n if new_partitions_boundaries[0] > 0:\n new_partitions_boundaries.insert(0, 0)\n if new_partitions_boundaries[-1] < df.npartitions:\n new_partitions_boundaries.append(df.npartitions)\n dsk = {}\n for i, (start, end) in enumerate(\n zip(new_partitions_boundaries, new_partitions_boundaries[1:])\n ):\n dsk[new_name, i] = (methods.concat, [(df._name, j) for j in range(start, end)])\n divisions = [df.divisions[i] for i in new_partitions_boundaries]\n graph = HighLevelGraph.from_collections(new_name, dsk, dependencies=[df])\n return new_dd_object(graph, new_name, df._meta, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__split_partitions__split_partitions.return.new_dd_object_graph_new_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__split_partitions__split_partitions.return.new_dd_object_graph_new_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6012, "end_line": 6046, "span_ids": ["_split_partitions"], "tokens": 299}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _split_partitions(df, nsplits, new_name):\n \"\"\"Split a Dask dataframe into new partitions\n\n Parameters\n ----------\n df: DataFrame or Series\n nsplits: List[int]\n Number of target dataframes for each partition\n The length of nsplits should be the same as df.npartitions\n new_name: str\n\n See Also\n --------\n repartition_npartitions\n repartition_size\n \"\"\"\n if len(nsplits) != df.npartitions:\n 
raise ValueError(\"nsplits should have len={}\".format(df.npartitions))\n\n dsk = {}\n split_name = \"split-{}\".format(tokenize(df, nsplits))\n j = 0\n for i, k in enumerate(nsplits):\n if k == 1:\n dsk[new_name, j] = (df._name, i)\n j += 1\n else:\n dsk[split_name, i] = (split_evenly, (df._name, i), k)\n for jj in range(k):\n dsk[new_name, j] = (getitem, (split_name, i), jj)\n j += 1\n\n divisions = [None] * (1 + sum(nsplits))\n graph = HighLevelGraph.from_collections(new_name, dsk, dependencies=[df])\n return new_dd_object(graph, new_name, df._meta, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_repartition_repartition.raise_ValueError_Data_mu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_repartition_repartition.raise_ValueError_Data_mu", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6049, "end_line": 6095, "span_ids": ["repartition"], "tokens": 437}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def repartition(df, divisions=None, force=False):\n \"\"\"Repartition dataframe along new divisions\n\n Dask.DataFrame objects are partitioned along their index. Often when\n multiple dataframes interact we need to align these partitionings. The\n ``repartition`` function constructs a new DataFrame object holding the same\n data but partitioned on different values. 
It does this by performing a\n sequence of ``loc`` and ``concat`` calls to split and merge the previous\n generation of partitions.\n\n Parameters\n ----------\n\n divisions : list\n List of partitions to be used\n force : bool, default False\n Allows the expansion of the existing divisions.\n If False then the new divisions lower and upper bounds must be\n the same as the old divisions.\n\n Examples\n --------\n\n >>> df = df.repartition([0, 5, 10, 20]) # doctest: +SKIP\n\n Also works on Pandas objects\n\n >>> ddf = dd.repartition(df, [0, 5, 10, 20]) # doctest: +SKIP\n \"\"\"\n\n token = tokenize(df, divisions)\n if isinstance(df, _Frame):\n tmp = \"repartition-split-\" + token\n out = \"repartition-merge-\" + token\n dsk = repartition_divisions(\n df.divisions, divisions, df._name, tmp, out, force=force\n )\n graph = HighLevelGraph.from_collections(out, dsk, dependencies=[df])\n return new_dd_object(graph, out, df._meta, divisions)\n elif is_dataframe_like(df) or is_series_like(df):\n name = \"repartition-dataframe-\" + token\n from .utils import shard_df_on_index\n\n dfs = shard_df_on_index(df, divisions[1:-1])\n dsk = dict(((name, i), df) for i, df in enumerate(dfs))\n return new_dd_object(dsk, name, df, divisions)\n raise ValueError(\"Data must be DataFrame or Series\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__reduction_chunk__reduction_aggregate.return.aca_aggregate_x_kwargs": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__reduction_chunk__reduction_aggregate.return.aca_aggregate_x_kwargs", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6098, "end_line": 6115, "span_ids": ["_reduction_chunk", "_reduction_aggregate", "_reduction_combine"], "tokens": 170}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _reduction_chunk(x, aca_chunk=None, **kwargs):\n o = aca_chunk(x, **kwargs)\n # Return a dataframe so that the concatenated version is also a dataframe\n return o.to_frame().T if is_series_like(o) else o\n\n\ndef _reduction_combine(x, aca_combine=None, **kwargs):\n if isinstance(x, list):\n x = pd.Series(x)\n o = aca_combine(x, **kwargs)\n # Return a dataframe so that the concatenated version is also a dataframe\n return o.to_frame().T if is_series_like(o) else o\n\n\ndef _reduction_aggregate(x, aca_aggregate=None, **kwargs):\n if isinstance(x, list):\n x = pd.Series(x)\n return aca_aggregate(x, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_idxmaxmin_chunk_idxmaxmin_chunk.return.pd_DataFrame_idx_idx": {"__data__": {"id_": 
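`_reduction_chunk`, `_reduction_combine`, and `_reduction_aggregate` above exist so a reduction can run as a tree: each partition is reduced on its own, the one-row intermediates (note the `to_frame().T` trick, which keeps the concatenated pieces frame-like) are combined, and a final aggregate produces the answer. A minimal eager sketch of that pattern on plain pandas objects, with no task graph and purely illustrative data:

import pandas as pd

# Two "partitions" of a larger frame.
partitions = [pd.DataFrame({"x": [1, 2]}), pd.DataFrame({"x": [3, 4, 5]})]

# chunk: reduce each partition, transposing the Series result into a
# one-row frame so the intermediates concatenate cleanly.
chunks = [p.sum().to_frame().T for p in partitions]

# combine: concatenate the intermediates and reduce them again.
combined = pd.concat(chunks).sum().to_frame().T

# aggregate: the final reduction over the combined intermediate.
result = combined.sum()
print(result["x"])  # 15 == (1 + 2) + (3 + 4 + 5)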
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_idxmaxmin_chunk_idxmaxmin_chunk.return.pd_DataFrame_idx_idx", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6118, "end_line": 6127, "span_ids": ["idxmaxmin_chunk"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def idxmaxmin_chunk(x, fn=None, skipna=True):\n minmax = \"max\" if fn == \"idxmax\" else \"min\"\n if len(x) > 0:\n idx = getattr(x, fn)(skipna=skipna)\n value = getattr(x, minmax)(skipna=skipna)\n else:\n idx = value = pd.Series([], dtype=\"i8\")\n if is_series_like(idx):\n return pd.DataFrame({\"idx\": idx, \"value\": value})\n return pd.DataFrame({\"idx\": [idx], \"value\": [value]})", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_idxmaxmin_row_idxmaxmin_row.return.pd_DataFrame_idx_idx_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_idxmaxmin_row_idxmaxmin_row.return.pd_DataFrame_idx_idx_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6130, "end_line": 6138, "span_ids": ["idxmaxmin_row"], "tokens": 113}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def idxmaxmin_row(x, fn=None, skipna=True):\n minmax = \"max\" if fn == \"idxmax\" else \"min\"\n if len(x) > 0:\n x = x.set_index(\"idx\")\n idx = [getattr(x.value, fn)(skipna=skipna)]\n value = [getattr(x.value, minmax)(skipna=skipna)]\n else:\n idx = value = pd.Series([], dtype=\"i8\")\n return pd.DataFrame({\"idx\": idx, \"value\": value})", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_idxmaxmin_combine_safe_head.return.r": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_idxmaxmin_combine_safe_head.return.r", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6141, "end_line": 6186, "span_ids": ["idxmaxmin_combine", "_count_aggregate", "safe_head", "idxmaxmin_agg", "_mode_aggregate"], "tokens": 320}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def idxmaxmin_combine(x, fn=None, skipna=True):\n if len(x) == 0:\n return x\n return (\n x.groupby(level=0)\n .apply(idxmaxmin_row, fn=fn, skipna=skipna)\n .reset_index(level=1, drop=True)\n )\n\n\ndef idxmaxmin_agg(x, fn=None, skipna=True, scalar=False):\n res = idxmaxmin_combine(x, fn, skipna=skipna)[\"idx\"]\n if len(res) == 0:\n raise ValueError(\"attempt to get argmax of an empty sequence\")\n if scalar:\n return res[0]\n res.name = None\n return res\n\n\ndef _mode_aggregate(df, dropna):\n value_count_series = df.sum()\n max_val = value_count_series.max(skipna=dropna)\n mode_series = (\n value_count_series[value_count_series == max_val]\n .index.to_series()\n .sort_values()\n .reset_index(drop=True)\n )\n return mode_series\n\n\ndef _count_aggregate(x):\n return x.sum().astype(\"int64\")\n\n\ndef safe_head(df, n):\n r = M.head(df, n)\n if len(r) != n:\n msg = (\n \"Insufficient elements for `head`. {0} elements \"\n \"requested, only {1} elements available. Try passing larger \"\n \"`npartitions` to `head`.\"\n )\n warnings.warn(msg.format(n, len(r)))\n return r", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_maybe_shift_divisions_maybe_shift_divisions.return.df": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_maybe_shift_divisions_maybe_shift_divisions.return.df", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6189, "end_line": 6222, "span_ids": ["maybe_shift_divisions"], "tokens": 323}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def maybe_shift_divisions(df, periods, freq):\n \"\"\"Maybe shift divisions by periods of size freq\n\n Used to shift the divisions for the `shift` method. If freq isn't a fixed\n size (not anchored or relative), then the divisions are shifted\n appropriately. Otherwise the divisions are cleared.\n\n Parameters\n ----------\n df : dd.DataFrame, dd.Series, or dd.Index\n periods : int\n The number of periods to shift.\n freq : DateOffset, timedelta, or time rule string\n The frequency to shift by.\n \"\"\"\n if isinstance(freq, str):\n freq = pd.tseries.frequencies.to_offset(freq)\n\n is_offset = isinstance(freq, pd.DateOffset)\n if is_offset:\n if PANDAS_GT_100:\n is_anchored = freq.is_anchored()\n else:\n is_anchored = freq.isAnchored()\n if is_anchored or not hasattr(freq, \"delta\"):\n # Can't infer divisions on relative or anchored offsets, as\n # divisions may now split identical index value.\n # (e.g. 
index_partitions = [[1, 2, 3], [3, 4, 5]])\n return df.clear_divisions()\n if df.known_divisions:\n divs = pd.Series(range(len(df.divisions)), index=df.divisions)\n divisions = divs.shift(periods, freq=freq).index\n return type(df)(df.dask, df._name, df._meta, divisions)\n return df", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_to_datetime_has_parallel_type.return.isinstance_x_parallel_ty": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_to_datetime_has_parallel_type.return.isinstance_x_parallel_ty", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6225, "end_line": 6303, "span_ids": ["to_timedelta", "get_parallel_type_series", "impl:29", "get_parallel_type_dataframe", "get_parallel_type_object", "get_parallel_type_index", "parallel_types", "_repr_data_series", "has_parallel_type", "get_parallel_type_frame", "impl:28", "to_datetime"], "tokens": 479}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@wraps(pd.to_datetime)\ndef to_datetime(arg, meta=None, **kwargs):\n if meta is None:\n if isinstance(arg, Index):\n meta = pd.DatetimeIndex([])\n meta.name = arg.name\n else:\n meta = pd.Series([pd.Timestamp(\"2000\")])\n meta.index = meta.index.astype(arg.index.dtype)\n meta.index.name = arg.index.name\n return map_partitions(pd.to_datetime, arg, meta=meta, **kwargs)\n\n\n@wraps(pd.to_timedelta)\ndef to_timedelta(arg, unit=\"ns\", errors=\"raise\"):\n meta = pd.Series([pd.Timedelta(1, unit=unit)])\n return map_partitions(pd.to_timedelta, arg, unit=unit, errors=errors, meta=meta)\n\n\nif hasattr(pd, \"isna\"):\n\n @wraps(pd.isna)\n def isna(arg):\n return map_partitions(pd.isna, arg)\n\n\ndef _repr_data_series(s, index):\n \"\"\"A helper for creating the ``_repr_data`` property\"\"\"\n npartitions = len(index) - 1\n if is_categorical_dtype(s):\n if has_known_categories(s):\n dtype = \"category[known]\"\n else:\n dtype = \"category[unknown]\"\n else:\n dtype = str(s.dtype)\n return pd.Series([dtype] + [\"...\"] * npartitions, index=index, name=s.name)\n\n\nget_parallel_type = Dispatch(\"get_parallel_type\")\n\n\n@get_parallel_type.register(pd.Series)\ndef get_parallel_type_series(_):\n return Series\n\n\n@get_parallel_type.register(pd.DataFrame)\ndef get_parallel_type_dataframe(_):\n return DataFrame\n\n\n@get_parallel_type.register(pd.Index)\ndef get_parallel_type_index(_):\n return Index\n\n\n@get_parallel_type.register(object)\ndef get_parallel_type_object(o):\n return Scalar\n\n\n@get_parallel_type.register(_Frame)\ndef get_parallel_type_frame(o):\n return get_parallel_type(o._meta)\n\n\ndef parallel_types():\n return tuple(\n k\n for k, v in get_parallel_type._lookup.items()\n if v is not get_parallel_type_object\n )\n\n\ndef has_parallel_type(x):\n \"\"\" Does this object have a dask dataframe equivalent? 
\"\"\"\n get_parallel_type(x) # trigger lazy registration\n return isinstance(x, parallel_types())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_new_dd_object_new_dd_object.if_has_parallel_type_meta.else_.return.get_parallel_type_meta_d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_new_dd_object_new_dd_object.if_has_parallel_type_meta.else_.return.get_parallel_type_meta_d", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6306, "end_line": 6330, "span_ids": ["new_dd_object"], "tokens": 259}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def new_dd_object(dsk, name, meta, divisions):\n \"\"\"Generic constructor for dask.dataframe objects.\n\n Decides the appropriate output class based on the type of `meta` provided.\n \"\"\"\n if has_parallel_type(meta):\n return get_parallel_type(meta)(dsk, name, meta, divisions)\n elif is_arraylike(meta) and meta.shape:\n import dask.array as da\n\n chunks = ((np.nan,) * (len(divisions) - 1),) + tuple(\n (d,) for d in meta.shape[1:]\n )\n if len(chunks) > 1:\n layer = dsk.layers[name]\n if isinstance(layer, Blockwise):\n layer.new_axes[\"j\"] = chunks[1][0]\n layer.output_indices = layer.output_indices + (\"j\",)\n else:\n suffix = (0,) * (len(chunks) - 1)\n for i in range(len(chunks[0])):\n layer[(name, i) + suffix] = layer.pop((name, i))\n return da.Array(dsk, name=name, chunks=chunks, dtype=meta.dtype)\n else:\n return get_parallel_type(meta)(dsk, name, meta, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_partitionwise_graph_partitionwise_graph.return.blockwise_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_partitionwise_graph_partitionwise_graph.return.blockwise_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6333, "end_line": 6391, "span_ids": ["partitionwise_graph"], "tokens": 481}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def partitionwise_graph(func, name, *args, **kwargs):\n \"\"\"\n Apply a function partition-wise across arguments to create layer of a graph\n\n This applies a function, ``func``, in an embarrassingly parallel fashion\n across 
partitions/chunks in the provided arguments. It handles Dataframes,\n Arrays, and scalars smoothly, and relies on the ``blockwise`` machinery\n to provide a nicely symbolic graph.\n\n It is most commonly used in other graph-building functions to create the\n appropriate layer of the resulting dataframe.\n\n Parameters\n ----------\n func: callable\n name: str\n descriptive name for the operation\n *args:\n **kwargs:\n\n Returns\n -------\n out: Blockwise graph\n\n Examples\n --------\n >>> subgraph = partitionwise_graph(function, x, y, z=123) # doctest: +SKIP\n >>> layer = partitionwise_graph(function, df, x, z=123) # doctest: +SKIP\n >>> graph = HighLevelGraph.from_collections(name, layer, dependencies=[df, x]) # doctest: +SKIP\n >>> result = new_dd_object(graph, name, metadata, df.divisions) # doctest: +SKIP\n\n See Also\n --------\n map_partitions\n \"\"\"\n pairs = []\n numblocks = {}\n for arg in args:\n if isinstance(arg, _Frame):\n pairs.extend([arg._name, \"i\"])\n numblocks[arg._name] = (arg.npartitions,)\n elif isinstance(arg, Scalar):\n pairs.extend([arg._name, \"i\"])\n numblocks[arg._name] = (1,)\n elif isinstance(arg, Array):\n if arg.ndim == 1:\n pairs.extend([arg.name, \"i\"])\n elif arg.ndim == 0:\n pairs.extend([arg.name, \"\"])\n elif arg.ndim == 2:\n pairs.extend([arg.name, \"ij\"])\n else:\n raise ValueError(\"Can't add multi-dimensional array to dataframes\")\n numblocks[arg._name] = arg.numblocks\n else:\n pairs.extend([arg, None])\n return blockwise(\n func, name, \"i\", *pairs, numblocks=numblocks, concatenate=True, **kwargs\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_meta_warning_meta_warning.return.msg": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_meta_warning_meta_warning.return.msg", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6394, "end_line": 6418, "span_ids": ["meta_warning"], "tokens": 221}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def meta_warning(df):\n \"\"\"\n Provide an informative message when the user is asked to provide metadata\n \"\"\"\n if is_dataframe_like(df):\n meta_str = {k: str(v) for k, v in df.dtypes.to_dict().items()}\n elif is_series_like(df):\n meta_str = (df.name, str(df.dtype))\n else:\n meta_str = None\n msg = (\n \"\\nYou did not provide metadata, so Dask is running your \"\n \"function on a small dataset to guess output types. 
\"\n \"It is possible that Dask will guess incorrectly.\\n\"\n \"To provide an explicit output types or to silence this message, \"\n \"please provide the `meta=` keyword, as described in the map or \"\n \"apply function that you are using.\"\n )\n if meta_str:\n msg += (\n \"\\n\"\n \" Before: .apply(func)\\n\"\n \" After: .apply(func, meta=%s)\\n\" % str(meta_str)\n )\n return msg", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_prefix_reduction_prefix_reduction.return.new_dd_object_graph_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_prefix_reduction_prefix_reduction.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6421, "end_line": 6480, "span_ids": ["prefix_reduction"], "tokens": 608}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def prefix_reduction(f, ddf, identity, **kwargs):\n \"\"\"Computes the prefix sums of f on df\n\n If df has partitions [P1, P2, ..., Pn], then returns the DataFrame with\n partitions [f(identity, P1),\n f(f(identity, P1), P2),\n f(f(f(identity, P1), P2), P3),\n ...]\n\n Parameters\n ----------\n f : callable\n an associative function f\n ddf : dd.DataFrame\n identity : pd.DataFrame\n an identity element of f, that is f(identity, df) = f(df, identity) = df\n \"\"\"\n dsk = dict()\n name = \"prefix_reduction-\" + tokenize(f, ddf, identity, **kwargs)\n meta = ddf._meta\n n = len(ddf.divisions) - 1\n divisions = [None] * (n + 1)\n\n N = 1\n while N < n:\n N *= 2\n for i in range(n):\n dsk[(name, i, 1, 0)] = (apply, f, [(ddf._name, i), identity], kwargs)\n for i in range(n, N):\n dsk[(name, i, 1, 0)] = identity\n\n d = 1\n while d < N:\n for i in range(0, N, 2 * d):\n dsk[(name, i + 2 * d - 1, 2 * d, 0)] = (\n apply,\n f,\n [(name, i + d - 1, d, 0), (name, i + 2 * d - 1, d, 0)],\n kwargs,\n )\n d *= 2\n\n dsk[(name, N - 1, N, 1)] = identity\n\n while d > 1:\n d //= 2\n for i in range(0, N, 2 * d):\n dsk[(name, i + d - 1, d, 1)] = (name, i + 2 * d - 1, 2 * d, 1)\n dsk[(name, i + 2 * d - 1, d, 1)] = (\n apply,\n f,\n [(name, i + 2 * d - 1, 2 * d, 1), (name, i + d - 1, d, 0)],\n kwargs,\n )\n\n for i in range(n):\n dsk[(name, i)] = (apply, f, [(name, i, 1, 1), identity], kwargs)\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[ddf])\n return new_dd_object(graph, name, meta, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_suffix_reduction_suffix_reduction.return.new_dd_object_graph_name": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_suffix_reduction_suffix_reduction.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6483, "end_line": 6544, "span_ids": ["suffix_reduction"], "tokens": 636}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def suffix_reduction(f, ddf, identity, **kwargs):\n \"\"\"Computes the suffix sums of f on df\n\n If df has partitions [P1, P2, ..., Pn], then returns the DataFrame with\n partitions [f(P1, f(P2, ...f(Pn, identity)...)),\n f(P2, ...f(Pn, identity)...),\n ...f(Pn, identity)...,\n ...]\n\n Parameters\n ----------\n f : callable\n an associative function f\n ddf : dd.DataFrame\n identity : pd.DataFrame\n an identity element of f, that is f(identity, df) = f(df, identity) = df\n kwargs : ??\n keyword arguments of f ??\n \"\"\"\n dsk = dict()\n name = \"suffix_reduction-\" + tokenize(f, ddf, identity, **kwargs)\n meta = ddf._meta\n n = len(ddf.divisions) - 1\n divisions = [None] * (n + 1)\n\n N = 1\n while N < n:\n N *= 2\n for i in range(n):\n dsk[(name, i, 1, 0)] = (apply, f, [(ddf._name, n - 1 - i), identity], kwargs)\n for i in range(n, N):\n dsk[(name, i, 1, 0)] = identity\n\n d = 1\n while d < N:\n for i in range(0, N, 2 * d):\n dsk[(name, i + 2 * d - 1, 2 * d, 0)] = (\n apply,\n f,\n [(name, i + 2 * d - 1, d, 0), (name, i + d - 1, d, 0)],\n kwargs,\n )\n d *= 2\n\n dsk[(name, N - 1, N, 1)] = identity\n\n while d > 1:\n d //= 2\n for i in range(0, N, 2 * d):\n dsk[(name, i + d - 1, d, 1)] = (name, i + 2 * d - 1, 2 * d, 1)\n dsk[(name, i + 2 * d - 1, d, 1)] = (\n apply,\n f,\n [(name, i + d - 1, d, 0), (name, i + 2 * d - 1, 2 * d, 1)],\n kwargs,\n )\n\n for i in range(n):\n dsk[(name, i)] = (apply, f, [(name, n - 1 - i, 1, 1), identity], kwargs)\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[ddf])\n return new_dd_object(graph, name, meta, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_mapseries_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_mapseries_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6547, "end_line": 6614, "span_ids": ["mapseries", "mapseries_combine", "series_map"], "tokens": 588}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def mapseries(base_chunk, concat_map):\n return base_chunk.map(concat_map)\n\n\ndef mapseries_combine(index, concat_result):\n final_series = 
concat_result.sort_index()\n final_series = index.to_series().map(final_series)\n return final_series\n\n\ndef series_map(base_series, map_series):\n npartitions = base_series.npartitions\n split_out = map_series.npartitions\n\n dsk = {}\n\n base_token_key = tokenize(base_series, split_out)\n base_split_prefix = \"base-split-{}\".format(base_token_key)\n base_shard_prefix = \"base-shard-{}\".format(base_token_key)\n for i, key in enumerate(base_series.__dask_keys__()):\n dsk[(base_split_prefix, i)] = (hash_shard, key, split_out)\n for j in range(split_out):\n dsk[(base_shard_prefix, 0, i, j)] = (getitem, (base_split_prefix, i), j)\n\n map_token_key = tokenize(map_series)\n map_split_prefix = \"map-split-{}\".format(map_token_key)\n map_shard_prefix = \"map-shard-{}\".format(map_token_key)\n for i, key in enumerate(map_series.__dask_keys__()):\n dsk[(map_split_prefix, i)] = (\n hash_shard,\n key,\n split_out,\n split_out_on_index,\n None,\n )\n for j in range(split_out):\n dsk[(map_shard_prefix, 0, i, j)] = (getitem, (map_split_prefix, i), j)\n\n token_key = tokenize(base_series, map_series)\n map_prefix = \"map-series-{}\".format(token_key)\n for i in range(npartitions):\n for j in range(split_out):\n dsk[(map_prefix, i, j)] = (\n mapseries,\n (base_shard_prefix, 0, i, j),\n (_concat, [(map_shard_prefix, 0, k, j) for k in range(split_out)]),\n )\n\n final_prefix = \"map-series-combine-{}\".format(token_key)\n for i, key in enumerate(base_series.index.__dask_keys__()):\n dsk[(final_prefix, i)] = (\n mapseries_combine,\n key,\n (_concat, [(map_prefix, i, j) for j in range(split_out)]),\n )\n\n meta = map_series._meta.copy()\n meta.index = base_series._meta.index\n meta = make_meta(meta)\n\n dependencies = [base_series, map_series, base_series.index]\n graph = HighLevelGraph.from_collections(\n final_prefix, dsk, dependencies=dependencies\n )\n divisions = list(base_series.divisions)\n\n return new_dd_object(graph, final_prefix, meta, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/extensions.py___": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/extensions.py___", "embedding": null, "metadata": {"file_path": "dask/dataframe/extensions.py", "file_name": "extensions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 24, "span_ids": ["docstring"], "tokens": 107}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"\nSupport for pandas ExtensionArray in dask.dataframe.\n\nSee :ref:`extensionarrays` for more.\n\"\"\"\nfrom ..utils import Dispatch\nfrom ._accessor import (\n register_dataframe_accessor,\n register_index_accessor,\n register_series_accessor,\n)\n\nmake_array_nonempty = Dispatch(\"make_array_nonempty\")\nmake_scalar = Dispatch(\"make_scalar\")\n\n\n__all__ = [\n \"make_array_nonempty\",\n \"make_scalar\",\n \"register_dataframe_accessor\",\n \"register_index_accessor\",\n \"register_series_accessor\",\n]", "start_char_idx": null, 
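`series_map` above aligns two differently partitioned series by hash-sharding both sides into `split_out` buckets, so any value in the base series and its entry in the mapping series land in the same bucket and can be mapped locally. The eager sketch below imitates that idea with `pd.util.hash_pandas_object`; this `hash_shard` is a simplified illustrative stand-in, not the helper of the same name used in the graph.

import pandas as pd

def hash_shard(s, nbuckets, on_index=False):
    # Bucket rows by a stable hash of either the values or the index, so the
    # same key always lands in the same bucket on both sides.
    keys = s.index.to_series() if on_index else s
    buckets = pd.util.hash_pandas_object(keys, index=False) % nbuckets
    return [s[(buckets == j).values] for j in range(nbuckets)]

base = pd.Series(["a", "b", "c", "a"])
mapping = pd.Series([1, 2, 3], index=["a", "b", "c"])

split_out = 2
base_shards = hash_shard(base, split_out)                   # bucket by value
map_shards = hash_shard(mapping, split_out, on_index=True)  # bucket by index

# Map each base bucket against the matching mapping bucket, then reassemble.
mapped = pd.concat(b.map(m) for b, m in zip(base_shards, map_shards))
print(mapped.sort_index().tolist())  # [1, 2, 3, 1]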
"end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_collections__determine_levels.if_isinstance_index_tup.else_.return.0": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_collections__determine_levels.if_isinstance_index_tup.else_.return.0", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 71, "span_ids": ["imports", "_determine_levels"], "tokens": 545}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import collections\nimport itertools as it\nimport operator\nimport warnings\n\nimport numpy as np\nimport pandas as pd\n\nfrom .core import (\n DataFrame,\n Series,\n aca,\n map_partitions,\n new_dd_object,\n no_default,\n split_out_on_index,\n _extract_meta,\n)\nfrom .methods import drop_columns, concat\nfrom .shuffle import shuffle\nfrom .utils import (\n make_meta,\n insert_meta_param_description,\n raise_on_meta_error,\n is_series_like,\n is_dataframe_like,\n)\nfrom ..base import tokenize\nfrom ..utils import derived_from, M, funcname, itemgetter\nfrom ..highlevelgraph import HighLevelGraph\n\n\n# #############################################\n#\n# GroupBy implementation notes\n#\n# Dask groupby supports reductions, i.e., mean, sum and alike, and apply. The\n# former do not shuffle the data and are efficiently implemented as tree\n# reductions. The latter is implemented by shuffling the underlying partiitons\n# such that all items of a group can be found in the same parititon.\n#\n# The argument to ``.groupby``, the index, can be a ``str``, ``dd.DataFrame``,\n# ``dd.Series``, or a list thereof. In operations on the grouped object, the\n# divisions of the the grouped object and the items of index have to align.\n# Currently, there is no support to shuffle the index values as part of the\n# groupby operation. Therefore, the alignment has to be guaranteed by the\n# caller.\n#\n# To operate on matching partitions, most groupby operations exploit the\n# corresponding support in ``apply_concat_apply``. Specifically, this function\n# operates on matching partitions of frame-like objects passed as varargs.\n#\n# After the initial chunk step, the passed index is implicitly passed along to\n# subsequent operations as the index of the partitions. Groupby operations on\n# the individual partitions can then access the index via the ``levels``\n# parameter of the ``groupby`` function. The correct argument is determined by\n# the ``_determine_levels`` function.\n#\n# To minimize overhead, series in an index that were obtained by getitem on the\n# object to group are not passed as series to the various operations, but as\n# columnn keys. 
This transformation is implemented as ``_normalize_index``.\n#\n# #############################################\n\n\ndef _determine_levels(index):\n \"\"\"Determine the correct levels argument to groupby.\"\"\"\n if isinstance(index, (tuple, list)) and len(index) > 1:\n return list(range(len(index)))\n else:\n return 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__normalize_index__normalize_index.if_not_isinstance_df_Dat.else_.return.index": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__normalize_index__normalize_index.if_not_isinstance_df_Dat.else_.return.index", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 74, "end_line": 97, "span_ids": ["_normalize_index"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _normalize_index(df, index):\n \"\"\"Replace series with column names in an index wherever possible.\"\"\"\n if not isinstance(df, DataFrame):\n return index\n\n elif isinstance(index, list):\n return [_normalize_index(df, col) for col in index]\n\n elif (\n is_series_like(index)\n and index.name in df.columns\n and index._name == df[index.name]._name\n ):\n return index.name\n\n elif (\n isinstance(index, DataFrame)\n and set(index.columns).issubset(df.columns)\n and index._name == df[index.columns]._name\n ):\n return list(index.columns)\n\n else:\n return index", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__maybe_slice__is_aligned.if_is_series_like_by_or_.else_.return.True": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__maybe_slice__is_aligned.if_is_series_like_by_or_.else_.return.True", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 100, "end_line": 120, "span_ids": ["_is_aligned", "_maybe_slice"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _maybe_slice(grouped, columns):\n \"\"\"\n Slice columns if grouped is pd.DataFrameGroupBy\n \"\"\"\n # FIXME: update with better groupby object detection (i.e.: ngroups, get_group)\n if \"groupby\" in type(grouped).__name__.lower():\n if columns is not None:\n if isinstance(columns, (tuple, list, 
set, pd.Index)):\n columns = list(columns)\n return grouped[columns]\n return grouped\n\n\ndef _is_aligned(df, by):\n \"\"\"Check if `df` and `by` have aligned indices\"\"\"\n if is_series_like(by) or is_dataframe_like(by):\n return df.index.equals(by.index)\n elif isinstance(by, (list, tuple)):\n return all(_is_aligned(df, i) for i in by)\n else:\n return True", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_raise_unaligned__groupby_raise_unaligned.return.df_groupby_kwargs_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_raise_unaligned__groupby_raise_unaligned.return.df_groupby_kwargs_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 123, "end_line": 160, "span_ids": ["_groupby_raise_unaligned"], "tokens": 412}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _groupby_raise_unaligned(df, **kwargs):\n \"\"\"Groupby, but raise if df and `by` key are unaligned.\n\n Pandas supports grouping by a column that doesn't align with the input\n frame/series/index. However, the reindexing does not seem to be\n threadsafe, and can result in incorrect results. Since grouping by an\n unaligned key is generally a bad idea, we just error loudly in dask.\n\n For more information see pandas GH issue #15244 and Dask GH issue #1876.\"\"\"\n by = kwargs.get(\"by\", None)\n if by is not None and not _is_aligned(df, by):\n msg = (\n \"Grouping by an unaligned index is unsafe and unsupported.\\n\"\n \"This can be caused by filtering only one of the object or\\n\"\n \"grouping key. 
For example, the following works in pandas,\\n\"\n \"but not in dask:\\n\"\n \"\\n\"\n \"df[df.foo < 0].groupby(df.bar)\\n\"\n \"\\n\"\n \"This can be avoided by either filtering beforehand, or\\n\"\n \"passing in the name of the column instead:\\n\"\n \"\\n\"\n \"df2 = df[df.foo < 0]\\n\"\n \"df2.groupby(df2.bar)\\n\"\n \"# or\\n\"\n \"df[df.foo < 0].groupby('bar')\\n\"\n \"\\n\"\n \"For more information see dask GH issue #1876.\"\n )\n raise ValueError(msg)\n elif by is not None and len(by):\n # since we're coming through apply, `by` will be a tuple.\n # Pandas treats tuples as a single key, and lists as multiple keys\n # We want multiple keys\n if isinstance(by, str):\n by = [by]\n kwargs.update(by=list(by))\n return df.groupby(**kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_slice_apply__groupby_slice_apply.return.g_apply_func_args_kw": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_slice_apply__groupby_slice_apply.return.g_apply_func_args_kw", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 163, "end_line": 172, "span_ids": ["_groupby_slice_apply"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _groupby_slice_apply(\n df, grouper, key, func, *args, group_keys=True, dropna=None, **kwargs\n):\n # No need to use raise if unaligned here - this is only called after\n # shuffling, which makes everything aligned already\n dropna = {\"dropna\": dropna} if dropna is not None else {}\n g = df.groupby(grouper, group_keys=group_keys, **dropna)\n if key:\n g = g[key]\n return g.apply(func, *args, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_slice_transform__groupby_slice_transform.return.g_transform_func_args_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_slice_transform__groupby_slice_transform.return.g_transform_func_args_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 175, "end_line": 189, "span_ids": ["_groupby_slice_transform"], "tokens": 152}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def 
_groupby_slice_transform(\n df, grouper, key, func, *args, group_keys=True, dropna=None, **kwargs\n):\n # No need to use raise if unaligned here - this is only called after\n # shuffling, which makes everything aligned already\n dropna = {\"dropna\": dropna} if dropna is not None else {}\n g = df.groupby(grouper, group_keys=group_keys, **dropna)\n if key:\n g = g[key]\n\n # Cannot call transform on an empty dataframe\n if len(df) == 0:\n return g.apply(func, *args, **kwargs)\n\n return g.transform(func, *args, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_get_group__groupby_get_group.if_get_key_in_grouped_gro.else_.return.df_iloc_0_0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_get_group__groupby_get_group.if_get_key_in_grouped_gro.else_.return.df_iloc_0_0_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 192, "end_line": 207, "span_ids": ["_groupby_get_group"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _groupby_get_group(df, by_key, get_key, columns):\n # SeriesGroupBy may pass df which includes group key\n grouped = _groupby_raise_unaligned(df, by=by_key)\n\n if get_key in grouped.groups:\n if is_dataframe_like(df):\n grouped = grouped[columns]\n return grouped.get_group(get_key)\n\n else:\n # to create empty DataFrame/Series, which has the same\n # dtype as the original\n if is_dataframe_like(df):\n # may be SeriesGroupBy\n df = df[columns]\n return df.iloc[0:0]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_None_31_Aggregation.__init__.self.__name__.name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_None_31_Aggregation.__init__.self.__name__.name", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 210, "end_line": 272, "span_ids": ["Aggregation", "_groupby_get_group", "Aggregation.__init__"], "tokens": 514}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "###############################################################\n# Aggregation\n###############################################################\n\n\nclass Aggregation(object):\n 
\"\"\"User defined groupby-aggregation.\n\n This class allows users to define their own custom aggregation in terms of\n operations on Pandas dataframes in a map-reduce style. You need to specify\n what operation to do on each chunk of data, how to combine those chunks of\n data together, and then how to finalize the result.\n\n See :ref:`dataframe.groupby.aggregate` for more.\n\n Parameters\n ----------\n name : str\n the name of the aggregation. It should be unique, since intermediate\n result will be identified by this name.\n chunk : callable\n a function that will be called with the grouped column of each\n partition. It can either return a single series or a tuple of series.\n The index has to be equal to the groups.\n agg : callable\n a function that will be called to aggregate the results of each chunk.\n Again the argument(s) will be grouped series. If ``chunk`` returned a\n tuple, ``agg`` will be called with all of them as individual positional\n arguments.\n finalize : callable\n an optional finalizer that will be called with the results from the\n aggregation.\n\n Examples\n --------\n We could implement ``sum`` as follows:\n\n >>> custom_sum = dd.Aggregation(\n ... name='custom_sum',\n ... chunk=lambda s: s.sum(),\n ... agg=lambda s0: s0.sum()\n ... ) # doctest: +SKIP\n >>> df.groupby('g').agg(custom_sum) # doctest: +SKIP\n\n We can implement ``mean`` as follows:\n\n >>> custom_mean = dd.Aggregation(\n ... name='custom_mean',\n ... chunk=lambda s: (s.count(), s.sum()),\n ... agg=lambda count, sum: (count.sum(), sum.sum()),\n ... finalize=lambda count, sum: sum / count,\n ... ) # doctest: +SKIP\n >>> df.groupby('g').agg(custom_mean) # doctest: +SKIP\n\n Though of course, both of these are built-in and so you don't need to\n implement them yourself.\n \"\"\"\n\n def __init__(self, name, chunk, agg, finalize=None):\n self.chunk = chunk\n self.agg = agg\n self.finalize = finalize\n self.__name__ = name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_aggregate__apply_chunk.if_is_series_like_df_or_.else_.return.func_g_columns_kwargs": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_aggregate__apply_chunk.if_is_series_like_df_or_.else_.return.func_g_columns_kwargs", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 275, "end_line": 293, "span_ids": ["_apply_chunk", "_groupby_aggregate"], "tokens": 191}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _groupby_aggregate(\n df, aggfunc=None, levels=None, dropna=None, sort=False, **kwargs\n):\n dropna = {\"dropna\": dropna} if dropna is not None else {}\n return aggfunc(df.groupby(level=levels, sort=sort, **dropna), **kwargs)\n\n\ndef _apply_chunk(df, *index, dropna=None, **kwargs):\n func = kwargs.pop(\"chunk\")\n columns = kwargs.pop(\"columns\")\n dropna = {\"dropna\": 
dropna} if dropna is not None else {}\n g = _groupby_raise_unaligned(df, by=index, **dropna)\n\n if is_series_like(df) or columns is None:\n return func(g, **kwargs)\n else:\n if isinstance(columns, (tuple, list, set, pd.Index)):\n columns = list(columns)\n return func(g[columns], **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__var_chunk__var_combine.return.g_groupby_level_levels_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__var_chunk__var_combine.return.g_groupby_level_levels_s", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 296, "end_line": 317, "span_ids": ["_var_chunk", "_var_combine"], "tokens": 164}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _var_chunk(df, *index):\n if is_series_like(df):\n df = df.to_frame()\n\n df = df.copy()\n\n g = _groupby_raise_unaligned(df, by=index)\n x = g.sum()\n\n n = g[x.columns].count().rename(columns=lambda c: (c, \"-count\"))\n\n cols = x.columns\n df[cols] = df[cols] ** 2\n\n g2 = _groupby_raise_unaligned(df, by=index)\n x2 = g2.sum().rename(columns=lambda c: (c, \"-x2\"))\n\n return concat([x, x2, n], axis=1)\n\n\ndef _var_combine(g, levels, sort=False):\n return g.groupby(level=levels, sort=sort).sum()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__var_agg__cov_combine.return.g": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__var_agg__cov_combine.return.g", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 320, "end_line": 340, "span_ids": ["_cov_combine", "_var_agg"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _var_agg(g, levels, ddof, sort=False):\n g = g.groupby(level=levels, sort=sort).sum()\n nc = len(g.columns)\n x = g[g.columns[: nc // 3]]\n # chunks columns are tuples (value, name), so we just keep the value part\n x2 = g[g.columns[nc // 3 : 2 * nc // 3]].rename(columns=lambda c: c[0])\n n = g[g.columns[-nc // 3 :]].rename(columns=lambda c: c[0])\n\n # TODO: replace with _finalize_var?\n result = x2 - x ** 2 / n\n div = n - ddof\n div[div < 0] = 0\n result /= div\n result[(n - ddof) == 0] = 
np.nan\n assert is_dataframe_like(result)\n result[result < 0] = 0 # avoid rounding errors that take us to zero\n return result\n\n\ndef _cov_combine(g, levels):\n return g", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__cov_finalizer__cov_finalizer.return.pd_Series_vals_index_ind": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__cov_finalizer__cov_finalizer.return.pd_Series_vals_index_ind", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 343, "end_line": 375, "span_ids": ["_cov_finalizer"], "tokens": 373}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _cov_finalizer(df, cols, std=False):\n vals = []\n num_elements = len(list(it.product(cols, repeat=2)))\n num_cols = len(cols)\n vals = list(range(num_elements))\n col_idx_mapping = dict(zip(cols, range(num_cols)))\n for i, j in it.combinations_with_replacement(df[cols].columns, 2):\n x = col_idx_mapping[i]\n y = col_idx_mapping[j]\n idx = x + num_cols * y\n mul_col = \"%s%s\" % (i, j)\n ni = df[\"%s-count\" % i]\n nj = df[\"%s-count\" % j]\n\n n = np.sqrt(ni * nj)\n div = n - 1\n div[div < 0] = 0\n val = (df[mul_col] - df[i] * df[j] / n).values[0] / div.values[0]\n if std:\n ii = \"%s%s\" % (i, i)\n jj = \"%s%s\" % (j, j)\n std_val_i = (df[ii] - (df[i] ** 2) / ni).values[0] / div.values[0]\n std_val_j = (df[jj] - (df[j] ** 2) / nj).values[0] / div.values[0]\n val = val / np.sqrt(std_val_i * std_val_j)\n\n vals[idx] = val\n if i != j:\n idx = num_cols * x + y\n vals[idx] = val\n\n level_1 = cols\n index = pd.MultiIndex.from_product([level_1, level_1])\n return pd.Series(vals, index=index)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__mul_cols__cov_chunk.return._x_mul_n_col_mapping_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__mul_cols__cov_chunk.return._x_mul_n_col_mapping_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 378, "end_line": 429, "span_ids": ["_mul_cols", "_cov_chunk"], "tokens": 411}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _mul_cols(df, cols):\n \"\"\"Internal function to be used with apply to multiply\n each 
column in a dataframe by every other column\n\n a b c -> a*a, a*b, b*b, b*c, c*c\n \"\"\"\n _df = type(df)()\n for i, j in it.combinations_with_replacement(cols, 2):\n col = \"%s%s\" % (i, j)\n _df[col] = df[i] * df[j]\n return _df\n\n\ndef _cov_chunk(df, *index):\n \"\"\"Covariance Chunk Logic\n\n Parameters\n ----------\n df : Pandas.DataFrame\n std : bool, optional\n When std=True we are calculating with Correlation\n\n Returns\n -------\n tuple\n Processed X, Multiplied Cols,\n \"\"\"\n if is_series_like(df):\n df = df.to_frame()\n df = df.copy()\n\n # mapping columns to str(numerical) values allows us to easily handle\n # arbitrary column names (numbers, string, empty strings)\n col_mapping = collections.OrderedDict()\n for i, c in enumerate(df.columns):\n col_mapping[c] = str(i)\n df = df.rename(columns=col_mapping)\n cols = df._get_numeric_data().columns\n\n # when grouping by external series don't exclude columns\n is_mask = any(is_series_like(s) for s in index)\n if not is_mask:\n index = [col_mapping[k] for k in index]\n cols = cols.drop(np.array(index))\n\n g = _groupby_raise_unaligned(df, by=index)\n x = g.sum()\n\n level = len(index)\n mul = g.apply(_mul_cols, cols=cols).reset_index(level=level, drop=True)\n n = g[x.columns].count().rename(columns=lambda c: \"{}-count\".format(c))\n return (x, mul, n, col_mapping)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__cov_agg__cov_agg.return.s_result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__cov_agg__cov_agg.return.s_result", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 432, "end_line": 486, "span_ids": ["_cov_agg"], "tokens": 463}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _cov_agg(_t, levels, ddof, std=False, sort=False):\n sums = []\n muls = []\n counts = []\n\n # sometime we get a series back from concat combiner\n t = list(_t)\n\n cols = t[0][0].columns\n for x, mul, n, col_mapping in t:\n sums.append(x)\n muls.append(mul)\n counts.append(n)\n col_mapping = col_mapping\n\n total_sums = concat(sums).groupby(level=levels, sort=sort).sum()\n total_muls = concat(muls).groupby(level=levels, sort=sort).sum()\n total_counts = concat(counts).groupby(level=levels).sum()\n result = (\n concat([total_sums, total_muls, total_counts], axis=1)\n .groupby(level=levels)\n .apply(_cov_finalizer, cols=cols, std=std)\n )\n\n inv_col_mapping = {v: k for k, v in col_mapping.items()}\n idx_vals = result.index.names\n idx_mapping = list()\n\n # when index is None we probably have selected a particular column\n # df.groupby('a')[['b']].cov()\n if len(idx_vals) == 1 and all(n is None for n in idx_vals):\n idx_vals = list(set(inv_col_mapping.keys()) - set(total_sums.columns))\n\n for idx, val in enumerate(idx_vals):\n idx_name = inv_col_mapping.get(val, val)\n 
idx_mapping.append(idx_name)\n\n if len(result.columns.levels[0]) < len(col_mapping):\n # removing index from col_mapping (produces incorrect multiindexes)\n try:\n col_mapping.pop(idx_name)\n except KeyError:\n # when slicing the col_map will not have the index\n pass\n\n keys = list(col_mapping.keys())\n for level in range(len(result.columns.levels)):\n result.columns.set_levels(keys, level=level, inplace=True)\n\n result.index.set_names(idx_mapping, inplace=True)\n\n # stacking can lead to a sorted index\n s_result = result.stack(dropna=False)\n assert is_dataframe_like(s_result)\n return s_result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_None_34__nunique_df_chunk.return.grouped": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_None_34__nunique_df_chunk.return.grouped", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 489, "end_line": 515, "span_ids": ["_cov_agg", "_nunique_df_chunk"], "tokens": 204}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "###############################################################\n# nunique\n###############################################################\n\n\ndef _nunique_df_chunk(df, *index, **kwargs):\n levels = kwargs.pop(\"levels\")\n name = kwargs.pop(\"name\")\n\n g = _groupby_raise_unaligned(df, by=index)\n if len(df) > 0:\n grouped = g[[name]].apply(M.drop_duplicates)\n # we set the index here to force a possibly duplicate index\n # for our reduce step\n if isinstance(levels, list):\n grouped.index = pd.MultiIndex.from_arrays(\n [grouped.index.get_level_values(level=level) for level in levels]\n )\n else:\n grouped.index = grouped.index.get_level_values(level=levels)\n else:\n # Manually create empty version, since groupby-apply for empty frame\n # results in df with no columns\n grouped = g[[name]].nunique()\n grouped = grouped.astype(df.dtypes[grouped.columns].to_dict())\n\n return grouped", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__drop_duplicates_rename__make_agg_id.return._s_s_format_fun": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__drop_duplicates_rename__make_agg_id.return._s_s_format_fun", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 518, "end_line": 566, "span_ids": ["_drop_duplicates_rename", "_nunique_series_chunk", "_nunique_df_aggregate", "_nunique_df_combine", "_make_agg_id"], "tokens": 393}, 
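The `_nunique_df_chunk` logic above rests on one observation: distinct (group, value) pairs survive concatenation, so each partition only needs to ship its duplicates-dropped rows before a final exact count. A minimal pandas-only sketch of that idea, assuming two hypothetical partition frames `part1` and `part2` (illustrative, not dask API):

import pandas as pd

part1 = pd.DataFrame({"g": ["a", "a", "b"], "x": [1, 1, 2]})
part2 = pd.DataFrame({"g": ["a", "b", "b"], "x": [1, 2, 3]})

# chunk step: shrink each partition to its distinct (group, value) pairs,
# mirroring the per-group drop_duplicates in _nunique_df_chunk
chunks = [p.drop_duplicates(["g", "x"]) for p in (part1, part2)]

# aggregate step: duplicates across partitions collapse in the final
# groupby, so the count of unique values is exact
result = pd.concat(chunks).groupby("g")["x"].nunique()
print(result)  # a -> 1, b -> 2

The intermediates stay small whenever each group has few distinct values, which is what makes the staged reduction in the real implementation pay off.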
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _drop_duplicates_rename(df):\n # Avoid duplicate index labels in a groupby().apply() context\n # https://github.com/dask/dask/issues/3039\n # https://github.com/pandas-dev/pandas/pull/18882\n names = [None] * df.index.nlevels\n return df.drop_duplicates().rename_axis(names, copy=False)\n\n\ndef _nunique_df_combine(df, levels, sort=False):\n result = df.groupby(level=levels, sort=sort).apply(_drop_duplicates_rename)\n\n if isinstance(levels, list):\n result.index = pd.MultiIndex.from_arrays(\n [result.index.get_level_values(level=level) for level in levels]\n )\n else:\n result.index = result.index.get_level_values(level=levels)\n\n return result\n\n\ndef _nunique_df_aggregate(df, levels, name, sort=False):\n return df.groupby(level=levels, sort=sort)[name].nunique()\n\n\ndef _nunique_series_chunk(df, *index, **_ignored_):\n # convert series to data frame, then hand over to dataframe code path\n assert is_series_like(df)\n\n df = df.to_frame()\n kwargs = dict(name=df.columns[0], levels=_determine_levels(index))\n return _nunique_df_chunk(df, *index, **kwargs)\n\n\n###############################################################\n# Aggregate support\n#\n# Aggregate is implemented as:\n#\n# 1. group-by-aggregate all partitions into intermediate values\n# 2. collect all partitions into a single partition\n# 3. group-by-aggregate the result into intermediate values\n# 4. transform all intermediate values into the result\n#\n# In Step 1 and 3 the dataframe is grouped on the same columns.\n#\n###############################################################\ndef _make_agg_id(func, column):\n return \"{!s}-{!s}-{}\".format(func, column, tokenize(func, column))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__normalize_spec__normalize_spec.return.res": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__normalize_spec__normalize_spec.return.res", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 569, "end_line": 652, "span_ids": ["_normalize_spec"], "tokens": 819}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _normalize_spec(spec, non_group_columns):\n \"\"\"\n Return a list of ``(result_column, func, input_column)`` tuples.\n\n Spec can be\n\n - a function\n - a list of functions\n - a dictionary that maps input-columns to functions\n - a dictionary that maps input-columns to a lists of functions\n - a dictionary that maps input-columns to a dictionaries that map\n output-columns to functions.\n\n The non-group columns are a 
list of all column names that are not used in\n the groupby operation.\n\n Usually, the result columns are mutli-level names, returned as tuples.\n If only a single function is supplied or dictionary mapping columns\n to single functions, simple names are returned as strings (see the first\n two examples below).\n\n Examples\n --------\n >>> _normalize_spec('mean', ['a', 'b', 'c'])\n [('a', 'mean', 'a'), ('b', 'mean', 'b'), ('c', 'mean', 'c')]\n\n >>> spec = collections.OrderedDict([('a', 'mean'), ('b', 'count')])\n >>> _normalize_spec(spec, ['a', 'b', 'c'])\n [('a', 'mean', 'a'), ('b', 'count', 'b')]\n\n >>> _normalize_spec(['var', 'mean'], ['a', 'b', 'c'])\n ... # doctest: +NORMALIZE_WHITESPACE\n [(('a', 'var'), 'var', 'a'), (('a', 'mean'), 'mean', 'a'), \\\n (('b', 'var'), 'var', 'b'), (('b', 'mean'), 'mean', 'b'), \\\n (('c', 'var'), 'var', 'c'), (('c', 'mean'), 'mean', 'c')]\n\n >>> spec = collections.OrderedDict([('a', 'mean'), ('b', ['sum', 'count'])])\n >>> _normalize_spec(spec, ['a', 'b', 'c'])\n ... # doctest: +NORMALIZE_WHITESPACE\n [(('a', 'mean'), 'mean', 'a'), (('b', 'sum'), 'sum', 'b'), \\\n (('b', 'count'), 'count', 'b')]\n\n >>> spec = collections.OrderedDict()\n >>> spec['a'] = ['mean', 'size']\n >>> spec['b'] = collections.OrderedDict([('e', 'count'), ('f', 'var')])\n >>> _normalize_spec(spec, ['a', 'b', 'c'])\n ... # doctest: +NORMALIZE_WHITESPACE\n [(('a', 'mean'), 'mean', 'a'), (('a', 'size'), 'size', 'a'), \\\n (('b', 'e'), 'count', 'b'), (('b', 'f'), 'var', 'b')]\n \"\"\"\n if not isinstance(spec, dict):\n spec = collections.OrderedDict(zip(non_group_columns, it.repeat(spec)))\n\n res = []\n\n if isinstance(spec, dict):\n for input_column, subspec in spec.items():\n if isinstance(subspec, dict):\n res.extend(\n ((input_column, result_column), func, input_column)\n for result_column, func in subspec.items()\n )\n\n else:\n if not isinstance(subspec, list):\n subspec = [subspec]\n\n res.extend(\n ((input_column, funcname(func)), func, input_column)\n for func in subspec\n )\n\n else:\n raise ValueError(\"unsupported agg spec of type {}\".format(type(spec)))\n\n compounds = (list, tuple, dict)\n use_flat_columns = not any(\n isinstance(subspec, compounds) for subspec in spec.values()\n )\n\n if use_flat_columns:\n res = [(input_col, func, input_col) for (_, func, input_col) in res]\n\n return res", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args__build_agg_args.return.chunks_aggs_finalizers": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args__build_agg_args.return.chunks_aggs_finalizers", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 655, "end_line": 711, "span_ids": ["_build_agg_args"], "tokens": 460}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _build_agg_args(spec):\n \"\"\"\n Create 
transformation functions for a normalized aggregate spec.\n\n    Parameters\n    ----------\n    spec: a list of (result-column, aggregation-function, input-column) triples.\n        To work with all argument forms understood by pandas use\n        ``_normalize_spec`` to normalize the argument before passing it on to\n        ``_build_agg_args``.\n\n    Returns\n    -------\n    chunk_funcs: a list of (intermediate-column, function, keyword) triples\n        that are applied on grouped chunks of the initial dataframe.\n\n    agg_funcs: a list of (intermediate-column, function, keyword) triples that\n        are applied on the grouped concatenation of the preprocessed chunks.\n\n    finalizers: a list of (result-column, function, keyword) triples that are\n        applied after the ``agg_funcs``. They are used to create final results\n        from intermediate representations.\n    \"\"\"\n    known_np_funcs = {np.min: \"min\", np.max: \"max\"}\n\n    # check that there are no name conflicts for a single input column\n    by_name = {}\n    for _, func, input_column in spec:\n        key = funcname(known_np_funcs.get(func, func)), input_column\n        by_name.setdefault(key, []).append((func, input_column))\n\n    for funcs in by_name.values():\n        if len(funcs) != 1:\n            raise ValueError(\"conflicting aggregation functions: {}\".format(funcs))\n\n    chunks = {}\n    aggs = {}\n    finalizers = []\n\n    for (result_column, func, input_column) in spec:\n        if not isinstance(func, Aggregation):\n            func = funcname(known_np_funcs.get(func, func))\n\n        impls = _build_agg_args_single(result_column, func, input_column)\n\n        # overwrite existing result-columns, generate intermediates only once\n        for spec in impls[\"chunk_funcs\"]:\n            chunks[spec[0]] = spec\n        for spec in impls[\"aggregate_funcs\"]:\n            aggs[spec[0]] = spec\n\n        finalizers.append(impls[\"finalizer\"])\n\n    chunks = sorted(chunks.values())\n    aggs = sorted(aggs.values())\n\n    return chunks, aggs, finalizers", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args_single__build_agg_args_single.if_func_in_simple_impl_ke.else_.raise_ValueError_unknown": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args_single__build_agg_args_single.if_func_in_simple_impl_ke.else_.raise_ValueError_unknown", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 714, "end_line": 747, "span_ids": ["_build_agg_args_single"], "tokens": 265}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _build_agg_args_single(result_column, func, input_column):\n    simple_impl = {\n        \"sum\": (M.sum, M.sum),\n        \"min\": (M.min, M.min),\n        \"max\": (M.max, M.max),\n        \"count\": (M.count, M.sum),\n        \"size\": (M.size, M.sum),\n        \"first\": (M.first, M.first),\n        \"last\": (M.last, M.last),\n        \"prod\": (M.prod, M.prod),\n    }\n\n    if func in simple_impl.keys():\n        return _build_agg_args_simple(\n            result_column, func, input_column, simple_impl[func]\n        )\n\n    
elif func == \"var\":\n return _build_agg_args_var(result_column, func, input_column)\n\n elif func == \"std\":\n return _build_agg_args_std(result_column, func, input_column)\n\n elif func == \"mean\":\n return _build_agg_args_mean(result_column, func, input_column)\n\n elif func == \"list\":\n return _build_agg_args_list(result_column, func, input_column)\n\n elif isinstance(func, Aggregation):\n return _build_agg_args_custom(result_column, func, input_column)\n\n else:\n raise ValueError(\"unknown aggregate {}\".format(func))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args_simple__build_agg_args_simple.return.dict_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args_simple__build_agg_args_simple.return.dict_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 747, "end_line": 767, "span_ids": ["_build_agg_args_simple"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _build_agg_args_simple(result_column, func, input_column, impl_pair):\n intermediate = _make_agg_id(func, input_column)\n chunk_impl, agg_impl = impl_pair\n\n return dict(\n chunk_funcs=[\n (\n intermediate,\n _apply_func_to_column,\n dict(column=input_column, func=chunk_impl),\n )\n ],\n aggregate_funcs=[\n (\n intermediate,\n _apply_func_to_column,\n dict(column=intermediate, func=agg_impl),\n )\n ],\n finalizer=(result_column, itemgetter(intermediate), dict()),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args_var__build_agg_args_var.return.dict_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args_var__build_agg_args_var.return.dict_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 770, "end_line": 790, "span_ids": ["_build_agg_args_var"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _build_agg_args_var(result_column, func, input_column):\n int_sum = _make_agg_id(\"sum\", input_column)\n int_sum2 = _make_agg_id(\"sum2\", input_column)\n int_count = _make_agg_id(\"count\", input_column)\n\n return dict(\n chunk_funcs=[\n (int_sum, 
_apply_func_to_column, dict(column=input_column, func=M.sum)),\n (int_count, _apply_func_to_column, dict(column=input_column, func=M.count)),\n (int_sum2, _compute_sum_of_squares, dict(column=input_column)),\n ],\n aggregate_funcs=[\n (col, _apply_func_to_column, dict(column=col, func=M.sum))\n for col in (int_sum, int_count, int_sum2)\n ],\n finalizer=(\n result_column,\n _finalize_var,\n dict(sum_column=int_sum, count_column=int_count, sum2_column=int_sum2),\n ),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args_std__build_agg_args_mean.return.dict_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args_std__build_agg_args_mean.return.dict_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 793, "end_line": 820, "span_ids": ["_build_agg_args_std", "_build_agg_args_mean"], "tokens": 224}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _build_agg_args_std(result_column, func, input_column):\n impls = _build_agg_args_var(result_column, func, input_column)\n\n result_column, _, kwargs = impls[\"finalizer\"]\n impls[\"finalizer\"] = (result_column, _finalize_std, kwargs)\n\n return impls\n\n\ndef _build_agg_args_mean(result_column, func, input_column):\n int_sum = _make_agg_id(\"sum\", input_column)\n int_count = _make_agg_id(\"count\", input_column)\n\n return dict(\n chunk_funcs=[\n (int_sum, _apply_func_to_column, dict(column=input_column, func=M.sum)),\n (int_count, _apply_func_to_column, dict(column=input_column, func=M.count)),\n ],\n aggregate_funcs=[\n (col, _apply_func_to_column, dict(column=col, func=M.sum))\n for col in (int_sum, int_count)\n ],\n finalizer=(\n result_column,\n _finalize_mean,\n dict(sum_column=int_sum, count_column=int_count),\n ),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args_custom__build_agg_args_custom.return.dict_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args_custom__build_agg_args_custom.return.dict_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 823, "end_line": 844, "span_ids": ["_build_agg_args_custom"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
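`_build_agg_args_mean` above decomposes mean into two simple intermediates that each reduce with a plain sum. A hedged pandas-only sketch of that decomposition, with the two frames standing in for dataframe partitions (names are illustrative, not dask API):

import pandas as pd

parts = [
    pd.DataFrame({"g": ["a", "b"], "x": [1.0, 2.0]}),
    pd.DataFrame({"g": ["a", "b"], "x": [3.0, 6.0]}),
]

# chunk_funcs: per-partition groupby sum and count intermediates
chunked = [p.groupby("g")["x"].agg(["sum", "count"]) for p in parts]

# aggregate_funcs: both intermediates reduce with a plain sum
totals = pd.concat(chunked).groupby(level=0).sum()

# finalizer: mean = total sum / total count (cf. _finalize_mean)
print(totals["sum"] / totals["count"])  # a -> 2.0, b -> 4.0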
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _build_agg_args_custom(result_column, func, input_column):\n col = _make_agg_id(funcname(func), input_column)\n\n if func.finalize is None:\n finalizer = (result_column, operator.itemgetter(col), dict())\n\n else:\n finalizer = (\n result_column,\n _apply_func_to_columns,\n dict(func=func.finalize, prefix=col),\n )\n\n return dict(\n chunk_funcs=[\n (col, _apply_func_to_column, dict(func=func.chunk, column=input_column))\n ],\n aggregate_funcs=[\n (col, _apply_func_to_columns, dict(func=func.agg, prefix=col))\n ],\n finalizer=finalizer,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_apply_funcs__groupby_apply_funcs.if_is_dataframe_like_df_.else_.return.type_df_head_0_to_frame_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_apply_funcs__groupby_apply_funcs.if_is_dataframe_like_df_.else_.return.type_df_head_0_to_frame_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 847, "end_line": 894, "span_ids": ["_groupby_apply_funcs"], "tokens": 342}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _groupby_apply_funcs(df, *index, **kwargs):\n \"\"\"\n Group a dataframe and apply multiple aggregation functions.\n\n Parameters\n ----------\n df: pandas.DataFrame\n The dataframe to work on.\n index: list of groupers\n If given, they are added to the keyword arguments as the ``by``\n argument.\n funcs: list of result-colum, function, keywordargument triples\n The list of functions that are applied on the grouped data frame.\n Has to be passed as a keyword argument.\n kwargs:\n All keyword arguments, but ``funcs``, are passed verbatim to the groupby\n operation of the dataframe\n\n Returns\n -------\n aggregated:\n the aggregated dataframe.\n \"\"\"\n if len(index):\n # since we're coming through apply, `by` will be a tuple.\n # Pandas treats tuples as a single key, and lists as multiple keys\n # We want multiple keys\n kwargs.update(by=list(index))\n\n funcs = kwargs.pop(\"funcs\")\n grouped = _groupby_raise_unaligned(df, **kwargs)\n\n result = collections.OrderedDict()\n for result_column, func, func_kwargs in funcs:\n r = func(grouped, **func_kwargs)\n\n if isinstance(r, tuple):\n for idx, s in enumerate(r):\n result[\"{}-{}\".format(result_column, idx)] = s\n\n else:\n result[result_column] = r\n\n if is_dataframe_like(df):\n return type(df)(result)\n else:\n # Get the DataFrame type of this Series object\n return type(df.head(0).to_frame())(result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__compute_sum_of_squares__compute_sum_of_squares.return.df_groupby_keys_sum_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__compute_sum_of_squares__compute_sum_of_squares.return.df_groupby_keys_sum_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 897, "end_line": 906, "span_ids": ["_compute_sum_of_squares"], "tokens": 109}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _compute_sum_of_squares(grouped, column):\n # Note: CuDF cannot use `groupby.apply`.\n # Need to unpack groupby to compute sum of squares\n if hasattr(grouped, \"grouper\"):\n keys = grouped.grouper\n else:\n # Handle CuDF groupby object (different from pandas)\n keys = grouped.grouping.keys\n df = grouped.obj[column].pow(2) if column else grouped.obj.pow(2)\n return df.groupby(keys).sum()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__agg_finalize__cumcount_aggregate.return.a_add_b_fill_value_fill_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__agg_finalize__cumcount_aggregate.return.a_add_b_fill_value_fill_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 909, "end_line": 980, "span_ids": ["_agg_finalize", "_cum_agg_filled", "_finalize_var", "_finalize_std", "_apply_func_to_column", "_cumcount_aggregate", "_apply_func_to_columns", "_finalize_mean", "_cum_agg_aligned"], "tokens": 523}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _agg_finalize(df, aggregate_funcs, finalize_funcs, level, sort=False):\n # finish the final aggregation level\n df = _groupby_apply_funcs(df, funcs=aggregate_funcs, level=level, sort=sort)\n\n # and finalize the result\n result = collections.OrderedDict()\n for result_column, func, kwargs in finalize_funcs:\n result[result_column] = func(df, **kwargs)\n\n return type(df)(result)\n\n\ndef _apply_func_to_column(df_like, column, func):\n if column is None:\n return func(df_like)\n\n return func(df_like[column])\n\n\ndef _apply_func_to_columns(df_like, prefix, func):\n if is_dataframe_like(df_like):\n columns = df_like.columns\n else:\n # handle GroupBy objects\n columns = df_like._selected_obj.columns\n\n columns = sorted(col for col in columns if col.startswith(prefix))\n\n columns = [df_like[col] for col in columns]\n return 
func(*columns)\n\n\ndef _finalize_mean(df, sum_column, count_column):\n    return df[sum_column] / df[count_column]\n\n\ndef _finalize_var(df, count_column, sum_column, sum2_column, ddof=1):\n    n = df[count_column]\n    x = df[sum_column]\n    x2 = df[sum2_column]\n\n    result = x2 - x ** 2 / n\n    div = n - ddof\n    div[div < 0] = 0\n    result /= div\n    result[(n - ddof) == 0] = np.nan\n\n    return result\n\n\ndef _finalize_std(df, count_column, sum_column, sum2_column, ddof=1):\n    result = _finalize_var(df, count_column, sum_column, sum2_column, ddof)\n    return np.sqrt(result)\n\n\ndef _cum_agg_aligned(part, cum_last, index, columns, func, initial):\n    align = cum_last.reindex(part.set_index(index).index, fill_value=initial)\n    align.index = part.index\n    return func(part[columns], align)\n\n\ndef _cum_agg_filled(a, b, func, initial):\n    union = a.index.union(b.index)\n    return func(\n        a.reindex(union, fill_value=initial),\n        b.reindex(union, fill_value=initial),\n        fill_value=initial,\n    )\n\n\ndef _cumcount_aggregate(a, b, fill_value=None):\n    return a.add(b, fill_value=fill_value) + 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy__GroupBy.__init__.self._meta.self_obj__meta_groupby_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy__GroupBy.__init__.self._meta.self_obj__meta_groupby_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 983, "end_line": 1051, "span_ids": ["_GroupBy", "_GroupBy.__init__"], "tokens": 458}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _GroupBy(object):\n    \"\"\"Superclass for DataFrameGroupBy and SeriesGroupBy\n\n    Parameters\n    ----------\n\n    obj: DataFrame or Series\n        DataFrame or Series to be grouped\n    by: str, list or Series\n        The key for grouping\n    slice: str, list\n        The slice keys applied to GroupBy result\n    group_keys: bool\n        Passed to pandas.DataFrame.groupby()\n    dropna: bool\n        Whether to drop null values from groupby index\n    sort: bool, default None\n        Passed along to aggregation methods. 
If allowed,\n the output aggregation will have sorted keys.\n \"\"\"\n\n def __init__(\n self, df, by=None, slice=None, group_keys=True, dropna=None, sort=None\n ):\n\n assert isinstance(df, (DataFrame, Series))\n self.group_keys = group_keys\n self.obj = df\n # grouping key passed via groupby method\n self.index = _normalize_index(df, by)\n self.sort = sort\n\n if isinstance(self.index, list):\n do_index_partition_align = all(\n item.npartitions == df.npartitions if isinstance(item, Series) else True\n for item in self.index\n )\n elif isinstance(self.index, Series):\n do_index_partition_align = df.npartitions == self.index.npartitions\n else:\n do_index_partition_align = True\n\n if not do_index_partition_align:\n raise NotImplementedError(\n \"The grouped object and index of the \"\n \"groupby must have the same divisions.\"\n )\n\n # slicing key applied to _GroupBy instance\n self._slice = slice\n\n if isinstance(self.index, list):\n index_meta = [\n item._meta if isinstance(item, Series) else item for item in self.index\n ]\n\n elif isinstance(self.index, Series):\n index_meta = self.index._meta\n\n else:\n index_meta = self.index\n\n self.dropna = {}\n if dropna is not None:\n self.dropna[\"dropna\"] = dropna\n\n self._meta = self.obj._meta.groupby(\n index_meta, group_keys=group_keys, **self.dropna\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy._meta_nonempty__GroupBy._meta_nonempty.return._maybe_slice_grouped_sel": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy._meta_nonempty__GroupBy._meta_nonempty.return._maybe_slice_grouped_sel", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1053, "end_line": 1073, "span_ids": ["_GroupBy._meta_nonempty"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _GroupBy(object):\n\n @property\n def _meta_nonempty(self):\n \"\"\"\n Return a pd.DataFrameGroupBy / pd.SeriesGroupBy which contains sample data.\n \"\"\"\n sample = self.obj._meta_nonempty\n\n if isinstance(self.index, list):\n index_meta = [\n item._meta_nonempty if isinstance(item, Series) else item\n for item in self.index\n ]\n\n elif isinstance(self.index, Series):\n index_meta = self.index._meta_nonempty\n\n else:\n index_meta = self.index\n\n grouped = sample.groupby(index_meta, group_keys=self.group_keys, **self.dropna)\n return _maybe_slice(grouped, self._slice)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy._aca_agg__GroupBy._aca_agg.return.aca_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy._aca_agg__GroupBy._aca_agg.return.aca_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1075, "end_line": 1112, "span_ids": ["_GroupBy._aca_agg"], "tokens": 246}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _GroupBy(object):\n\n def _aca_agg(\n self,\n token,\n func,\n aggfunc=None,\n split_every=None,\n split_out=1,\n chunk_kwargs={},\n aggregate_kwargs={},\n ):\n if aggfunc is None:\n aggfunc = func\n\n meta = func(self._meta_nonempty)\n columns = meta.name if is_series_like(meta) else meta.columns\n\n token = self._token_prefix + token\n levels = _determine_levels(self.index)\n\n return aca(\n [self.obj, self.index]\n if not isinstance(self.index, list)\n else [self.obj] + self.index,\n chunk=_apply_chunk,\n chunk_kwargs=dict(\n chunk=func, columns=columns, **chunk_kwargs, **self.dropna\n ),\n aggregate=_groupby_aggregate,\n meta=meta,\n token=token,\n split_every=split_every,\n aggregate_kwargs=dict(\n aggfunc=aggfunc, levels=levels, **aggregate_kwargs, **self.dropna\n ),\n split_out=split_out,\n split_out_setup=split_out_on_index,\n sort=self.sort,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy._cum_agg__GroupBy._cum_agg.return.new_dd_object_graph_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy._cum_agg__GroupBy._cum_agg.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1114, "end_line": 1198, "span_ids": ["_GroupBy._cum_agg"], "tokens": 675}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _GroupBy(object):\n\n def _cum_agg(self, token, chunk, aggregate, initial):\n \"\"\" Wrapper for cumulative groupby operation \"\"\"\n meta = chunk(self._meta)\n columns = meta.name if is_series_like(meta) else meta.columns\n index = self.index if isinstance(self.index, list) else [self.index]\n\n name = self._token_prefix + token\n name_part = name + \"-map\"\n name_last = name + \"-take-last\"\n name_cum = name + \"-cum-last\"\n\n # cumulate each partitions\n cumpart_raw = map_partitions(\n _apply_chunk,\n self.obj,\n *index,\n chunk=chunk,\n columns=columns,\n token=name_part,\n meta=meta,\n **self.dropna\n )\n\n cumpart_raw_frame = (\n cumpart_raw.to_frame() if is_series_like(meta) else cumpart_raw\n )\n\n cumpart_ext = cumpart_raw_frame.assign(\n **{\n i: 
self.obj[i]\n if np.isscalar(i) and i in self.obj.columns\n else self.obj.index\n for i in index\n }\n )\n\n # Use pd.Grouper objects to specify that we are grouping by columns.\n # Otherwise, pandas will throw an ambiguity warning if the\n # DataFrame's index (self.obj.index) was included in the grouping\n # specification (self.index). See pandas #14432\n index_groupers = [pd.Grouper(key=ind) for ind in index]\n cumlast = map_partitions(\n _apply_chunk,\n cumpart_ext,\n *index_groupers,\n columns=0 if columns is None else columns,\n chunk=M.last,\n meta=meta,\n token=name_last,\n **self.dropna\n )\n\n # aggregate cumulated partitions and its previous last element\n _hash = tokenize(self, token, chunk, aggregate, initial)\n name += \"-\" + _hash\n name_cum += \"-\" + _hash\n dask = {}\n dask[(name, 0)] = (cumpart_raw._name, 0)\n\n for i in range(1, self.obj.npartitions):\n # store each cumulative step to graph to reduce computation\n if i == 1:\n dask[(name_cum, i)] = (cumlast._name, i - 1)\n else:\n # aggregate with previous cumulation results\n dask[(name_cum, i)] = (\n _cum_agg_filled,\n (name_cum, i - 1),\n (cumlast._name, i - 1),\n aggregate,\n initial,\n )\n dask[(name, i)] = (\n _cum_agg_aligned,\n (cumpart_ext._name, i),\n (name_cum, i),\n index,\n 0 if columns is None else columns,\n aggregate,\n initial,\n )\n graph = HighLevelGraph.from_collections(\n name, dask, dependencies=[cumpart_raw, cumpart_ext, cumlast]\n )\n return new_dd_object(graph, name, chunk(self._meta), self.obj.divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy._shuffle__GroupBy._shuffle.return.df4_index2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy._shuffle__GroupBy._shuffle.return.df4_index2", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1200, "end_line": 1246, "span_ids": ["_GroupBy._shuffle"], "tokens": 377}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _GroupBy(object):\n\n def _shuffle(self, meta):\n df = self.obj\n\n if isinstance(self.obj, Series):\n # Temporarily convert series to dataframe for shuffle\n df = df.to_frame(\"__series__\")\n convert_back_to_series = True\n else:\n convert_back_to_series = False\n\n if isinstance(self.index, DataFrame): # add index columns to dataframe\n df2 = df.assign(\n **{\"_index_\" + c: self.index[c] for c in self.index.columns}\n )\n index = self.index\n elif isinstance(self.index, Series):\n df2 = df.assign(_index=self.index)\n index = self.index\n else:\n df2 = df\n index = df._select_columns_or_index(self.index)\n\n df3 = shuffle(df2, index) # shuffle dataframe and index\n\n if isinstance(self.index, DataFrame):\n # extract index from dataframe\n cols = [\"_index_\" + c for c in self.index.columns]\n index2 = df3[cols]\n if is_dataframe_like(meta):\n df4 = 
df3.map_partitions(drop_columns, cols, meta.columns.dtype)\n else:\n df4 = df3.drop(cols, axis=1)\n elif isinstance(self.index, Series):\n index2 = df3[\"_index\"]\n index2.name = self.index.name\n if is_dataframe_like(meta):\n df4 = df3.map_partitions(drop_columns, \"_index\", meta.columns.dtype)\n else:\n df4 = df3.drop(\"_index\", axis=1)\n else:\n df4 = df3\n index2 = self.index\n\n if convert_back_to_series:\n df4 = df4[\"__series__\"].rename(self.obj.name)\n\n return df4, index2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.cumsum__GroupBy.mean.return.s_c": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.cumsum__GroupBy.mean.return.s_c", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1248, "end_line": 1338, "span_ids": ["_GroupBy.cumprod", "_GroupBy.idxmin", "_GroupBy.mean", "_GroupBy.sum", "_GroupBy.max", "_GroupBy.count", "_GroupBy.idxmax", "_GroupBy.cumcount", "_GroupBy.cumsum", "_GroupBy.prod", "_GroupBy.min"], "tokens": 755}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _GroupBy(object):\n\n @derived_from(pd.core.groupby.GroupBy)\n def cumsum(self, axis=0):\n if axis:\n return self.obj.cumsum(axis=axis)\n else:\n return self._cum_agg(\"cumsum\", chunk=M.cumsum, aggregate=M.add, initial=0)\n\n @derived_from(pd.core.groupby.GroupBy)\n def cumprod(self, axis=0):\n if axis:\n return self.obj.cumprod(axis=axis)\n else:\n return self._cum_agg(\"cumprod\", chunk=M.cumprod, aggregate=M.mul, initial=1)\n\n @derived_from(pd.core.groupby.GroupBy)\n def cumcount(self, axis=None):\n return self._cum_agg(\n \"cumcount\", chunk=M.cumcount, aggregate=_cumcount_aggregate, initial=-1\n )\n\n @derived_from(pd.core.groupby.GroupBy)\n def sum(self, split_every=None, split_out=1, min_count=None):\n result = self._aca_agg(\n token=\"sum\", func=M.sum, split_every=split_every, split_out=split_out\n )\n if min_count:\n return result.where(self.count() >= min_count, other=np.NaN)\n else:\n return result\n\n @derived_from(pd.core.groupby.GroupBy)\n def prod(self, split_every=None, split_out=1, min_count=None):\n result = self._aca_agg(\n token=\"prod\", func=M.prod, split_every=split_every, split_out=split_out\n )\n if min_count:\n return result.where(self.count() >= min_count, other=np.NaN)\n else:\n return result\n\n @derived_from(pd.core.groupby.GroupBy)\n def min(self, split_every=None, split_out=1):\n return self._aca_agg(\n token=\"min\", func=M.min, split_every=split_every, split_out=split_out\n )\n\n @derived_from(pd.core.groupby.GroupBy)\n def max(self, split_every=None, split_out=1):\n return self._aca_agg(\n token=\"max\", func=M.max, split_every=split_every, split_out=split_out\n )\n\n @derived_from(pd.DataFrame)\n def idxmin(self, split_every=None, split_out=1, axis=None, skipna=True):\n return self._aca_agg(\n 
token=\"idxmin\",\n func=M.idxmin,\n aggfunc=M.first,\n split_every=split_every,\n split_out=split_out,\n chunk_kwargs=dict(skipna=skipna),\n )\n\n @derived_from(pd.DataFrame)\n def idxmax(self, split_every=None, split_out=1, axis=None, skipna=True):\n return self._aca_agg(\n token=\"idxmax\",\n func=M.idxmax,\n aggfunc=M.first,\n split_every=split_every,\n split_out=split_out,\n chunk_kwargs=dict(skipna=skipna),\n )\n\n @derived_from(pd.core.groupby.GroupBy)\n def count(self, split_every=None, split_out=1):\n return self._aca_agg(\n token=\"count\",\n func=M.count,\n aggfunc=M.sum,\n split_every=split_every,\n split_out=split_out,\n )\n\n @derived_from(pd.core.groupby.GroupBy)\n def mean(self, split_every=None, split_out=1):\n s = self.sum(split_every=split_every, split_out=split_out)\n c = self.count(split_every=split_every, split_out=split_out)\n if is_dataframe_like(s):\n c = c[s.columns]\n return s / c", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.size__GroupBy.var.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.size__GroupBy.var.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1340, "end_line": 1374, "span_ids": ["_GroupBy.var", "_GroupBy.size"], "tokens": 257}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _GroupBy(object):\n\n @derived_from(pd.core.groupby.GroupBy)\n def size(self, split_every=None, split_out=1):\n return self._aca_agg(\n token=\"size\",\n func=M.size,\n aggfunc=M.sum,\n split_every=split_every,\n split_out=split_out,\n )\n\n @derived_from(pd.core.groupby.GroupBy)\n def var(self, ddof=1, split_every=None, split_out=1):\n levels = _determine_levels(self.index)\n result = aca(\n [self.obj, self.index]\n if not isinstance(self.index, list)\n else [self.obj] + self.index,\n chunk=_var_chunk,\n aggregate=_var_agg,\n combine=_var_combine,\n token=self._token_prefix + \"var\",\n aggregate_kwargs={\"ddof\": ddof, \"levels\": levels},\n combine_kwargs={\"levels\": levels},\n split_every=split_every,\n split_out=split_out,\n split_out_setup=split_out_on_index,\n sort=self.sort,\n )\n\n if isinstance(self.obj, Series):\n result = result[result.columns[0]]\n if self._slice:\n result = result[self._slice]\n\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.std__GroupBy.corr.return.self_cov_split_every_spli": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.std__GroupBy.corr.return.self_cov_split_every_spli", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1376, "end_line": 1387, "span_ids": ["_GroupBy.std", "_GroupBy.corr"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _GroupBy(object):\n\n @derived_from(pd.core.groupby.GroupBy)\n def std(self, ddof=1, split_every=None, split_out=1):\n v = self.var(ddof, split_every=split_every, split_out=split_out)\n result = map_partitions(np.sqrt, v, meta=v)\n return result\n\n @derived_from(pd.DataFrame)\n def corr(self, ddof=1, split_every=None, split_out=1):\n \"\"\"Groupby correlation:\n corr(X, Y) = cov(X, Y) / (std_x * std_y)\n \"\"\"\n return self.cov(split_every=split_every, split_out=split_out, std=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.cov__GroupBy.cov.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.cov__GroupBy.cov.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1389, "end_line": 1432, "span_ids": ["_GroupBy.cov"], "tokens": 359}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _GroupBy(object):\n\n @derived_from(pd.DataFrame)\n def cov(self, ddof=1, split_every=None, split_out=1, std=False):\n \"\"\"Groupby covariance is accomplished by\n\n 1. Computing intermediate values for sum, count, and the product of\n all columns: a b c -> a*a, a*b, b*b, b*c, c*c.\n\n 2. 
The values are then aggregated and the final covariance value is calculated:\n cov(X, Y) = mean(X * Y) - mean(X) * mean(Y)\n\n When `std` is True, the correlation is calculated instead of the covariance.\n \"\"\"\n\n levels = _determine_levels(self.index)\n\n is_mask = any(is_series_like(s) for s in self.index)\n if self._slice:\n if is_mask:\n self.obj = self.obj[self._slice]\n else:\n sliced_plus = list(self._slice) + list(self.index)\n self.obj = self.obj[sliced_plus]\n\n result = aca(\n [self.obj, self.index]\n if not isinstance(self.index, list)\n else [self.obj] + self.index,\n chunk=_cov_chunk,\n aggregate=_cov_agg,\n combine=_cov_combine,\n token=self._token_prefix + \"cov\",\n aggregate_kwargs={\"ddof\": ddof, \"levels\": levels, \"std\": std},\n combine_kwargs={\"levels\": levels},\n split_every=split_every,\n split_out=split_out,\n split_out_setup=split_out_on_index,\n sort=self.sort,\n )\n\n if isinstance(self.obj, Series):\n result = result[result.columns[0]]\n if self._slice:\n result = result[self._slice]\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.first__GroupBy.last.return.self__aca_agg_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.first__GroupBy.last.return.self__aca_agg_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1434, "end_line": 1444, "span_ids": ["_GroupBy.last", "_GroupBy.first"], "tokens": 114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _GroupBy(object):\n\n @derived_from(pd.core.groupby.GroupBy)\n def first(self, split_every=None, split_out=1):\n return self._aca_agg(\n token=\"first\", func=M.first, split_every=split_every, split_out=split_out\n )\n\n @derived_from(pd.core.groupby.GroupBy)\n def last(self, split_every=None, split_out=1):\n return self._aca_agg(\n token=\"last\", func=M.last, split_every=split_every, split_out=split_out\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.get_group__GroupBy.get_group.return.map_partitions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.get_group__GroupBy.get_group.return.map_partitions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1446, "end_line": 1463, "span_ids": ["_GroupBy.get_group"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", 
"tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _GroupBy(object):\n\n @derived_from(pd.core.groupby.GroupBy)\n def get_group(self, key):\n token = self._token_prefix + \"get_group\"\n\n meta = self._meta.obj\n if is_dataframe_like(meta) and self._slice is not None:\n meta = meta[self._slice]\n columns = meta.columns if is_dataframe_like(meta) else meta.name\n\n return map_partitions(\n _groupby_get_group,\n self.obj,\n self.index,\n key,\n columns,\n meta=meta,\n token=token,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.aggregate__GroupBy.aggregate.return.aca_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.aggregate__GroupBy.aggregate.return.aca_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1465, "end_line": 1542, "span_ids": ["_GroupBy.aggregate"], "tokens": 593}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _GroupBy(object):\n\n def aggregate(self, arg, split_every, split_out=1):\n if isinstance(self.obj, DataFrame):\n if isinstance(self.index, tuple) or np.isscalar(self.index):\n group_columns = {self.index}\n\n elif isinstance(self.index, list):\n group_columns = {\n i for i in self.index if isinstance(i, tuple) or np.isscalar(i)\n }\n\n else:\n group_columns = set()\n\n if self._slice:\n # pandas doesn't exclude the grouping column in a SeriesGroupBy\n # like df.groupby('a')['a'].agg(...)\n non_group_columns = self._slice\n if not isinstance(non_group_columns, list):\n non_group_columns = [non_group_columns]\n else:\n # NOTE: this step relies on the index normalization to replace\n # series with their name in an index.\n non_group_columns = [\n col for col in self.obj.columns if col not in group_columns\n ]\n\n spec = _normalize_spec(arg, non_group_columns)\n\n elif isinstance(self.obj, Series):\n if isinstance(arg, (list, tuple, dict)):\n # implementation detail: if self.obj is a series, a pseudo column\n # None is used to denote the series itself. 
This pseudo column is\n # removed from the result columns before passing the spec along.\n spec = _normalize_spec({None: arg}, [])\n spec = [\n (result_column, func, input_column)\n for ((_, result_column), func, input_column) in spec\n ]\n\n else:\n spec = _normalize_spec({None: arg}, [])\n spec = [\n (self.obj.name, func, input_column)\n for (_, func, input_column) in spec\n ]\n\n else:\n raise ValueError(\"aggregate on unknown object {}\".format(self.obj))\n\n chunk_funcs, aggregate_funcs, finalizers = _build_agg_args(spec)\n\n if isinstance(self.index, (tuple, list)) and len(self.index) > 1:\n levels = list(range(len(self.index)))\n else:\n levels = 0\n\n if not isinstance(self.index, list):\n chunk_args = [self.obj, self.index]\n\n else:\n chunk_args = [self.obj] + self.index\n\n return aca(\n chunk_args,\n chunk=_groupby_apply_funcs,\n chunk_kwargs=dict(funcs=chunk_funcs),\n combine=_groupby_apply_funcs,\n combine_kwargs=dict(funcs=aggregate_funcs, level=levels),\n aggregate=_agg_finalize,\n aggregate_kwargs=dict(\n aggregate_funcs=aggregate_funcs, finalize_funcs=finalizers, level=levels\n ),\n token=\"aggregate\",\n split_every=split_every,\n split_out=split_out,\n split_out_setup=split_out_on_index,\n sort=self.sort,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.apply__GroupBy.apply.return.df3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.apply__GroupBy.apply.return.df3", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1544, "end_line": 1630, "span_ids": ["_GroupBy.apply"], "tokens": 676}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _GroupBy(object):\n\n @insert_meta_param_description(pad=12)\n def apply(self, func, *args, **kwargs):\n \"\"\"Parallel version of pandas GroupBy.apply\n\n This mimics the pandas version except for the following:\n\n 1. If the grouper does not align with the index then this causes a full\n shuffle. The order of rows within each group may not be preserved.\n 2. Dask's GroupBy.apply is not appropriate for aggregations. For custom\n aggregations, use :class:`dask.dataframe.groupby.Aggregation`.\n\n .. warning::\n\n Pandas' groupby-apply can be used to apply arbitrary functions,\n including aggregations that result in one row per group. Dask's\n groupby-apply will apply ``func`` once to each partition-group pair,\n so when ``func`` is a reduction you'll end up with one row per\n partition-group pair. 
To apply a custom aggregation with Dask,\n use :class:`dask.dataframe.groupby.Aggregation`.\n\n Parameters\n ----------\n func: function\n Function to apply\n args, kwargs : Scalar, Delayed or object\n Arguments and keywords to pass to the function.\n $META\n\n Returns\n -------\n applied : Series or DataFrame depending on columns keyword\n \"\"\"\n meta = kwargs.get(\"meta\", no_default)\n\n if meta is no_default:\n with raise_on_meta_error(\n \"groupby.apply({0})\".format(funcname(func)), udf=True\n ):\n meta_args, meta_kwargs = _extract_meta((args, kwargs), nonempty=True)\n meta = self._meta_nonempty.apply(func, *meta_args, **meta_kwargs)\n\n msg = (\n \"`meta` is not specified, inferred from partial data. \"\n \"Please provide `meta` if the result is unexpected.\\n\"\n \" Before: .apply(func)\\n\"\n \" After: .apply(func, meta={'x': 'f8', 'y': 'f8'}) for dataframe result\\n\"\n \" or: .apply(func, meta=('x', 'f8')) for series result\"\n )\n warnings.warn(msg, stacklevel=2)\n\n meta = make_meta(meta)\n\n # Validate self.index\n if isinstance(self.index, list) and any(\n isinstance(item, Series) for item in self.index\n ):\n raise NotImplementedError(\n \"groupby-apply with multiple Series is currently not supported\"\n )\n\n df = self.obj\n should_shuffle = not (\n df.known_divisions and df._contains_index_name(self.index)\n )\n\n if should_shuffle:\n df2, index = self._shuffle(meta)\n else:\n df2 = df\n index = self.index\n\n # Perform embarrassingly parallel groupby-apply\n kwargs[\"meta\"] = meta\n df3 = map_partitions(\n _groupby_slice_apply,\n df2,\n index,\n self._slice,\n func,\n token=funcname(func),\n *args,\n group_keys=self.group_keys,\n **self.dropna,\n **kwargs\n )\n\n return df3", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.transform__GroupBy.transform.return.df3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.transform__GroupBy.transform.return.df3", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1632, "end_line": 1718, "span_ids": ["_GroupBy.transform"], "tokens": 672}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _GroupBy(object):\n\n @insert_meta_param_description(pad=12)\n def transform(self, func, *args, **kwargs):\n \"\"\"Parallel version of pandas GroupBy.transform\n\n This mimics the pandas version except for the following:\n\n 1. If the grouper does not align with the index then this causes a full\n shuffle. The order of rows within each group may not be preserved.\n 2. Dask's GroupBy.transform is not appropriate for aggregations. For custom\n aggregations, use :class:`dask.dataframe.groupby.Aggregation`.\n\n .. warning::\n\n Pandas' groupby-transform can be used to apply arbitrary functions,\n including aggregations that result in one row per group. 
Dask's\n groupby-transform will apply ``func`` once to each partition-group pair,\n so when ``func`` is a reduction you'll end up with one row per\n partition-group pair. To apply a custom aggregation with Dask,\n use :class:`dask.dataframe.groupby.Aggregation`.\n\n Parameters\n ----------\n func: function\n Function to apply\n args, kwargs : Scalar, Delayed or object\n Arguments and keywords to pass to the function.\n $META\n\n Returns\n -------\n applied : Series or DataFrame depending on columns keyword\n \"\"\"\n meta = kwargs.get(\"meta\", no_default)\n\n if meta is no_default:\n with raise_on_meta_error(\n \"groupby.transform({0})\".format(funcname(func)), udf=True\n ):\n meta_args, meta_kwargs = _extract_meta((args, kwargs), nonempty=True)\n meta = self._meta_nonempty.transform(func, *meta_args, **meta_kwargs)\n\n msg = (\n \"`meta` is not specified, inferred from partial data. \"\n \"Please provide `meta` if the result is unexpected.\\n\"\n \" Before: .transform(func)\\n\"\n \" After: .transform(func, meta={'x': 'f8', 'y': 'f8'}) for dataframe result\\n\"\n \" or: .transform(func, meta=('x', 'f8')) for series result\"\n )\n warnings.warn(msg, stacklevel=2)\n\n meta = make_meta(meta)\n\n # Validate self.index\n if isinstance(self.index, list) and any(\n isinstance(item, Series) for item in self.index\n ):\n raise NotImplementedError(\n \"groupby-transform with multiple Series is currently not supported\"\n )\n\n df = self.obj\n should_shuffle = not (\n df.known_divisions and df._contains_index_name(self.index)\n )\n\n if should_shuffle:\n df2, index = self._shuffle(meta)\n else:\n df2 = df\n index = self.index\n\n # Perform embarrassingly parallel groupby-transform\n kwargs[\"meta\"] = meta\n df3 = map_partitions(\n _groupby_slice_transform,\n df2,\n index,\n self._slice,\n func,\n token=funcname(func),\n *args,\n group_keys=self.group_keys,\n **self.dropna,\n **kwargs\n )\n\n return df3", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_DataFrameGroupBy_DataFrameGroupBy.agg.return.self_aggregate_arg_split": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_DataFrameGroupBy_DataFrameGroupBy.agg.return.self_aggregate_arg_split", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1751, "end_line": 1793, "span_ids": ["DataFrameGroupBy.__getitem__", "DataFrameGroupBy.__dir__", "DataFrameGroupBy.__getattr__", "DataFrameGroupBy.aggregate", "DataFrameGroupBy.agg", "DataFrameGroupBy"], "tokens": 295}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrameGroupBy(_GroupBy):\n\n _token_prefix = \"dataframe-groupby-\"\n\n def __getitem__(self, key):\n if isinstance(key, list):\n g = DataFrameGroupBy(\n self.obj, by=self.index, slice=key, sort=self.sort, **self.dropna\n )\n else:\n g = SeriesGroupBy(\n self.obj, by=self.index, slice=key, 
sort=self.sort, **self.dropna\n )\n\n # error is raised from pandas\n g._meta = g._meta[key]\n return g\n\n def __dir__(self):\n return sorted(\n set(\n dir(type(self))\n + list(self.__dict__)\n + list(filter(M.isidentifier, self.obj.columns))\n )\n )\n\n def __getattr__(self, key):\n try:\n return self[key]\n except KeyError as e:\n raise AttributeError(e) from e\n\n @derived_from(pd.core.groupby.DataFrameGroupBy)\n def aggregate(self, arg, split_every=None, split_out=1):\n if arg == \"size\":\n return self.size()\n\n return super().aggregate(arg, split_every=split_every, split_out=split_out)\n\n @derived_from(pd.core.groupby.DataFrameGroupBy)\n def agg(self, arg, split_every=None, split_out=1):\n return self.aggregate(arg, split_every=split_every, split_out=split_out)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_SeriesGroupBy.nunique_SeriesGroupBy.nunique.return.aca_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_SeriesGroupBy.nunique_SeriesGroupBy.nunique.return.aca_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1791, "end_line": 1827, "span_ids": ["SeriesGroupBy.nunique"], "tokens": 311}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class SeriesGroupBy(_GroupBy):\n\n @derived_from(pd.core.groupby.SeriesGroupBy)\n def nunique(self, split_every=None, split_out=1):\n \"\"\"\n Examples\n --------\n >>> import pandas as pd\n >>> import dask.dataframe as dd\n >>> d = {'col1': [1, 2, 3, 4], 'col2': [5, 6, 7, 8]}\n >>> df = pd.DataFrame(data=d)\n >>> ddf = dd.from_pandas(df, 2)\n >>> ddf.groupby(['col1']).col2.nunique().compute()\n \"\"\"\n name = self._meta.obj.name\n levels = _determine_levels(self.index)\n\n if isinstance(self.obj, DataFrame):\n chunk = _nunique_df_chunk\n\n else:\n chunk = _nunique_series_chunk\n\n return aca(\n [self.obj, self.index]\n if not isinstance(self.index, list)\n else [self.obj] + self.index,\n chunk=chunk,\n aggregate=_nunique_df_aggregate,\n combine=_nunique_df_combine,\n token=\"series-groupby-nunique\",\n chunk_kwargs={\"levels\": levels, \"name\": name},\n aggregate_kwargs={\"levels\": levels, \"name\": name},\n combine_kwargs={\"levels\": levels},\n split_every=split_every,\n split_out=split_out,\n split_out_setup=split_out_on_index,\n sort=self.sort,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_SeriesGroupBy.aggregate_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_SeriesGroupBy.aggregate_", "embedding": null, "metadata": {"file_path": 
"dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1857, "end_line": 1905, "span_ids": ["SeriesGroupBy.agg", "_value_counts_aggregate", "_unique_aggregate", "SeriesGroupBy.aggregate", "SeriesGroupBy.unique", "SeriesGroupBy.value_counts"], "tokens": 401}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class SeriesGroupBy(_GroupBy):\n\n @derived_from(pd.core.groupby.SeriesGroupBy)\n def aggregate(self, arg, split_every=None, split_out=1):\n result = super().aggregate(arg, split_every=split_every, split_out=split_out)\n if self._slice:\n result = result[self._slice]\n\n if not isinstance(arg, (list, dict)) and isinstance(result, DataFrame):\n result = result[result.columns[0]]\n\n return result\n\n @derived_from(pd.core.groupby.SeriesGroupBy)\n def agg(self, arg, split_every=None, split_out=1):\n return self.aggregate(arg, split_every=split_every, split_out=split_out)\n\n @derived_from(pd.core.groupby.SeriesGroupBy)\n def value_counts(self, split_every=None, split_out=1):\n return self._aca_agg(\n token=\"value_counts\",\n func=M.value_counts,\n aggfunc=_value_counts_aggregate,\n split_every=split_every,\n split_out=split_out,\n )\n\n @derived_from(pd.core.groupby.SeriesGroupBy)\n def unique(self, split_every=None, split_out=1):\n name = self._meta.obj.name\n return self._aca_agg(\n token=\"unique\",\n func=M.unique,\n aggfunc=_unique_aggregate,\n aggregate_kwargs={\"name\": name},\n split_every=split_every,\n split_out=split_out,\n )\n\n\ndef _unique_aggregate(series_gb, name=None):\n ret = pd.Series({k: v.explode().unique() for k, v in series_gb}, name=name)\n ret.index.names = series_gb.obj.index.names\n return ret\n\n\ndef _value_counts_aggregate(series_gb):\n to_concat = {k: v.sum(level=1) for k, v in series_gb}\n names = list(series_gb.obj.index.names)\n return pd.Series(pd.concat(to_concat, names=names))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/hyperloglog.py_compute_hll_array_compute_hll_array.return.series_reindex_np_arange_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/hyperloglog.py_compute_hll_array_compute_hll_array.return.series_reindex_np_arange_", "embedding": null, "metadata": {"file_path": "dask/dataframe/hyperloglog.py", "file_name": "hyperloglog.py", "file_type": "text/x-python", "category": "implementation", "start_line": 25, "end_line": 49, "span_ids": ["compute_hll_array"], "tokens": 227}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def compute_hll_array(obj, b):\n # b is the number of bits\n\n if not 8 <= b <= 16:\n raise ValueError(\"b should be between 8 and 16\")\n num_bits_discarded = 32 - b\n m 
= 1 << b\n\n # Get an array of the hashes\n hashes = hash_pandas_object(obj, index=False)\n if isinstance(hashes, pd.Series):\n hashes = hashes._values\n hashes = hashes.astype(np.uint32)\n\n # Of the first b bits, which is the first nonzero?\n j = hashes >> num_bits_discarded\n first_bit = compute_first_bit(hashes)\n\n # Pandas can do the max aggregation\n df = pd.DataFrame({\"j\": j, \"first_bit\": first_bit})\n series = df.groupby(\"j\").max()[\"first_bit\"]\n\n # Return a dense array so we can concat them and get a result\n # that is easy to deal with\n return series.reindex(np.arange(m), fill_value=0).values.astype(np.uint8)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/hyperloglog.py_reduce_state_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/hyperloglog.py_reduce_state_", "embedding": null, "metadata": {"file_path": "dask/dataframe/hyperloglog.py", "file_name": "hyperloglog.py", "file_type": "text/x-python", "category": "implementation", "start_line": 52, "end_line": 81, "span_ids": ["reduce_state", "estimate_count"], "tokens": 268}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def reduce_state(Ms, b):\n m = 1 << b\n\n # We concatenated all of the states, now we need to get the max\n # value for each j in both\n Ms = Ms.reshape((len(Ms) // m), m)\n return Ms.max(axis=0)\n\n\ndef estimate_count(Ms, b):\n m = 1 << b\n\n # Combine one last time\n M = reduce_state(Ms, b)\n\n # Estimate cardinality, no adjustments\n alpha = 0.7213 / (1 + 1.079 / m)\n E = alpha * m / (2.0 ** -(M.astype(\"f8\"))).sum() * m\n # ^^^^ starts as unsigned, need a signed type for\n # negation operator to do something useful\n\n # Apply adjustments for small / big cardinalities, if applicable\n if E < 2.5 * m:\n V = (M == 0).sum()\n if V:\n return m * np.log(m / V)\n if E > 2 ** 32 / 30.0:\n return -(2 ** 32) * np.log1p(-E / 2 ** 32)\n return E", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py_from_datetime_import_date__IndexerBase._make_meta.if_cindexer_is_None_.else_.return.self__meta_indexer_cin": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py_from_datetime_import_date__IndexerBase._make_meta.if_cindexer_is_None_.else_.return.self__meta_indexer_cin", "embedding": null, "metadata": {"file_path": "dask/dataframe/indexing.py", "file_name": "indexing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 35, "span_ids": ["imports", "_IndexerBase._name", "_IndexerBase", "_IndexerBase.__init__", "_IndexerBase._meta_indexer", "_IndexerBase._make_meta"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from datetime import datetime\nfrom collections import defaultdict\n\nimport bisect\nimport numpy as np\nimport pandas as pd\n\nfrom .core import new_dd_object, Series\nfrom ..array.core import Array\nfrom .utils import is_index_like, meta_nonempty\nfrom . import methods\nfrom ..base import tokenize\nfrom ..highlevelgraph import HighLevelGraph\n\n\nclass _IndexerBase(object):\n def __init__(self, obj):\n self.obj = obj\n\n @property\n def _name(self):\n return self.obj._name\n\n @property\n def _meta_indexer(self):\n raise NotImplementedError\n\n def _make_meta(self, iindexer, cindexer):\n \"\"\"\n get metadata\n \"\"\"\n if cindexer is None:\n return self.obj\n else:\n return self._meta_indexer[:, cindexer]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__iLocIndexer__iLocIndexer._iloc.return.self_obj_map_partitions_m": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__iLocIndexer__iLocIndexer._iloc.return.self_obj_map_partitions_m", "embedding": null, "metadata": {"file_path": "dask/dataframe/indexing.py", "file_name": "indexing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 38, "end_line": 73, "span_ids": ["_iLocIndexer._iloc", "_iLocIndexer", "_iLocIndexer.__getitem__", "_iLocIndexer._meta_indexer"], "tokens": 265}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _iLocIndexer(_IndexerBase):\n @property\n def _meta_indexer(self):\n return self.obj._meta.iloc\n\n def __getitem__(self, key):\n\n # dataframe\n msg = (\n \"'DataFrame.iloc' only supports selecting columns. 
\"\n \"It must be used like 'df.iloc[:, column_indexer]'.\"\n )\n if not isinstance(key, tuple):\n raise NotImplementedError(msg)\n\n if len(key) > 2:\n raise ValueError(\"Too many indexers\")\n\n iindexer, cindexer = key\n\n if iindexer != slice(None):\n raise NotImplementedError(msg)\n\n if not self.obj.columns.is_unique:\n # if there are any duplicate column names, do an iloc\n return self._iloc(iindexer, cindexer)\n else:\n # otherwise dispatch to dask.dataframe.core.DataFrame.__getitem__\n col_names = self.obj.columns[cindexer]\n return self.obj.__getitem__(col_names)\n\n def _iloc(self, iindexer, cindexer):\n assert iindexer == slice(None)\n meta = self._make_meta(iindexer, cindexer)\n\n return self.obj.map_partitions(methods.iloc, cindexer, meta=meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__LocIndexer__LocIndexer.__getitem__.return.self__loc_iindexer_cinde": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__LocIndexer__LocIndexer.__getitem__.return.self__loc_iindexer_cinde", "embedding": null, "metadata": {"file_path": "dask/dataframe/indexing.py", "file_name": "indexing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 76, "end_line": 98, "span_ids": ["_LocIndexer.__getitem__", "_LocIndexer._meta_indexer", "_LocIndexer"], "tokens": 161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _LocIndexer(_IndexerBase):\n \"\"\" Helper class for the .loc accessor \"\"\"\n\n @property\n def _meta_indexer(self):\n return self.obj._meta.loc\n\n def __getitem__(self, key):\n\n if isinstance(key, tuple):\n # multi-dimensional selection\n if len(key) > self.obj.ndim:\n # raise from pandas\n msg = \"Too many indexers\"\n raise pd.core.indexing.IndexingError(msg)\n\n iindexer = key[0]\n cindexer = key[1]\n else:\n # if self.obj is Series, cindexer is always None\n iindexer = key\n cindexer = None\n return self._loc(iindexer, cindexer)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__LocIndexer._loc__LocIndexer._loc.if_self_obj_known_divisio.else_.return.self_obj_map_partitions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__LocIndexer._loc__LocIndexer._loc.if_self_obj_known_divisio.else_.return.self_obj_map_partitions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/indexing.py", "file_name": "indexing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 100, "end_line": 131, "span_ids": ["_LocIndexer._loc"], "tokens": 317}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _LocIndexer(_IndexerBase):\n\n def _loc(self, iindexer, cindexer):\n \"\"\" Helper function for the .loc accessor \"\"\"\n if isinstance(iindexer, Series):\n return self._loc_series(iindexer, cindexer)\n elif isinstance(iindexer, Array):\n return self._loc_array(iindexer, cindexer)\n elif callable(iindexer):\n return self._loc(iindexer(self.obj), cindexer)\n\n if self.obj.known_divisions:\n iindexer = self._maybe_partial_time_string(iindexer)\n\n if isinstance(iindexer, slice):\n return self._loc_slice(iindexer, cindexer)\n elif isinstance(iindexer, (list, np.ndarray)):\n return self._loc_list(iindexer, cindexer)\n else:\n # element should raise KeyError\n return self._loc_element(iindexer, cindexer)\n else:\n if isinstance(iindexer, (list, np.ndarray)):\n # applying map_partitions to each partition\n # results in duplicated NaN rows\n msg = \"Cannot index with list against unknown division\"\n raise KeyError(msg)\n elif not isinstance(iindexer, slice):\n iindexer = slice(iindexer, iindexer)\n\n meta = self._make_meta(iindexer, cindexer)\n return self.obj.map_partitions(\n methods.try_loc, iindexer, cindexer, meta=meta\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__LocIndexer._maybe_partial_time_string__LocIndexer._loc_array.return.self__loc_series_iindexer": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__LocIndexer._maybe_partial_time_string__LocIndexer._loc_array.return.self__loc_series_iindexer", "embedding": null, "metadata": {"file_path": "dask/dataframe/indexing.py", "file_name": "indexing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 133, "end_line": 150, "span_ids": ["_LocIndexer._loc_array", "_LocIndexer._maybe_partial_time_string", "_LocIndexer._loc_series"], "tokens": 197}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _LocIndexer(_IndexerBase):\n\n def _maybe_partial_time_string(self, iindexer):\n \"\"\"\n Convert index-indexer for partial time string slicing\n if obj.index is DatetimeIndex / PeriodIndex\n \"\"\"\n idx = meta_nonempty(self.obj._meta.index)\n iindexer = _maybe_partial_time_string(idx, iindexer, kind=\"loc\")\n return iindexer\n\n def _loc_series(self, iindexer, cindexer):\n meta = self._make_meta(iindexer, cindexer)\n return self.obj.map_partitions(\n methods.loc, iindexer, cindexer, token=\"loc-series\", meta=meta\n )\n\n def _loc_array(self, iindexer, cindexer):\n iindexer_series = iindexer.to_dask_dataframe(\"_\", self.obj.index)\n return self._loc_series(iindexer_series, cindexer)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, 
"__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__LocIndexer._loc_list__LocIndexer._loc_list.return.new_dd_object_graph_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__LocIndexer._loc_list__LocIndexer._loc_list.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/indexing.py", "file_name": "indexing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 152, "end_line": 172, "span_ids": ["_LocIndexer._loc_list"], "tokens": 244}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _LocIndexer(_IndexerBase):\n\n def _loc_list(self, iindexer, cindexer):\n name = \"loc-%s\" % tokenize(iindexer, self.obj)\n parts = self._get_partitions(iindexer)\n meta = self._make_meta(iindexer, cindexer)\n\n if len(iindexer):\n dsk = {}\n divisions = []\n items = sorted(parts.items())\n for i, (div, indexer) in enumerate(items):\n dsk[name, i] = (methods.loc, (self._name, div), indexer, cindexer)\n # append minimum value as division\n divisions.append(sorted(indexer)[0])\n # append maximum value of the last division\n divisions.append(sorted(items[-1][1])[-1])\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self.obj])\n else:\n divisions = [None, None]\n dsk = {(name, 0): meta.head(0)}\n graph = HighLevelGraph.from_collections(name, dsk)\n return new_dd_object(graph, name, meta=meta, divisions=divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__LocIndexer._loc_element__LocIndexer._coerce_loc_index.return._coerce_loc_index_self_ob": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__LocIndexer._loc_element__LocIndexer._coerce_loc_index.return._coerce_loc_index_self_ob", "embedding": null, "metadata": {"file_path": "dask/dataframe/indexing.py", "file_name": "indexing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 174, "end_line": 202, "span_ids": ["_LocIndexer._loc_element", "_LocIndexer._coerce_loc_index", "_LocIndexer._get_partitions"], "tokens": 280}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _LocIndexer(_IndexerBase):\n\n def _loc_element(self, iindexer, cindexer):\n name = \"loc-%s\" % tokenize(iindexer, self.obj)\n part = self._get_partitions(iindexer)\n\n if iindexer < self.obj.divisions[0] or iindexer > self.obj.divisions[-1]:\n raise KeyError(\"the label [%s] is not in the index\" % str(iindexer))\n\n dsk = {\n (name, 0): (\n methods.loc,\n (self._name, part),\n slice(iindexer, iindexer),\n cindexer,\n )\n 
}\n\n meta = self._make_meta(iindexer, cindexer)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self.obj])\n return new_dd_object(graph, name, meta=meta, divisions=[iindexer, iindexer])\n\n def _get_partitions(self, keys):\n if isinstance(keys, (list, np.ndarray)):\n return _partitions_of_index_values(self.obj.divisions, keys)\n else:\n # element\n return _partition_of_index_value(self.obj.divisions, keys)\n\n def _coerce_loc_index(self, key):\n return _coerce_loc_index(self.obj.divisions, key)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__LocIndexer._loc_slice__LocIndexer._loc_slice.return.new_dd_object_graph_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__LocIndexer._loc_slice__LocIndexer._loc_slice.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/indexing.py", "file_name": "indexing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 204, "end_line": 283, "span_ids": ["_LocIndexer._loc_slice"], "tokens": 625}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _LocIndexer(_IndexerBase):\n\n def _loc_slice(self, iindexer, cindexer):\n name = \"loc-%s\" % tokenize(iindexer, cindexer, self)\n\n assert isinstance(iindexer, slice)\n assert iindexer.step in (None, 1)\n\n if iindexer.start is not None:\n start = self._get_partitions(iindexer.start)\n else:\n start = 0\n if iindexer.stop is not None:\n stop = self._get_partitions(iindexer.stop)\n else:\n stop = self.obj.npartitions - 1\n\n if iindexer.start is None and self.obj.known_divisions:\n istart = self.obj.divisions[0]\n else:\n istart = self._coerce_loc_index(iindexer.start)\n if iindexer.stop is None and self.obj.known_divisions:\n istop = self.obj.divisions[-1]\n else:\n istop = self._coerce_loc_index(iindexer.stop)\n\n if stop == start:\n dsk = {\n (name, 0): (\n methods.loc,\n (self._name, start),\n slice(iindexer.start, iindexer.stop),\n cindexer,\n )\n }\n divisions = [istart, istop]\n else:\n dsk = {\n (name, 0): (\n methods.loc,\n (self._name, start),\n slice(iindexer.start, None),\n cindexer,\n )\n }\n for i in range(1, stop - start):\n if cindexer is None:\n dsk[name, i] = (self._name, start + i)\n else:\n dsk[name, i] = (\n methods.loc,\n (self._name, start + i),\n slice(None, None),\n cindexer,\n )\n\n dsk[name, stop - start] = (\n methods.loc,\n (self._name, stop),\n slice(None, iindexer.stop),\n cindexer,\n )\n\n if iindexer.start is None:\n div_start = self.obj.divisions[0]\n else:\n div_start = max(istart, self.obj.divisions[start])\n\n if iindexer.stop is None:\n div_stop = self.obj.divisions[-1]\n else:\n div_stop = min(istop, self.obj.divisions[stop + 1])\n\n divisions = (\n (div_start,) + self.obj.divisions[start + 1 : stop + 1] + (div_stop,)\n )\n\n assert len(divisions) == len(dsk) + 1\n\n meta = self._make_meta(iindexer, cindexer)\n graph = HighLevelGraph.from_collections(name, dsk, 
dependencies=[self.obj])\n return new_dd_object(graph, name, meta=meta, divisions=divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__partition_of_index_value__partition_of_index_value.return.min_len_divisions_2_m": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__partition_of_index_value__partition_of_index_value.return.min_len_divisions_2_m", "embedding": null, "metadata": {"file_path": "dask/dataframe/indexing.py", "file_name": "indexing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 286, "end_line": 303, "span_ids": ["_partition_of_index_value"], "tokens": 197}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _partition_of_index_value(divisions, val):\n \"\"\"In which partition does this value lie?\n\n >>> _partition_of_index_value([0, 5, 10], 3)\n 0\n >>> _partition_of_index_value([0, 5, 10], 8)\n 1\n >>> _partition_of_index_value([0, 5, 10], 100)\n 1\n >>> _partition_of_index_value([0, 5, 10], 5) # left-inclusive divisions\n 1\n \"\"\"\n if divisions[0] is None:\n msg = \"Can not use loc on DataFrame without known divisions\"\n raise ValueError(msg)\n val = _coerce_loc_index(divisions, val)\n i = bisect.bisect_right(divisions, val)\n return min(len(divisions) - 2, max(0, i - 1))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__partitions_of_index_values__partitions_of_index_values.return.results": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__partitions_of_index_values__partitions_of_index_values.return.results", "embedding": null, "metadata": {"file_path": "dask/dataframe/indexing.py", "file_name": "indexing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 306, "end_line": 326, "span_ids": ["_partitions_of_index_values"], "tokens": 215}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _partitions_of_index_values(divisions, values):\n \"\"\"Return a defaultdict of division / index-values pairs\n Each key is a division number and its value is the list of index values\n that fall within that division.\n\n >>> sorted(_partitions_of_index_values([0, 5, 10], [3]).items())\n [(0, [3])]\n >>> sorted(_partitions_of_index_values([0, 5, 10], [3, 8, 5]).items())\n [(0, [3]), (1, [8, 5])]\n \"\"\"\n if divisions[0] is None:\n msg = \"Can not use loc on DataFrame without known divisions\"\n 
raise ValueError(msg)\n\n results = defaultdict(list)\n values = pd.Index(values, dtype=object)\n for val in values:\n i = bisect.bisect_right(divisions, val)\n div = min(len(divisions) - 2, max(0, i - 1))\n results[div].append(val)\n return results", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__coerce_loc_index_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__coerce_loc_index_", "embedding": null, "metadata": {"file_path": "dask/dataframe/indexing.py", "file_name": "indexing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 329, "end_line": 370, "span_ids": ["_coerce_loc_index", "_maybe_partial_time_string"], "tokens": 304}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _coerce_loc_index(divisions, o):\n \"\"\"Transform values to be comparable against divisions\n\n This is particularly valuable to use with pandas datetimes\n \"\"\"\n if divisions and isinstance(divisions[0], datetime):\n return pd.Timestamp(o)\n if divisions and isinstance(divisions[0], np.datetime64):\n return np.datetime64(o).astype(divisions[0].dtype)\n return o\n\n\ndef _maybe_partial_time_string(index, indexer, kind):\n \"\"\"\n Convert indexer for partial string selection\n if data has DatetimeIndex/PeriodIndex\n \"\"\"\n # do not pass dd.Index\n assert is_index_like(index)\n\n if not isinstance(index, (pd.DatetimeIndex, pd.PeriodIndex)):\n return indexer\n\n if isinstance(indexer, slice):\n if isinstance(indexer.start, str):\n start = index._maybe_cast_slice_bound(indexer.start, \"left\", kind)\n else:\n start = indexer.start\n\n if isinstance(indexer.stop, str):\n stop = index._maybe_cast_slice_bound(indexer.stop, \"right\", kind)\n else:\n stop = indexer.stop\n return slice(start, stop)\n\n elif isinstance(indexer, str):\n start = index._maybe_cast_slice_bound(indexer, \"left\", \"loc\")\n stop = index._maybe_cast_slice_bound(indexer, \"right\", \"loc\")\n return slice(min(start, stop), max(start, stop))\n\n return indexer", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/__init__.py_from_array_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/__init__.py_from_array_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 23, "span_ids": ["imports"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from .io import (\n from_array,\n from_bcolz,\n from_pandas,\n from_dask_array,\n from_delayed,\n dataframe_from_ctable,\n to_bag,\n to_records,\n)\nfrom .csv import read_csv, to_csv, read_table, read_fwf\nfrom .hdf import read_hdf, to_hdf\nfrom .sql import read_sql_table, to_sql\nfrom .json import read_json, to_json\nfrom . import demo\n\ntry:\n from .parquet import read_parquet, to_parquet\nexcept ImportError:\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_from_os_path_import_basen_from_fsspec_compression_i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_from_os_path_import_basen_from_fsspec_compression_i", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/csv.py", "file_name": "csv.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 33, "span_ids": ["imports"], "tokens": 188}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from os.path import basename\nfrom collections.abc import Mapping\nfrom io import BytesIO\nfrom warnings import warn, catch_warnings, simplefilter\n\ntry:\n import psutil\nexcept ImportError:\n psutil = None\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import (\n is_integer_dtype,\n is_float_dtype,\n is_object_dtype,\n is_datetime64_any_dtype,\n CategoricalDtype,\n)\n\nfrom ...base import tokenize\n\n# this import checks for the importability of fsspec\nfrom ...bytes import read_bytes, open_file, open_files\nfrom ..core import new_dd_object\nfrom ...core import flatten\nfrom ...delayed import delayed\nfrom ...utils import asciitable, parse_bytes\nfrom ..utils import clear_known_categories\nfrom ...blockwise import Blockwise\n\nimport fsspec.implementations.local\nfrom fsspec.compression import compr", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_CSVSubgraph_CSVSubgraph.__init__.self_colname_self_paths_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_CSVSubgraph_CSVSubgraph.__init__.self_colname_self_paths_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/csv.py", "file_name": "csv.py", "file_type": "text/x-python", "category": "implementation", "start_line": 35, "end_line": 64, "span_ids": ["CSVSubgraph.__init__", "CSVSubgraph"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class CSVSubgraph(Mapping):\n \"\"\"\n Subgraph for reading CSV files.\n \"\"\"\n\n def __init__(\n self,\n name,\n reader,\n blocks,\n is_first,\n head,\n header,\n kwargs,\n dtypes,\n columns,\n enforce,\n path,\n ):\n self.name = name\n self.reader = reader\n self.blocks = blocks\n self.is_first = is_first\n self.head = head # example pandas DF for metadata\n self.header = header # prepend to all blocks\n self.kwargs = kwargs\n self.dtypes = dtypes\n self.columns = columns\n self.enforce = enforce\n self.colname, self.paths = path or (None, None)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_CSVSubgraph.__getitem___CSVSubgraph.__iter__.for_i_in_range_len_self_.yield_self_name_i_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_CSVSubgraph.__getitem___CSVSubgraph.__iter__.for_i_in_range_len_self_.yield_self_name_i_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/csv.py", "file_name": "csv.py", "file_type": "text/x-python", "category": "implementation", "start_line": 66, "end_line": 110, "span_ids": ["CSVSubgraph.__iter__", "CSVSubgraph.__len__", "CSVSubgraph.__getitem__"], "tokens": 239}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class CSVSubgraph(Mapping):\n\n def __getitem__(self, key):\n try:\n name, i = key\n except ValueError:\n # too many / few values to unpack\n raise KeyError(key) from None\n\n if name != self.name:\n raise KeyError(key)\n\n if i < 0 or i >= len(self.blocks):\n raise KeyError(key)\n\n block = self.blocks[i]\n\n if self.paths is not None:\n path_info = (self.colname, self.paths[i], self.paths)\n else:\n path_info = None\n\n write_header = False\n rest_kwargs = self.kwargs.copy()\n if not self.is_first[i]:\n write_header = True\n rest_kwargs.pop(\"skiprows\", None)\n\n return (\n pandas_read_text,\n self.reader,\n block,\n self.header,\n rest_kwargs,\n self.dtypes,\n self.columns,\n write_header,\n self.enforce,\n path_info,\n )\n\n def __len__(self):\n return len(self.blocks)\n\n def __iter__(self):\n for i in range(len(self)):\n yield (self.name, i)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_pandas_read_text_pandas_read_text.return.df": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_pandas_read_text_pandas_read_text.return.df", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/csv.py", "file_name": "csv.py", "file_type": "text/x-python", "category": "implementation", "start_line": 113, "end_line": 164, "span_ids": ["pandas_read_text"], "tokens": 331}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pandas_read_text(\n reader,\n b,\n header,\n kwargs,\n dtypes=None,\n columns=None,\n write_header=True,\n enforce=False,\n path=None,\n):\n \"\"\"Convert a block of bytes to a Pandas DataFrame\n\n Parameters\n ----------\n reader : callable\n ``pd.read_csv`` or ``pd.read_table``.\n b : bytestring\n The content to be parsed with ``reader``\n header : bytestring\n An optional header to prepend to ``b``\n kwargs : dict\n A dictionary of keyword arguments to be passed to ``reader``\n dtypes : dict\n DTypes to assign to columns\n path : tuple\n A tuple containing path column name, path to file, and all paths.\n\n See Also\n --------\n dask.dataframe.csv.read_pandas_from_bytes\n \"\"\"\n bio = BytesIO()\n if write_header and not b.startswith(header.rstrip()):\n bio.write(header)\n bio.write(b)\n bio.seek(0)\n df = reader(bio, **kwargs)\n if dtypes:\n coerce_dtypes(df, dtypes)\n\n if enforce and columns and (list(df.columns) != list(columns)):\n raise ValueError(\"Columns do not match\", df.columns, columns)\n elif columns:\n df.columns = columns\n if path:\n colname, path, paths = path\n code = paths.index(path)\n df = df.assign(\n **{colname: pd.Categorical.from_codes(np.full(len(df), code), paths)}\n )\n return df", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_coerce_dtypes_coerce_dtypes.if_bad_dtypes_or_bad_date.raise_ValueError_msg_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_coerce_dtypes_coerce_dtypes.if_bad_dtypes_or_bad_date.raise_ValueError_msg_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/csv.py", "file_name": "csv.py", "file_type": "text/x-python", "category": "implementation", "start_line": 167, "end_line": 256, "span_ids": ["coerce_dtypes"], "tokens": 809}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def coerce_dtypes(df, dtypes):\n \"\"\"Coerce dataframe to dtypes safely\n\n Operates in place\n\n Parameters\n ----------\n df: Pandas DataFrame\n dtypes: dict like {'x': float}\n \"\"\"\n bad_dtypes = []\n bad_dates = []\n errors = []\n for c in df.columns:\n if c in dtypes and df.dtypes[c] != dtypes[c]:\n actual = df.dtypes[c]\n desired = dtypes[c]\n if is_float_dtype(actual) and is_integer_dtype(desired):\n bad_dtypes.append((c, actual, desired))\n elif is_object_dtype(actual) and is_datetime64_any_dtype(desired):\n # This can only occur when parse_dates is specified, but an\n # invalid date is encountered. Pandas then silently falls back\n # to object dtype. 
Since `object_array.astype(datetime)` will\n # silently overflow, error here and report.\n bad_dates.append(c)\n else:\n try:\n df[c] = df[c].astype(dtypes[c])\n except Exception as e:\n bad_dtypes.append((c, actual, desired))\n errors.append((c, e))\n\n if bad_dtypes:\n if errors:\n ex = \"\\n\".join(\n \"- %s\\n %r\" % (c, e)\n for c, e in sorted(errors, key=lambda x: str(x[0]))\n )\n exceptions = (\n \"The following columns also raised exceptions on \"\n \"conversion:\\n\\n%s\\n\\n\"\n ) % ex\n extra = \"\"\n else:\n exceptions = \"\"\n # All mismatches are int->float, also suggest `assume_missing=True`\n extra = (\n \"\\n\\nAlternatively, provide `assume_missing=True` \"\n \"to interpret\\n\"\n \"all unspecified integer columns as floats.\"\n )\n\n bad_dtypes = sorted(bad_dtypes, key=lambda x: str(x[0]))\n table = asciitable([\"Column\", \"Found\", \"Expected\"], bad_dtypes)\n dtype_kw = \"dtype={%s}\" % \",\\n \".join(\n \"%r: '%s'\" % (k, v) for (k, v, _) in bad_dtypes\n )\n\n dtype_msg = (\n \"{table}\\n\\n\"\n \"{exceptions}\"\n \"Usually this is due to dask's dtype inference failing, and\\n\"\n \"*may* be fixed by specifying dtypes manually by adding:\\n\\n\"\n \"{dtype_kw}\\n\\n\"\n \"to the call to `read_csv`/`read_table`.\"\n \"{extra}\"\n ).format(table=table, exceptions=exceptions, dtype_kw=dtype_kw, extra=extra)\n else:\n dtype_msg = None\n\n if bad_dates:\n also = \" also \" if bad_dtypes else \" \"\n cols = \"\\n\".join(\"- %s\" % c for c in bad_dates)\n date_msg = (\n \"The following columns{also}failed to properly parse as dates:\\n\\n\"\n \"{cols}\\n\\n\"\n \"This is usually due to an invalid value in that column. To\\n\"\n \"diagnose and fix it's recommended to drop these columns from the\\n\"\n \"`parse_dates` keyword, and manually convert them to dates later\\n\"\n \"using `dd.to_datetime`.\"\n ).format(also=also, cols=cols)\n else:\n date_msg = None\n\n if bad_dtypes or bad_dates:\n rule = \"\\n\\n%s\\n\\n\" % (\"-\" * 61)\n msg = \"Mismatched dtypes found in `pd.read_csv`/`pd.read_table`.\\n\\n%s\" % (\n rule.join(filter(None, [dtype_msg, date_msg]))\n )\n raise ValueError(msg)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_text_blocks_to_pandas_text_blocks_to_pandas.return.new_dd_object_subgraph_n": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_text_blocks_to_pandas_text_blocks_to_pandas.return.new_dd_object_subgraph_n", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/csv.py", "file_name": "csv.py", "file_type": "text/x-python", "category": "implementation", "start_line": 312, "end_line": 417, "span_ids": ["text_blocks_to_pandas"], "tokens": 735}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def text_blocks_to_pandas(\n reader,\n block_lists,\n header,\n head,\n kwargs,\n enforce=False,\n specified_dtypes=None,\n path=None,\n):\n \"\"\"Convert blocks of bytes to a dask.dataframe\n\n This accepts a list of lists of values of 
bytes where each list corresponds\n to one file, and the value of bytes concatenate to comprise the entire\n file, in order.\n\n Parameters\n ----------\n reader : callable\n ``pd.read_csv`` or ``pd.read_table``.\n block_lists : list of lists of delayed values of bytes\n The lists of bytestrings where each list corresponds to one logical file\n header : bytestring\n The header, found at the front of the first file, to be prepended to\n all blocks\n head : pd.DataFrame\n An example Pandas DataFrame to be used for metadata.\n kwargs : dict\n Keyword arguments to pass down to ``reader``\n path : tuple, optional\n A tuple containing column name for path and list of all paths\n\n Returns\n -------\n A dask.dataframe\n \"\"\"\n dtypes = head.dtypes.to_dict()\n # dtypes contains only instances of CategoricalDtype, which causes issues\n # in coerce_dtypes for non-uniform categories across partitions.\n # We will modify `dtype` (which is inferred) to\n # 1. contain instances of CategoricalDtypes for user-provided types\n # 2. contain 'category' for data inferred types\n categoricals = head.select_dtypes(include=[\"category\"]).columns\n\n known_categoricals = []\n unknown_categoricals = categoricals\n if isinstance(specified_dtypes, Mapping):\n known_categoricals = [\n k\n for k in categoricals\n if isinstance(specified_dtypes.get(k), CategoricalDtype)\n and specified_dtypes.get(k).categories is not None\n ]\n unknown_categoricals = categoricals.difference(known_categoricals)\n elif (\n isinstance(specified_dtypes, CategoricalDtype)\n and specified_dtypes.categories is None\n ):\n known_categoricals = []\n unknown_categoricals = categoricals\n\n # Fixup the dtypes\n for k in unknown_categoricals:\n dtypes[k] = \"category\"\n\n columns = list(head.columns)\n\n blocks = tuple(flatten(block_lists))\n # Create mask of first blocks from nested block_lists\n is_first = tuple(block_mask(block_lists))\n\n name = \"read-csv-\" + tokenize(reader, columns, enforce, head)\n\n if path:\n block_file_names = [basename(b[1].path) for b in blocks]\n path = (\n path[0],\n [p for p in path[1] if basename(p) in block_file_names],\n )\n\n colname, paths = path\n head = head.assign(\n **{\n colname: pd.Categorical.from_codes(\n np.zeros(len(head), dtype=int), paths\n )\n }\n )\n if len(unknown_categoricals):\n head = clear_known_categories(head, cols=unknown_categoricals)\n\n subgraph = BlockwiseReadCSV(\n name,\n reader,\n blocks,\n is_first,\n head,\n header,\n kwargs,\n dtypes,\n columns,\n enforce,\n path,\n )\n\n return new_dd_object(subgraph, name, head, (None,) * (len(blocks) + 1))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_block_mask_if_psutil_is_not_None_.else_.AUTO_BLOCKSIZE.2_25": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_block_mask_if_psutil_is_not_None_.else_.AUTO_BLOCKSIZE.2_25", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/csv.py", "file_name": "csv.py", "file_type": "text/x-python", "category": "implementation", "start_line": 367, "end_line": 396, "span_ids": ["auto_blocksize", "impl:5", "block_mask"], "tokens": 233}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def block_mask(block_lists):\n \"\"\"\n Yields a flat iterable of booleans to mark the zeroth elements of the\n nested input ``block_lists`` in a flattened output.\n\n >>> list(block_mask([[1, 2], [3, 4], [5]]))\n [True, False, True, False, True]\n \"\"\"\n for block in block_lists:\n if not block:\n continue\n yield True\n yield from (False for _ in block[1:])\n\n\ndef auto_blocksize(total_memory, cpu_count):\n memory_factor = 10\n blocksize = int(total_memory // cpu_count / memory_factor)\n return min(blocksize, int(64e6))\n\n\n# guess blocksize if psutil is installed or use acceptable default one if not\nif psutil is not None:\n with catch_warnings():\n simplefilter(\"ignore\", RuntimeWarning)\n TOTAL_MEM = psutil.virtual_memory().total\n CPU_COUNT = psutil.cpu_count()\n AUTO_BLOCKSIZE = auto_blocksize(TOTAL_MEM, CPU_COUNT)\nelse:\n AUTO_BLOCKSIZE = 2 ** 25", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_read_pandas_read_pandas.b_lineterminator.lineterminator_encode_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_read_pandas_read_pandas.b_lineterminator.lineterminator_encode_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/csv.py", "file_name": "csv.py", "file_type": "text/x-python", "category": "implementation", "start_line": 399, "end_line": 478, "span_ids": ["read_pandas"], "tokens": 759}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def read_pandas(\n reader,\n urlpath,\n blocksize=\"default\",\n lineterminator=None,\n compression=None,\n sample=256000,\n enforce=False,\n assume_missing=False,\n storage_options=None,\n include_path_column=False,\n **kwargs,\n):\n reader_name = reader.__name__\n if lineterminator is not None and len(lineterminator) == 1:\n kwargs[\"lineterminator\"] = lineterminator\n else:\n lineterminator = \"\\n\"\n if include_path_column and isinstance(include_path_column, bool):\n include_path_column = \"path\"\n if \"index\" in kwargs or \"index_col\" in kwargs:\n raise ValueError(\n \"Keywords 'index' and 'index_col' not supported. \"\n \"Use dd.{0}(...).set_index('my-index') \"\n \"instead\".format(reader_name)\n )\n for kw in [\"iterator\", \"chunksize\"]:\n if kw in kwargs:\n raise ValueError(\"{0} not supported for dd.{1}\".format(kw, reader_name))\n if kwargs.get(\"nrows\", None):\n raise ValueError(\n \"The 'nrows' keyword is not supported by \"\n \"`dd.{0}`. 
To achieve the same behavior, it's \"\n            \"recommended to use `dd.{0}(...).\"\n            \"head(n=nrows)`\".format(reader_name)\n        )\n    if isinstance(kwargs.get(\"skiprows\"), int):\n        skiprows = lastskiprow = firstrow = kwargs.get(\"skiprows\")\n    elif kwargs.get(\"skiprows\") is None:\n        skiprows = lastskiprow = firstrow = 0\n    else:\n        # When skiprows is a list, we expect more than max(skiprows) to\n        # be included in the sample. This means that [0,2] will work well,\n        # but [0, 440] might not work.\n        skiprows = set(kwargs.get(\"skiprows\"))\n        lastskiprow = max(skiprows)\n        # find the firstrow that is not skipped, for use as header\n        firstrow = min(set(range(len(skiprows) + 1)) - set(skiprows))\n    if isinstance(kwargs.get(\"header\"), list):\n        raise TypeError(\n            \"List of header rows not supported for dd.{0}\".format(reader_name)\n        )\n    if isinstance(kwargs.get(\"converters\"), dict) and include_path_column:\n        path_converter = kwargs.get(\"converters\").get(include_path_column, None)\n    else:\n        path_converter = None\n\n    if blocksize == \"default\":\n        blocksize = AUTO_BLOCKSIZE\n    if isinstance(blocksize, str):\n        blocksize = parse_bytes(blocksize)\n    if blocksize and compression:\n        # NONE of the compressions should use chunking\n        warn(\n            \"Warning %s compression does not support breaking apart files\\n\"\n            \"Please ensure that each individual file can fit in memory and\\n\"\n            \"use the keyword ``blocksize=None`` to remove this message\\n\"\n            \"Setting ``blocksize=None``\" % compression\n        )\n        blocksize = None\n    if compression not in compr:\n        raise NotImplementedError(\"Compression format %s not installed\" % compression)\n    if blocksize and sample and blocksize < sample and lastskiprow != 0:\n        warn(\n            \"Unexpected behavior can result from passing skiprows when\\n\"\n            \"blocksize is smaller than sample size.\\n\"\n            \"Setting ``sample=blocksize``\"\n        )\n        sample = blocksize\n    b_lineterminator = lineterminator.encode()\n    # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_read_pandas.b_out_read_pandas.return.text_blocks_to_pandas_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_read_pandas.b_out_read_pandas.return.text_blocks_to_pandas_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/csv.py", "file_name": "csv.py", "file_type": "text/x-python", "category": "implementation", "start_line": 479, "end_line": 554, "span_ids": ["read_pandas"], "tokens": 738}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def read_pandas(\n    reader,\n    urlpath,\n    blocksize=\"default\",\n    lineterminator=None,\n    compression=None,\n    sample=256000,\n    enforce=False,\n    assume_missing=False,\n    storage_options=None,\n    include_path_column=False,\n    **kwargs,\n):\n    # ... 
other code\n b_out = read_bytes(\n urlpath,\n delimiter=b_lineterminator,\n blocksize=blocksize,\n sample=sample,\n compression=compression,\n include_path=include_path_column,\n **(storage_options or {}),\n )\n\n if include_path_column:\n b_sample, values, paths = b_out\n if path_converter:\n paths = [path_converter(path) for path in paths]\n path = (include_path_column, paths)\n else:\n b_sample, values = b_out\n path = None\n\n if not isinstance(values[0], (tuple, list)):\n values = [values]\n # If we have not sampled, then use the first row of the first values\n # as a representative sample.\n if b_sample is False and len(values[0]):\n b_sample = values[0][0].compute()\n\n # Get header row, and check that sample is long enough. If the file\n # contains a header row, we need at least 2 nonempty rows + the number of\n # rows to skip.\n names = kwargs.get(\"names\", None)\n header = kwargs.get(\"header\", \"infer\" if names is None else None)\n need = 1 if header is None else 2\n parts = b_sample.split(b_lineterminator, lastskiprow + need)\n # If the last partition is empty, don't count it\n nparts = 0 if not parts else len(parts) - int(not parts[-1])\n\n if sample is not False and nparts < lastskiprow + need and len(b_sample) >= sample:\n raise ValueError(\n \"Sample is not large enough to include at least one \"\n \"row of data. Please increase the number of bytes \"\n \"in `sample` in the call to `read_csv`/`read_table`\"\n )\n\n header = b\"\" if header is None else parts[firstrow] + b_lineterminator\n\n # Use sample to infer dtypes and check for presence of include_path_column\n head = reader(BytesIO(b_sample), **kwargs)\n if include_path_column and (include_path_column in head.columns):\n raise ValueError(\n \"Files already contain the column name: %s, so the \"\n \"path column cannot use this name. 
Please set \"\n \"`include_path_column` to a unique name.\" % include_path_column\n )\n\n specified_dtypes = kwargs.get(\"dtype\", {})\n if specified_dtypes is None:\n specified_dtypes = {}\n # If specified_dtypes is a single type, then all columns were specified\n if assume_missing and isinstance(specified_dtypes, dict):\n # Convert all non-specified integer columns to floats\n for c in head.columns:\n if is_integer_dtype(head[c].dtype) and c not in specified_dtypes:\n head[c] = head[c].astype(float)\n\n values = [[dsk.dask.values() for dsk in block] for block in values]\n\n return text_blocks_to_pandas(\n reader,\n values,\n header,\n head,\n kwargs,\n enforce=enforce,\n specified_dtypes=specified_dtypes,\n path=path,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_READ_DOC_TEMPLATE_READ_DOC_TEMPLATE._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_READ_DOC_TEMPLATE_READ_DOC_TEMPLATE._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/csv.py", "file_name": "csv.py", "file_type": "text/x-python", "category": "implementation", "start_line": 557, "end_line": 629, "span_ids": ["impl:17"], "tokens": 833}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "READ_DOC_TEMPLATE = \"\"\"\nRead {file_type} files into a Dask.DataFrame\n\nThis parallelizes the :func:`pandas.{reader}` function in the following ways:\n\n- It supports loading many files at once using globstrings:\n\n >>> df = dd.{reader}('myfiles.*.csv') # doctest: +SKIP\n\n- In some cases it can break up large files:\n\n >>> df = dd.{reader}('largefile.csv', blocksize=25e6) # 25MB chunks # doctest: +SKIP\n\n- It can read CSV files from external resources (e.g. S3, HDFS) by\n providing a URL:\n\n >>> df = dd.{reader}('s3://bucket/myfiles.*.csv') # doctest: +SKIP\n >>> df = dd.{reader}('hdfs:///myfiles.*.csv') # doctest: +SKIP\n >>> df = dd.{reader}('hdfs://namenode.example.com/myfiles.*.csv') # doctest: +SKIP\n\nInternally ``dd.{reader}`` uses :func:`pandas.{reader}` and supports many of the\nsame keyword arguments with the same performance guarantees. See the docstring\nfor :func:`pandas.{reader}` for more information on available keyword arguments.\n\nParameters\n----------\nurlpath : string or list\n Absolute or relative filepath(s). Prefix with a protocol like ``s3://``\n to read from alternative filesystems. To read from multiple files you\n can pass a globstring or a list of paths, with the caveat that they\n must all have the same protocol.\nblocksize : str, int or None, optional\n Number of bytes by which to cut up larger files. Default value is computed\n based on available physical memory and the number of cores, up to a maximum\n of 64MB. Can be a number like ``64000000` or a string like ``\"64MB\"``. 
If\n ``None``, a single block is used for each file.\nsample : int, optional\n Number of bytes to use when determining dtypes\nassume_missing : bool, optional\n If True, all integer columns that aren't specified in ``dtype`` are assumed\n to contain missing values, and are converted to floats. Default is False.\nstorage_options : dict, optional\n Extra options that make sense for a particular storage connection, e.g.\n host, port, username, password, etc.\ninclude_path_column : bool or str, optional\n Whether or not to include the path to each particular file. If True a new\n column is added to the dataframe called ``path``. If str, sets new column\n name. Default is False.\n**kwargs\n Extra keyword arguments to forward to :func:`pandas.{reader}`.\n\nNotes\n-----\nDask dataframe tries to infer the ``dtype`` of each column by reading a sample\nfrom the start of the file (or of the first file if it's a glob). Usually this\nworks fine, but if the ``dtype`` is different later in the file (or in other\nfiles) this can cause issues. For example, if all the rows in the sample had\ninteger dtypes, but later on there was a ``NaN``, then this would error at\ncompute time. To fix this, you have a few options:\n\n- Provide explicit dtypes for the offending columns using the ``dtype``\n keyword. This is the recommended solution.\n\n- Use the ``assume_missing`` keyword to assume that all columns inferred as\n integers contain missing values, and convert them to floats.\n\n- Increase the size of the sample using the ``sample`` keyword.\n\nIt should also be noted that this function may fail if a {file_type} file\nincludes quoted strings that contain the line terminator. To get around this\nyou can specify ``blocksize=None`` to not split files into multiple partitions,\nat the cost of reduced parallelism.\n\"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_make_reader__write_csv.return.None": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_make_reader__write_csv.return.None", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/csv.py", "file_name": "csv.py", "file_type": "text/x-python", "category": "implementation", "start_line": 632, "end_line": 672, "span_ids": ["_write_csv", "make_reader", "impl:19"], "tokens": 260}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def make_reader(reader, reader_name, file_type):\n def read(\n urlpath,\n blocksize=\"default\",\n lineterminator=None,\n compression=None,\n sample=256000,\n enforce=False,\n assume_missing=False,\n storage_options=None,\n include_path_column=False,\n **kwargs,\n ):\n return read_pandas(\n reader,\n urlpath,\n blocksize=blocksize,\n lineterminator=lineterminator,\n compression=compression,\n sample=sample,\n enforce=enforce,\n assume_missing=assume_missing,\n storage_options=storage_options,\n include_path_column=include_path_column,\n **kwargs,\n )\n\n read.__doc__ = 
READ_DOC_TEMPLATE.format(reader=reader_name, file_type=file_type)\n read.__name__ = reader_name\n return read\n\n\nread_csv = make_reader(pd.read_csv, \"read_csv\", \"CSV\")\nread_table = make_reader(pd.read_table, \"read_table\", \"delimited\")\nread_fwf = make_reader(pd.read_fwf, \"read_fwf\", \"fixed-width\")\n\n\ndef _write_csv(df, fil, *, depend_on=None, **kwargs):\n with fil as f:\n df.to_csv(f, **kwargs)\n return None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_to_csv_to_csv._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_to_csv_to_csv._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/csv.py", "file_name": "csv.py", "file_type": "text/x-python", "category": "implementation", "start_line": 675, "end_line": 787, "span_ids": ["to_csv"], "tokens": 955}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_csv(\n df,\n filename,\n single_file=False,\n encoding=\"utf-8\",\n mode=\"wt\",\n name_function=None,\n compression=None,\n compute=True,\n scheduler=None,\n storage_options=None,\n header_first_partition_only=None,\n compute_kwargs=None,\n **kwargs,\n):\n \"\"\"\n Store Dask DataFrame to CSV files\n\n One filename per partition will be created. You can specify the\n filenames in a variety of ways.\n\n Use a globstring::\n\n >>> df.to_csv('/path/to/data/export-*.csv') # doctest: +SKIP\n\n The * will be replaced by the increasing sequence 0, 1, 2, ...\n\n ::\n\n /path/to/data/export-0.csv\n /path/to/data/export-1.csv\n\n Use a globstring and a ``name_function=`` keyword argument. The\n name_function function should expect an integer and produce a string.\n Strings produced by name_function must preserve the order of their\n respective partition indices.\n\n >>> from datetime import date, timedelta\n >>> def name(i):\n ... return str(date(2015, 1, 1) + i * timedelta(days=1))\n\n >>> name(0)\n '2015-01-01'\n >>> name(15)\n '2015-01-16'\n\n >>> df.to_csv('/path/to/data/export-*.csv', name_function=name) # doctest: +SKIP\n\n ::\n\n /path/to/data/export-2015-01-01.csv\n /path/to/data/export-2015-01-02.csv\n ...\n\n You can also provide an explicit list of paths::\n\n >>> paths = ['/path/to/data/alice.csv', '/path/to/data/bob.csv', ...] # doctest: +SKIP\n >>> df.to_csv(paths) # doctest: +SKIP\n\n Parameters\n ----------\n df : dask.DataFrame\n Data to save\n filename : string\n Path glob indicating the naming scheme for the output files\n single_file : bool, default False\n Whether to save everything into a single CSV file. Under the\n single file mode, each partition is appended at the end of the\n specified CSV file. Note that not all filesystems support the\n append mode and thus the single file mode, especially on cloud\n storage systems such as S3 or GCS. 
A warning will be issued when\n        writing to a file that is not backed by a local filesystem.\n    encoding : string, optional\n        A string representing the encoding to use in the output file,\n        defaults to 'utf-8'.\n    mode : str\n        Python write mode, default 'wt'\n    name_function : callable, default None\n        Function accepting an integer (partition index) and producing a\n        string to replace the asterisk in the given filename globstring.\n        Should preserve the lexicographic order of partitions. Not\n        supported when `single_file` is `True`.\n    compression : string, optional\n        A string representing the compression to use in the output file,\n        allowed values are 'gzip', 'bz2', 'xz',\n        only used when the first argument is a filename\n    compute : bool\n        If True, immediately executes. If False, returns a set of delayed\n        objects, which can be computed at a later time.\n    storage_options : dict\n        Parameters passed on to the backend filesystem class.\n    header_first_partition_only : boolean, default None\n        If set to `True`, only write the header row in the first output\n        file. By default, headers are written to all partitions under\n        the multiple file mode (`single_file` is `False`) and written\n        only once under the single file mode (`single_file` is `True`).\n        It must not be `False` under the single file mode.\n    compute_kwargs : dict, optional\n        Options to be passed in to the compute method\n    kwargs : dict, optional\n        Additional parameters to pass to `pd.DataFrame.to_csv()`\n\n    Returns\n    -------\n    The names of the files written if they were computed right away.\n    If not, the delayed tasks associated with writing the files.\n\n    Raises\n    ------\n    ValueError\n        If `header_first_partition_only` is set to `False` or\n        `name_function` is specified when `single_file` is `True`.\n    \"\"\"\n    # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_to_csv.if_single_file_and_name_f_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_to_csv.if_single_file_and_name_f_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/csv.py", "file_name": "csv.py", "file_type": "text/x-python", "category": "implementation", "start_line": 788, "end_line": 866, "span_ids": ["to_csv", "impl:25"], "tokens": 692}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_csv(\n df,\n filename,\n single_file=False,\n encoding=\"utf-8\",\n mode=\"wt\",\n name_function=None,\n compression=None,\n compute=True,\n scheduler=None,\n storage_options=None,\n header_first_partition_only=None,\n compute_kwargs=None,\n **kwargs,\n):\n if single_file and name_function is not None:\n raise ValueError(\"name_function is not supported under the single file mode\")\n if header_first_partition_only is None:\n header_first_partition_only = single_file\n elif not header_first_partition_only and single_file:\n raise ValueError(\n \"header_first_partition_only cannot be False in the single file mode.\"\n )\n file_options = dict(\n compression=compression,\n encoding=encoding,\n newline=\"\",\n **(storage_options or {}),\n )\n to_csv_chunk = delayed(_write_csv, pure=False)\n dfs = df.to_delayed()\n if single_file:\n first_file = open_file(filename, mode=mode, **file_options)\n if not isinstance(first_file.fs, fsspec.implementations.local.LocalFileSystem):\n warn(\"Appending data to a network storage system may not work.\")\n value = to_csv_chunk(dfs[0], first_file, **kwargs)\n append_mode = mode.replace(\"w\", \"\") + \"a\"\n append_file = open_file(filename, mode=append_mode, **file_options)\n kwargs[\"header\"] = False\n for d in dfs[1:]:\n value = to_csv_chunk(d, append_file, depend_on=value, **kwargs)\n values = [value]\n files = [first_file]\n else:\n files = open_files(\n filename,\n mode=mode,\n name_function=name_function,\n num=df.npartitions,\n **file_options,\n )\n values = [to_csv_chunk(dfs[0], files[0], **kwargs)]\n if header_first_partition_only:\n kwargs[\"header\"] = False\n values.extend(\n [to_csv_chunk(d, f, **kwargs) for d, f in zip(dfs[1:], files[1:])]\n )\n if compute:\n if compute_kwargs is None:\n compute_kwargs = dict()\n\n if scheduler is not None:\n warn(\n \"The 'scheduler' keyword argument for `to_csv()` is deprecated and\"\n \"will be removed in a future version. \"\n \"Please use the `compute_kwargs` argument instead. 
\"\n f\"For example, df.to_csv(..., compute_kwargs={{scheduler: {scheduler}}})\",\n FutureWarning,\n )\n\n if (\n scheduler is not None\n and compute_kwargs.get(\"scheduler\") is not None\n and compute_kwargs.get(\"scheduler\") != scheduler\n ):\n raise ValueError(\n f\"Differing values for 'scheduler' have been passed in.\\n\"\n f\"scheduler argument: {scheduler}\\n\"\n f\"via compute_kwargs: {compute_kwargs.get('scheduler')}\"\n )\n\n if scheduler is not None and compute_kwargs.get(\"scheduler\") is None:\n compute_kwargs[\"scheduler\"] = scheduler\n\n delayed(values).compute(**compute_kwargs)\n return [f.path for f in files]\n else:\n return values\n\n\nfrom ..core import _Frame\n\n_Frame.to_csv.__doc__ = to_csv.__doc__", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/demo.py_pd_make._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/demo.py_pd_make._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/demo.py", "file_name": "demo.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 64, "span_ids": ["make_float", "impl:3", "imports", "make_string", "impl:5", "make_int", "make_categorical"], "tokens": 305}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pandas as pd\nimport numpy as np\n\nfrom ..core import tokenize, DataFrame\nfrom .io import from_delayed\nfrom ...delayed import delayed\nfrom ...utils import random_state_data\n\n__all__ = [\"make_timeseries\"]\n\n\ndef make_float(n, rstate):\n return rstate.rand(n) * 2 - 1\n\n\ndef make_int(n, rstate, lam=1000):\n return rstate.poisson(lam, size=n)\n\n\nnames = [\n \"Alice\",\n \"Bob\",\n \"Charlie\",\n \"Dan\",\n \"Edith\",\n \"Frank\",\n \"George\",\n \"Hannah\",\n \"Ingrid\",\n \"Jerry\",\n \"Kevin\",\n \"Laura\",\n \"Michael\",\n \"Norbert\",\n \"Oliver\",\n \"Patricia\",\n \"Quinn\",\n \"Ray\",\n \"Sarah\",\n \"Tim\",\n \"Ursula\",\n \"Victor\",\n \"Wendy\",\n \"Xavier\",\n \"Yvonne\",\n \"Zelda\",\n]\n\n\ndef make_string(n, rstate):\n return rstate.choice(names, size=n)\n\n\ndef make_categorical(n, rstate):\n return pd.Categorical.from_codes(rstate.randint(0, len(names), size=n), names)\n\n\nmake = {\n float: make_float,\n int: make_int,\n str: make_string,\n object: make_string,\n \"category\": make_categorical,\n}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/demo.py_make_timeseries_part_make_timeseries_part.return.df": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/demo.py_make_timeseries_part_make_timeseries_part.return.df", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/demo.py", "file_name": "demo.py", "file_type": 
"text/x-python", "category": "implementation", "start_line": 67, "end_line": 81, "span_ids": ["make_timeseries_part"], "tokens": 161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def make_timeseries_part(start, end, dtypes, freq, state_data, kwargs):\n index = pd.date_range(start=start, end=end, freq=freq, name=\"timestamp\")\n state = np.random.RandomState(state_data)\n columns = {}\n for k, dt in dtypes.items():\n kws = {\n kk.rsplit(\"_\", 1)[1]: v\n for kk, v in kwargs.items()\n if kk.rsplit(\"_\", 1)[0] == k\n }\n columns[k] = make[dt](len(index), state, **kws)\n df = pd.DataFrame(columns, index=index, columns=sorted(columns))\n if df.index[-1] == end:\n df = df.iloc[:-1]\n return df", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/demo.py_make_timeseries_make_timeseries.return.DataFrame_dsk_name_head": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/demo.py_make_timeseries_make_timeseries.return.DataFrame_dsk_name_head", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/demo.py", "file_name": "demo.py", "file_type": "text/x-python", "category": "implementation", "start_line": 84, "end_line": 146, "span_ids": ["make_timeseries"], "tokens": 624}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def make_timeseries(\n start=\"2000-01-01\",\n end=\"2000-12-31\",\n dtypes={\"name\": str, \"id\": int, \"x\": float, \"y\": float},\n freq=\"10s\",\n partition_freq=\"1M\",\n seed=None,\n **kwargs\n):\n \"\"\"Create timeseries dataframe with random data\n\n Parameters\n ----------\n start: datetime (or datetime-like string)\n Start of time series\n end: datetime (or datetime-like string)\n End of time series\n dtypes: dict\n Mapping of column names to types.\n Valid types include {float, int, str, 'category'}\n freq: string\n String like '2s' or '1H' or '12W' for the time series frequency\n partition_freq: string\n String like '1M' or '2Y' to divide the dataframe into partitions\n seed: int (optional)\n Randomstate seed\n kwargs:\n Keywords to pass down to individual column creation functions.\n Keywords should be prefixed by the column name and then an underscore.\n\n Examples\n --------\n >>> import dask.dataframe as dd\n >>> df = dd.demo.make_timeseries('2000', '2010',\n ... {'value': float, 'name': str, 'id': int},\n ... 
freq='2H', partition_freq='1D', seed=1)\n >>> df.head() # doctest: +SKIP\n id name value\n 2000-01-01 00:00:00 969 Jerry -0.309014\n 2000-01-01 02:00:00 1010 Ray -0.760675\n 2000-01-01 04:00:00 1016 Patricia -0.063261\n 2000-01-01 06:00:00 960 Charlie 0.788245\n 2000-01-01 08:00:00 1031 Kevin 0.466002\n \"\"\"\n divisions = list(pd.date_range(start=start, end=end, freq=partition_freq))\n state_data = random_state_data(len(divisions) - 1, seed)\n name = \"make-timeseries-\" + tokenize(\n start, end, dtypes, freq, partition_freq, state_data\n )\n dsk = {\n (name, i): (\n make_timeseries_part,\n divisions[i],\n divisions[i + 1],\n dtypes,\n freq,\n state_data[i],\n kwargs,\n )\n for i in range(len(divisions) - 1)\n }\n head = make_timeseries_part(\"2000\", \"2000\", dtypes, \"1H\", state_data[0], kwargs)\n return DataFrame(dsk, name, head, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/demo.py_generate_day_generate_day.return.pd_DataFrame_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/demo.py_generate_day_generate_day.return.pd_DataFrame_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/demo.py", "file_name": "demo.py", "file_type": "text/x-python", "category": "implementation", "start_line": 149, "end_line": 198, "span_ids": ["generate_day"], "tokens": 451}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def generate_day(\n date,\n open,\n high,\n low,\n close,\n volume,\n freq=pd.Timedelta(seconds=60),\n random_state=None,\n):\n \"\"\" Generate a day of financial data from open/close high/low values \"\"\"\n if not isinstance(random_state, np.random.RandomState):\n random_state = np.random.RandomState(random_state)\n if not isinstance(date, pd.Timestamp):\n date = pd.Timestamp(date)\n if not isinstance(freq, pd.Timedelta):\n freq = pd.Timedelta(freq)\n\n time = pd.date_range(\n date + pd.Timedelta(hours=9),\n date + pd.Timedelta(hours=12 + 4),\n freq=freq / 5,\n name=\"timestamp\",\n )\n n = len(time)\n while True:\n values = (random_state.random_sample(n) - 0.5).cumsum()\n values *= (high - low) / (values.max() - values.min()) # scale\n values += np.linspace(\n open - values[0], close - values[-1], len(values)\n ) # endpoints\n assert np.allclose(open, values[0])\n assert np.allclose(close, values[-1])\n\n mx = max(close, open)\n mn = min(close, open)\n ind = values > mx\n values[ind] = (values[ind] - mx) * (high - mx) / (values.max() - mx) + mx\n ind = values < mn\n values[ind] = (values[ind] - mn) * (low - mn) / (values.min() - mn) + mn\n # The process fails if min/max are the same as open close. 
This is rare\n if np.allclose(values.max(), high) and np.allclose(values.min(), low):\n break\n\n s = pd.Series(values.round(3), index=time)\n rs = s.resample(freq)\n # TODO: add in volume\n return pd.DataFrame(\n {\"open\": rs.first(), \"close\": rs.last(), \"high\": rs.max(), \"low\": rs.min()}\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/demo.py_daily_stock_daily_stock.divisions._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/demo.py_daily_stock_daily_stock.divisions._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/demo.py", "file_name": "demo.py", "file_type": "text/x-python", "category": "implementation", "start_line": 201, "end_line": 260, "span_ids": ["daily_stock"], "tokens": 698}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def daily_stock(\n symbol,\n start,\n stop,\n freq=pd.Timedelta(seconds=1),\n data_source=\"yahoo\",\n random_state=None,\n):\n \"\"\"Create artificial stock data\n\n This data matches daily open/high/low/close values from Yahoo! Finance, but\n interpolates values within each day with random values. This makes the\n results look natural without requiring the downloading of large volumes of\n data. This is useful for education and benchmarking.\n\n Parameters\n ----------\n symbol: string\n A stock symbol like \"GOOG\" or \"F\"\n start: date, str, or pd.Timestamp\n The start date, input will be fed into pd.Timestamp for normalization\n stop: date, str, or pd.Timestamp\n The start date, input will be fed into pd.Timestamp for normalization\n freq: timedelta, str, or pd.Timedelta\n The frequency of sampling\n data_source: str, optional\n defaults to 'yahoo'. See pandas_datareader.data.DataReader for options\n random_state: int, np.random.RandomState object\n random seed, defaults to randomly chosen\n\n Examples\n --------\n >>> import dask.dataframe as dd # doctest: +SKIP\n >>> df = dd.demo.daily_stock('GOOG', '2010', '2011', freq='1s') # doctest: +SKIP\n >>> df # doctest: +SKIP\n Dask DataFrame Structure:\n close high low open\n npartitions=252\n 2010-01-04 09:00:00 float64 float64 float64 float64\n 2010-01-05 09:00:00 ... ... ... ...\n ... ... ... ... ...\n 2010-12-31 09:00:00 ... ... ... ...\n 2010-12-31 16:00:00 ... ... ... ...\n Dask Name: from-delayed, 504 tasks\n\n >>> df.head() # doctest: +SKIP\n close high low open\n timestamp\n 2010-01-04 09:00:00 626.944 626.964 626.944 626.951\n 2010-01-04 09:00:01 626.906 626.931 626.906 626.931\n 2010-01-04 09:00:02 626.901 626.911 626.901 626.905\n 2010-01-04 09:00:03 626.920 626.920 626.905 626.905\n 2010-01-04 09:00:04 626.894 626.917 626.894 626.906\n \"\"\"\n from pandas_datareader import data\n\n df = data.DataReader(symbol, data_source, start, stop)\n seeds = random_state_data(len(df), random_state=random_state)\n parts = []\n divisions = []\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/demo.py_daily_stock.for_i_seed_in_zip_range__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/demo.py_daily_stock.for_i_seed_in_zip_range__", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/demo.py", "file_name": "demo.py", "file_type": "text/x-python", "category": "implementation", "start_line": 261, "end_line": 283, "span_ids": ["daily_stock"], "tokens": 207}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def daily_stock(\n symbol,\n start,\n stop,\n freq=pd.Timedelta(seconds=1),\n data_source=\"yahoo\",\n random_state=None,\n):\n # ... other code\n for i, seed in zip(range(len(df)), seeds):\n s = df.iloc[i]\n if s.isnull().any():\n continue\n part = delayed(generate_day)(\n s.name,\n s.loc[\"Open\"],\n s.loc[\"High\"],\n s.loc[\"Low\"],\n s.loc[\"Close\"],\n s.loc[\"Volume\"],\n freq=freq,\n random_state=seed,\n )\n parts.append(part)\n divisions.append(s.name + pd.Timedelta(hours=9))\n\n divisions.append(s.name + pd.Timedelta(hours=12 + 4))\n\n meta = generate_day(\"2000-01-01\", 1, 2, 0, 1, 100)\n\n return from_delayed(parts, meta=meta, divisions=divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_from_fnmatch_import_fnmat__pd_to_hdf.return.None": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_from_fnmatch_import_fnmat__pd_to_hdf.return.None", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/hdf.py", "file_name": "hdf.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 34, "span_ids": ["imports", "_pd_to_hdf"], "tokens": 198}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from fnmatch import fnmatch\nfrom glob import glob\nimport os\nimport uuid\nfrom warnings import warn\n\nimport pandas as pd\nfrom tlz import merge\n\n# this import checks for the importability of fsspec\nfrom ...bytes import read_bytes # noqa\nfrom fsspec.utils import build_name_function, stringify_path\n\nfrom .io import _link\nfrom ...base import get_scheduler\nfrom ..core import DataFrame, new_dd_object\nfrom ... 
import config, multiprocessing\nfrom ...base import tokenize, compute_as_if_collection\nfrom ...delayed import Delayed, delayed\nfrom ...utils import get_scheduler_lock\n\n\ndef _pd_to_hdf(pd_to_hdf, lock, args, kwargs=None):\n    \"\"\" A wrapper function around pd_to_hdf that enables locking\"\"\"\n\n    if lock:\n        lock.acquire()\n    try:\n        pd_to_hdf(*args, **kwargs)\n    finally:\n        if lock:\n            lock.release()\n\n    return None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_to_hdf_to_hdf._Store_Dask_Dataframe_t": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_to_hdf_to_hdf._Store_Dask_Dataframe_t", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/hdf.py", "file_name": "hdf.py", "file_type": "text/x-python", "category": "implementation", "start_line": 37, "end_line": 135, "span_ids": ["to_hdf"], "tokens": 862}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_hdf(\n    df,\n    path,\n    key,\n    mode=\"a\",\n    append=False,\n    scheduler=None,\n    name_function=None,\n    compute=True,\n    lock=None,\n    dask_kwargs={},\n    **kwargs\n):\n    \"\"\"Store Dask DataFrame to Hierarchical Data Format (HDF) files\n\n    This is a parallel version of the Pandas function of the same name. Please\n    see the Pandas docstring for more detailed information about shared keyword\n    arguments.\n\n    This function differs from the Pandas version by saving the many partitions\n    of a Dask DataFrame in parallel, either to many files, or to many datasets\n    within the same file. You may specify this parallelism with an asterisk\n    ``*`` within the filename or datapath, and an optional ``name_function``.\n    The asterisk will be replaced with an increasing sequence of integers\n    starting from ``0`` or with the result of calling ``name_function`` on each\n    of those integers.\n\n    This function only supports the Pandas ``'table'`` format, not the more\n    specialized ``'fixed'`` format.\n\n    Parameters\n    ----------\n    path : string, pathlib.Path\n        Path to a target filename. Supports strings, ``pathlib.Path``, or any\n        object implementing the ``__fspath__`` protocol. May contain a ``*`` to\n        denote many filenames.\n    key : string\n        Datapath within the files. May contain a ``*`` to denote many locations\n    name_function : function\n        A function to convert the ``*`` in the above options to a string.\n        Should take in a number from 0 to the number of partitions and return a\n        string. (see examples below)\n    compute : bool\n        Whether or not to execute immediately. If False then this returns a\n        ``dask.Delayed`` value.\n    lock : Lock, optional\n        Lock to use to prevent concurrency issues. By default a\n        ``threading.Lock``, ``multiprocessing.Lock`` or ``SerializableLock``\n        will be used depending on your scheduler if a lock is required. 
See\n        dask.utils.get_scheduler_lock for more information about lock\n        selection.\n    scheduler : string\n        The scheduler to use, like \"threads\" or \"processes\"\n    **other:\n        See pandas.to_hdf for more information\n\n    Examples\n    --------\n    Save data to a single file\n\n    >>> df.to_hdf('output.hdf', '/data')            # doctest: +SKIP\n\n    Save data to multiple datapaths within the same file:\n\n    >>> df.to_hdf('output.hdf', '/data-*')          # doctest: +SKIP\n\n    Save data to multiple files:\n\n    >>> df.to_hdf('output-*.hdf', '/data')          # doctest: +SKIP\n\n    Save data to multiple files, using the multiprocessing scheduler:\n\n    >>> df.to_hdf('output-*.hdf', '/data', scheduler='processes') # doctest: +SKIP\n\n    Specify custom naming scheme. This writes files as\n    '2000-01-01.hdf', '2000-01-02.hdf', '2000-01-03.hdf', etc.\n\n    >>> from datetime import date, timedelta\n    >>> base = date(year=2000, month=1, day=1)\n    >>> def name_function(i):\n    ...     ''' Convert integer 0 to n to a string '''\n    ...     return str(base + timedelta(days=i))\n\n    >>> df.to_hdf('*.hdf', '/data', name_function=name_function) # doctest: +SKIP\n\n    Returns\n    -------\n    filenames : list\n        Returned if ``compute`` is True. List of file names that each partition\n        is saved to.\n    delayed : dask.Delayed\n        Returned if ``compute`` is False. Delayed object to execute ``to_hdf``\n        when computed.\n\n    See Also\n    --------\n    read_hdf:\n    to_parquet:\n    \"\"\"\n    # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_to_hdf.name_to_hdf.for_i_in_range_0_df_npar.filenames_append_fmt_obj_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_to_hdf.name_to_hdf.for_i_in_range_0_df_npar.filenames_append_fmt_obj_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/hdf.py", "file_name": "hdf.py", "file_type": "text/x-python", "category": "implementation", "start_line": 136, "end_line": 230, "span_ids": ["to_hdf"], "tokens": 773}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_hdf(\n    df,\n    path,\n    key,\n    mode=\"a\",\n    append=False,\n    scheduler=None,\n    name_function=None,\n    compute=True,\n    lock=None,\n    dask_kwargs={},\n    **kwargs\n):\n    name = \"to-hdf-\" + uuid.uuid1().hex\n\n    pd_to_hdf = getattr(df._partition_type, \"to_hdf\")\n\n    single_file = True\n    single_node = True\n\n    path = stringify_path(path)\n\n    # if path is string, format using i_name\n    if isinstance(path, str):\n        if path.count(\"*\") + key.count(\"*\") > 1:\n            raise ValueError(\n                \"A maximum of one asterisk is accepted in file path and dataset key\"\n            )\n\n        fmt_obj = lambda path, i_name: path.replace(\"*\", i_name)\n\n        if \"*\" in path:\n            single_file = False\n    else:\n        if key.count(\"*\") > 1:\n            raise ValueError(\"A maximum of one asterisk is accepted in dataset key\")\n\n        fmt_obj = lambda path, _: path\n\n    if \"*\" in key:\n        single_node = False\n\n    if \"format\" in kwargs and kwargs[\"format\"] not in [\"t\", \"table\"]:\n        raise ValueError(\"Dask only supports the 'table' format in hdf 
files.\")\n\n if mode not in (\"a\", \"w\", \"r+\"):\n raise ValueError(\"Mode must be one of 'a', 'w' or 'r+'\")\n\n if name_function is None:\n name_function = build_name_function(df.npartitions - 1)\n\n # we guarantee partition order is preserved when its saved and read\n # so we enforce name_function to maintain the order of its input.\n if not (single_file and single_node):\n formatted_names = [name_function(i) for i in range(df.npartitions)]\n if formatted_names != sorted(formatted_names):\n warn(\n \"To preserve order between partitions name_function \"\n \"must preserve the order of its input\"\n )\n\n # If user did not specify scheduler and write is sequential default to the\n # sequential scheduler. otherwise let the _get method choose the scheduler\n if (\n scheduler is None\n and not config.get(\"scheduler\", None)\n and single_node\n and single_file\n ):\n scheduler = \"single-threaded\"\n\n # handle lock default based on whether we're writing to a single entity\n _actual_get = get_scheduler(collections=[df], scheduler=scheduler)\n if lock is None:\n if not single_node:\n lock = True\n elif not single_file and _actual_get is not multiprocessing.get:\n # if we're writing to multiple files with the multiprocessing\n # scheduler we don't need to lock\n lock = True\n else:\n lock = False\n if lock:\n lock = get_scheduler_lock(df, scheduler=scheduler)\n\n kwargs.update({\"format\": \"table\", \"mode\": mode, \"append\": append})\n\n dsk = dict()\n\n i_name = name_function(0)\n dsk[(name, 0)] = (\n _pd_to_hdf,\n pd_to_hdf,\n lock,\n [(df._name, 0), fmt_obj(path, i_name), key.replace(\"*\", i_name)],\n kwargs,\n )\n\n kwargs2 = kwargs.copy()\n if single_file:\n kwargs2[\"mode\"] = \"a\"\n if single_node:\n kwargs2[\"append\"] = True\n\n filenames = []\n for i in range(0, df.npartitions):\n i_name = name_function(i)\n filenames.append(fmt_obj(path, i_name))\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_to_hdf.for_i_in_range_1_df_npar_to_hdf.if_compute_.else_.return.delayed_Delayed_k_dsk_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_to_hdf.for_i_in_range_1_df_npar_to_hdf.if_compute_.else_.return.delayed_Delayed_k_dsk_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/hdf.py", "file_name": "hdf.py", "file_type": "text/x-python", "category": "implementation", "start_line": 232, "end_line": 258, "span_ids": ["to_hdf"], "tokens": 276}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_hdf(\n df,\n path,\n key,\n mode=\"a\",\n append=False,\n scheduler=None,\n name_function=None,\n compute=True,\n lock=None,\n dask_kwargs={},\n **kwargs\n):\n # ... 
other code\n\n for i in range(1, df.npartitions):\n i_name = name_function(i)\n task = (\n _pd_to_hdf,\n pd_to_hdf,\n lock,\n [(df._name, i), fmt_obj(path, i_name), key.replace(\"*\", i_name)],\n kwargs2,\n )\n if single_file:\n link_dep = i - 1 if single_node else 0\n task = (_link, (name, link_dep), task)\n dsk[(name, i)] = task\n\n dsk = merge(df.dask, dsk)\n if single_file and single_node:\n keys = [(name, df.npartitions - 1)]\n else:\n keys = [(name, i) for i in range(df.npartitions)]\n\n if compute:\n compute_as_if_collection(\n DataFrame, dsk, keys, scheduler=scheduler, **dask_kwargs\n )\n return filenames\n else:\n return delayed([Delayed(k, dsk) for k in keys])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_dont_use_fixed_error_message_read_hdf_error_msg._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_dont_use_fixed_error_message_read_hdf_error_msg._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/hdf.py", "file_name": "hdf.py", "file_type": "text/x-python", "category": "implementation", "start_line": 261, "end_line": 272, "span_ids": ["impl"], "tokens": 110}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "dont_use_fixed_error_message = \"\"\"\nThis HDFStore is not partitionable and can only be used monolithically with\npandas. 
In the future when creating HDFStores use the ``format='table'``\noption to ensure that your dataset can be parallelized\"\"\"\n\nread_hdf_error_msg = \"\"\"\nThe start and stop keywords are not supported when reading from more than\none file/dataset.\n\nThe combination is ambiguous because it could be interpreted as the starting\nand stopping index per file, or starting and stopping index of the global\ndataset.\"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py__read_single_hdf__read_single_hdf.get_keys_stops_divisions.return.keys_stops_divisions": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py__read_single_hdf__read_single_hdf.get_keys_stops_divisions.return.keys_stops_divisions", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/hdf.py", "file_name": "hdf.py", "file_type": "text/x-python", "category": "implementation", "start_line": 275, "end_line": 348, "span_ids": ["_read_single_hdf"], "tokens": 578}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _read_single_hdf(\n path,\n key,\n start=0,\n stop=None,\n columns=None,\n chunksize=int(1e6),\n sorted_index=False,\n lock=None,\n mode=\"a\",\n):\n \"\"\"\n Read a single hdf file into a dask.dataframe. Used for each file in\n read_hdf.\n \"\"\"\n\n def get_keys_stops_divisions(path, key, stop, sorted_index, chunksize):\n \"\"\"\n Get the \"keys\" or group identifiers which match the given key, which\n can contain wildcards. This uses the hdf file identified by the\n given path. 
Also get the index of the last row of data for each matched\n key.\n \"\"\"\n with pd.HDFStore(path, mode=mode) as hdf:\n import glob\n from distutils.version import LooseVersion\n\n if LooseVersion(pd.__version__) >= LooseVersion(\"0.24\"):\n if not glob.has_magic(key):\n keys = [key]\n else:\n keys = [k for k in hdf.keys() if fnmatch(k, key)]\n # https://github.com/dask/dask/issues/5934\n # TODO: remove this part if/when pandas copes with all keys\n keys.extend(\n n._v_pathname\n for n in hdf._handle.walk_nodes(\"/\", classname=\"Table\")\n if fnmatch(n._v_pathname, key)\n and n._v_name != \"table\"\n and n._v_pathname not in keys\n )\n else:\n # TODO: remove if we require pandas >= 0.24\n keys = [k for k in hdf.keys() if fnmatch(k, key)]\n stops = []\n divisions = []\n for k in keys:\n storer = hdf.get_storer(k)\n if storer.format_type != \"table\":\n raise TypeError(dont_use_fixed_error_message)\n if stop is None:\n stops.append(storer.nrows)\n elif stop > storer.nrows:\n raise ValueError(\n \"Stop keyword exceeds dataset number \"\n \"of rows ({})\".format(storer.nrows)\n )\n else:\n stops.append(stop)\n if sorted_index:\n division = [\n storer.read_column(\"index\", start=start, stop=start + 1)[0]\n for start in range(0, storer.nrows, chunksize)\n ]\n division_end = storer.read_column(\n \"index\", start=storer.nrows - 1, stop=storer.nrows\n )[0]\n\n division.append(division_end)\n divisions.append(division)\n else:\n divisions.append(None)\n\n return keys, stops, divisions\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py__read_single_hdf.one_path_one_key__read_single_hdf.one_path_one_key.return.new_dd_object_dsk_name_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py__read_single_hdf.one_path_one_key__read_single_hdf.one_path_one_key.return.new_dd_object_dsk_name_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/hdf.py", "file_name": "hdf.py", "file_type": "text/x-python", "category": "implementation", "start_line": 350, "end_line": 389, "span_ids": ["_read_single_hdf"], "tokens": 379}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _read_single_hdf(\n path,\n key,\n start=0,\n stop=None,\n columns=None,\n chunksize=int(1e6),\n sorted_index=False,\n lock=None,\n mode=\"a\",\n):\n # ... 
other code\n\n def one_path_one_key(path, key, start, stop, columns, chunksize, division, lock):\n \"\"\"\n Get the data frame corresponding to one path and one key (which should\n not contain any wildcards).\n \"\"\"\n empty = pd.read_hdf(path, key, mode=mode, stop=0)\n if columns is not None:\n empty = empty[columns]\n\n token = tokenize(\n (path, os.path.getmtime(path), key, start, stop, empty, chunksize, division)\n )\n name = \"read-hdf-\" + token\n if empty.ndim == 1:\n base = {\"name\": empty.name, \"mode\": mode}\n else:\n base = {\"columns\": empty.columns, \"mode\": mode}\n\n if start >= stop:\n raise ValueError(\n \"Start row number ({}) is above or equal to stop \"\n \"row number ({})\".format(start, stop)\n )\n\n def update(s):\n new = base.copy()\n new.update({\"start\": s, \"stop\": s + chunksize})\n return new\n\n dsk = dict(\n ((name, i), (_pd_read_hdf, path, key, lock, update(s)))\n for i, s in enumerate(range(start, stop, chunksize))\n )\n\n if division:\n divisions = division\n else:\n divisions = [None] * (len(dsk) + 1)\n\n return new_dd_object(dsk, name, empty, divisions)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py__read_single_hdf.keys_stops_divisions___pd_read_hdf.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py__read_single_hdf.keys_stops_divisions___pd_read_hdf.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/hdf.py", "file_name": "hdf.py", "file_type": "text/x-python", "category": "implementation", "start_line": 391, "end_line": 415, "span_ids": ["_read_single_hdf", "_pd_read_hdf"], "tokens": 233}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _read_single_hdf(\n path,\n key,\n start=0,\n stop=None,\n columns=None,\n chunksize=int(1e6),\n sorted_index=False,\n lock=None,\n mode=\"a\",\n):\n # ... 
other code\n\n keys, stops, divisions = get_keys_stops_divisions(\n path, key, stop, sorted_index, chunksize\n )\n if (start != 0 or stop is not None) and len(keys) > 1:\n raise NotImplementedError(read_hdf_error_msg)\n from ..multi import concat\n\n return concat(\n [\n one_path_one_key(path, k, start, s, columns, chunksize, d, lock)\n for k, s, d in zip(keys, stops, divisions)\n ]\n )\n\n\ndef _pd_read_hdf(path, key, lock, kwargs):\n \"\"\" Read from hdf5 file with a lock \"\"\"\n if lock:\n lock.acquire()\n try:\n result = pd.read_hdf(path, key, **kwargs)\n finally:\n if lock:\n lock.release()\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_read_hdf_read_hdf.if_chunksize_0_.raise_ValueError_Chunksi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_read_hdf_read_hdf.if_chunksize_0_.raise_ValueError_Chunksi", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/hdf.py", "file_name": "hdf.py", "file_type": "text/x-python", "category": "implementation", "start_line": 418, "end_line": 512, "span_ids": ["read_hdf"], "tokens": 770}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def read_hdf(\n pattern,\n key,\n start=0,\n stop=None,\n columns=None,\n chunksize=1000000,\n sorted_index=False,\n lock=True,\n mode=\"a\",\n):\n \"\"\"\n Read HDF files into a Dask DataFrame\n\n Read hdf files into a dask dataframe. This function is like\n ``pandas.read_hdf``, except it can read from a single large file, or from\n multiple files, or from multiple keys from the same file.\n\n Parameters\n ----------\n pattern : string, pathlib.Path, list\n File pattern (string), pathlib.Path, buffer to read from, or list of\n file paths. Can contain wildcards.\n key : group identifier in the store. Can contain wildcards\n start : optional, integer (defaults to 0), row number to start at\n stop : optional, integer (defaults to None, the last row), row number to\n stop at\n columns : list of columns, optional\n A list of columns that if not None, will limit the return\n columns (default is None)\n chunksize : positive integer, optional\n Maximal number of rows per partition (default is 1000000).\n sorted_index : boolean, optional\n Option to specify whether or not the input hdf files have a sorted\n index (default is False).\n lock : boolean, optional\n Option to use a lock to prevent concurrency issues (default is True).\n mode : {'a', 'r', 'r+'}, default 'a'. 
Mode to use when opening file(s).\n 'r'\n Read-only; no data can be modified.\n 'a'\n Append; an existing file is opened for reading and writing,\n and if the file does not exist it is created.\n 'r+'\n It is similar to 'a', but the file must already exist.\n\n Returns\n -------\n dask.DataFrame\n\n Examples\n --------\n Load single file\n\n >>> dd.read_hdf('myfile.1.hdf5', '/x') # doctest: +SKIP\n\n Load multiple files\n\n >>> dd.read_hdf('myfile.*.hdf5', '/x') # doctest: +SKIP\n\n >>> dd.read_hdf(['myfile.1.hdf5', 'myfile.2.hdf5'], '/x') # doctest: +SKIP\n\n Load multiple datasets\n\n >>> dd.read_hdf('myfile.1.hdf5', '/*') # doctest: +SKIP\n \"\"\"\n if lock is True:\n lock = get_scheduler_lock()\n\n key = key if key.startswith(\"/\") else \"/\" + key\n # Convert path-like objects to a string\n pattern = stringify_path(pattern)\n\n if isinstance(pattern, str):\n paths = sorted(glob(pattern))\n else:\n paths = pattern\n\n if not isinstance(pattern, str) and len(paths) == 0:\n raise ValueError(\"No files provided\")\n if not paths or len(paths) == 0:\n raise IOError(\"File(s) not found: {0}\".format(pattern))\n for path in paths:\n try:\n exists = os.path.exists(path)\n except (ValueError, TypeError):\n exists = False\n if not exists:\n raise IOError(\n \"File not found or insufficient permissions: {0}\".format(path)\n )\n if (start != 0 or stop is not None) and len(paths) > 1:\n raise NotImplementedError(read_hdf_error_msg)\n if chunksize <= 0:\n raise ValueError(\"Chunksize must be a positive integer\")\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_read_hdf.None_7_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_read_hdf.None_7_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/hdf.py", "file_name": "hdf.py", "file_type": "text/x-python", "category": "implementation", "start_line": 513, "end_line": 541, "span_ids": ["impl:5", "read_hdf"], "tokens": 199}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def read_hdf(\n pattern,\n key,\n start=0,\n stop=None,\n columns=None,\n chunksize=1000000,\n sorted_index=False,\n lock=True,\n mode=\"a\",\n):\n # ... 
other code\n if (start != 0 or stop is not None) and sorted_index:\n raise ValueError(\n \"When assuming pre-partitioned data, data must be \"\n \"read in its entirety using the same chunksizes\"\n )\n from ..multi import concat\n\n return concat(\n [\n _read_single_hdf(\n path,\n key,\n start=start,\n stop=stop,\n columns=columns,\n chunksize=chunksize,\n sorted_index=sorted_index,\n lock=lock,\n mode=mode,\n )\n for path in paths\n ]\n )\n\n\nfrom ..core import _Frame\n\n_Frame.to_hdf.__doc__ = to_hdf.__doc__", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_array_from_array.return.new_dd_object_dsk_name_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_array_from_array.return.new_dd_object_dsk_name_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/io.py", "file_name": "io.py", "file_type": "text/x-python", "category": "implementation", "start_line": 80, "end_line": 130, "span_ids": ["from_array"], "tokens": 432}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def from_array(x, chunksize=50000, columns=None, meta=None):\n \"\"\"Read any sliceable array into a Dask Dataframe\n\n Uses getitem syntax to pull slices out of the array. The array need not be\n a NumPy array but must support slicing syntax\n\n x[50000:100000]\n\n and have 2 dimensions:\n\n x.ndim == 2\n\n or have a record dtype:\n\n x.dtype == [('name', 'O'), ('balance', 'i8')]\n\n Parameters\n ----------\n x : array_like\n chunksize : int, optional\n The number of rows per partition to use.\n columns : list or string, optional\n list of column names if DataFrame, single string if Series\n meta : object, optional\n An optional `meta` parameter can be passed for dask\n to specify the concrete dataframe type to use for partitions of\n the Dask dataframe. 
By default, pandas DataFrame is used.\n\n Returns\n -------\n dask.DataFrame or dask.Series\n A dask DataFrame/Series\n \"\"\"\n if isinstance(x, da.Array):\n return from_dask_array(x, columns=columns, meta=meta)\n\n meta = _meta_from_array(x, columns, meta=meta)\n\n divisions = tuple(range(0, len(x), chunksize))\n divisions = divisions + (len(x) - 1,)\n token = tokenize(x, chunksize, columns)\n name = \"from_array-\" + token\n\n dsk = {}\n for i in range(0, int(ceil(len(x) / chunksize))):\n data = (getitem, x, slice(i * chunksize, (i + 1) * chunksize))\n if is_series_like(meta):\n dsk[name, i] = (type(meta), data, None, meta.dtype, meta.name)\n else:\n dsk[name, i] = (type(meta), data, None, meta.columns)\n return new_dd_object(dsk, name, meta, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_bcolz_from_bcolz.if_index_.else_.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_bcolz_from_bcolz.if_index_.else_.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/io.py", "file_name": "io.py", "file_type": "text/x-python", "category": "implementation", "start_line": 230, "end_line": 317, "span_ids": ["from_bcolz"], "tokens": 684}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def from_bcolz(x, chunksize=None, categorize=True, index=None, lock=lock, **kwargs):\n \"\"\"Read BColz CTable into a Dask Dataframe\n\n BColz is a fast on-disk compressed column store with careful attention\n given to compression. 
https://bcolz.readthedocs.io/en/latest/\n\n Parameters\n ----------\n x : bcolz.ctable\n chunksize : int, optional\n The size(rows) of blocks to pull out from ctable.\n categorize : bool, defaults to True\n Automatically categorize all string dtypes\n index : string, optional\n Column to make the index\n lock: bool or Lock\n Lock to use when reading or False for no lock (not-thread-safe)\n\n See Also\n --------\n from_array: more generic function not optimized for bcolz\n \"\"\"\n if lock is True:\n lock = Lock()\n\n import dask.array as da\n import bcolz\n\n if isinstance(x, str):\n x = bcolz.ctable(rootdir=x)\n bc_chunklen = max(x[name].chunklen for name in x.names)\n if chunksize is None and bc_chunklen > 10000:\n chunksize = bc_chunklen\n\n categories = dict()\n if categorize:\n for name in x.names:\n if (\n np.issubdtype(x.dtype[name], np.string_)\n or np.issubdtype(x.dtype[name], np.unicode_)\n or np.issubdtype(x.dtype[name], np.object_)\n ):\n a = da.from_array(x[name], chunks=(chunksize * len(x.names),))\n categories[name] = da.unique(a).compute()\n\n columns = tuple(x.dtype.names)\n divisions = tuple(range(0, len(x), chunksize))\n divisions = divisions + (len(x) - 1,)\n if x.rootdir:\n token = tokenize(\n (x.rootdir, os.path.getmtime(x.rootdir)),\n chunksize,\n categorize,\n index,\n kwargs,\n )\n else:\n token = tokenize(\n (id(x), x.shape, x.dtype), chunksize, categorize, index, kwargs\n )\n new_name = \"from_bcolz-\" + token\n\n dsk = dict(\n (\n (new_name, i),\n (\n dataframe_from_ctable,\n x,\n (slice(i * chunksize, (i + 1) * chunksize),),\n columns,\n categories,\n lock,\n ),\n )\n for i in range(0, int(ceil(len(x) / chunksize)))\n )\n\n meta = dataframe_from_ctable(x, slice(0, 0), columns, categories, lock)\n result = DataFrame(dsk, new_name, meta, divisions)\n\n if index:\n assert index in x.names\n a = da.from_array(x[index], chunks=(chunksize * len(x.names),))\n q = np.linspace(0, 100, len(x) // chunksize + 2)\n divisions = tuple(da.percentile(a, q).compute())\n return set_partition(result, index, divisions, **kwargs)\n else:\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_dataframe_from_ctable_dataframe_from_ctable.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_dataframe_from_ctable_dataframe_from_ctable.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/io.py", "file_name": "io.py", "file_type": "text/x-python", "category": "implementation", "start_line": 320, "end_line": 393, "span_ids": ["dataframe_from_ctable"], "tokens": 555}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def dataframe_from_ctable(x, slc, columns=None, categories=None, lock=lock):\n \"\"\"Get DataFrame from bcolz.ctable\n\n Parameters\n ----------\n x: bcolz.ctable\n slc: slice\n columns: list of column names or None\n\n >>> import bcolz\n >>> x = bcolz.ctable([[1, 2, 3, 4], [10, 20, 30, 40]], names=['a', 'b'])\n >>> 
dataframe_from_ctable(x, slice(1, 3))\n a b\n 1 2 20\n 2 3 30\n\n >>> dataframe_from_ctable(x, slice(1, 3), columns=['b'])\n b\n 1 20\n 2 30\n\n >>> dataframe_from_ctable(x, slice(1, 3), columns='b')\n 1 20\n 2 30\n Name: b, dtype: int...\n\n \"\"\"\n import bcolz\n\n if columns is None:\n columns = x.dtype.names\n if isinstance(columns, tuple):\n columns = list(columns)\n\n x = x[columns]\n if type(slc) is slice:\n start = slc.start\n stop = slc.stop if slc.stop < len(x) else len(x)\n else:\n start = slc[0].start\n stop = slc[0].stop if slc[0].stop < len(x) else len(x)\n idx = pd.Index(range(start, stop))\n\n if lock:\n lock.acquire()\n try:\n if isinstance(x, bcolz.ctable):\n chunks = [x[name][slc] for name in columns]\n if categories is not None:\n chunks = [\n pd.Categorical.from_codes(\n np.searchsorted(categories[name], chunk), categories[name], True\n )\n if name in categories\n else chunk\n for name, chunk in zip(columns, chunks)\n ]\n result = pd.DataFrame(\n dict(zip(columns, chunks)), columns=columns, index=idx\n )\n\n elif isinstance(x, bcolz.carray):\n chunk = x[slc]\n if categories is not None and columns and columns in categories:\n chunk = pd.Categorical.from_codes(\n np.searchsorted(categories[columns], chunk),\n categories[columns],\n True,\n )\n result = pd.Series(chunk, name=columns, index=idx)\n finally:\n if lock:\n lock.release()\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_dask_array_from_dask_array.dsk._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_dask_array_from_dask_array.dsk._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/io.py", "file_name": "io.py", "file_type": "text/x-python", "category": "implementation", "start_line": 396, "end_line": 474, "span_ids": ["from_dask_array"], "tokens": 737}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def from_dask_array(x, columns=None, index=None, meta=None):\n \"\"\"Create a Dask DataFrame from a Dask Array.\n\n Converts a 2d array into a DataFrame and a 1d array into a Series.\n\n Parameters\n ----------\n x : da.Array\n columns : list or string\n list of column names if DataFrame, single string if Series\n index : dask.dataframe.Index, optional\n An optional *dask* Index to use for the output Series or DataFrame.\n\n The default output index depends on whether `x` has any unknown\n chunks. If there are any unknown chunks, the output has ``None``\n for all the divisions (one per chunk). 
If all the chunks are known,\n a default index with known divisions is created.\n\n Specifying `index` can be useful if you're conforming a Dask Array\n to an existing dask Series or DataFrame, and you would like the\n indices to match.\n meta : object, optional\n An optional `meta` parameter can be passed for dask\n to specify the concrete dataframe type to be returned.\n By default, pandas DataFrame is used.\n\n Examples\n --------\n >>> import dask.array as da\n >>> import dask.dataframe as dd\n >>> x = da.ones((4, 2), chunks=(2, 2))\n >>> df = dd.io.from_dask_array(x, columns=['a', 'b'])\n >>> df.compute()\n a b\n 0 1.0 1.0\n 1 1.0 1.0\n 2 1.0 1.0\n 3 1.0 1.0\n\n See Also\n --------\n dask.bag.to_dataframe: from dask.bag\n dask.dataframe._Frame.values: Reverse conversion\n dask.dataframe._Frame.to_records: Reverse conversion\n \"\"\"\n meta = _meta_from_array(x, columns, index, meta=meta)\n\n if x.ndim == 2 and len(x.chunks[1]) > 1:\n x = x.rechunk({1: x.shape[1]})\n\n name = \"from-dask-array\" + tokenize(x, columns)\n to_merge = []\n\n if index is not None:\n if not isinstance(index, Index):\n raise ValueError(\"'index' must be an instance of dask.dataframe.Index\")\n if index.npartitions != x.numblocks[0]:\n msg = (\n \"The index and array have different numbers of blocks. \"\n \"({} != {})\".format(index.npartitions, x.numblocks[0])\n )\n raise ValueError(msg)\n divisions = index.divisions\n to_merge.append(ensure_dict(index.dask))\n index = index.__dask_keys__()\n\n elif np.isnan(sum(x.shape)):\n divisions = [None] * (len(x.chunks[0]) + 1)\n index = [None] * len(x.chunks[0])\n else:\n divisions = [0]\n for c in x.chunks[0]:\n divisions.append(divisions[-1] + c)\n index = [\n (np.arange, a, b, 1, \"i8\") for a, b in zip(divisions[:-1], divisions[1:])\n ]\n divisions[-1] -= 1\n\n dsk = {}\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_dask_array.for_i_chunk_ind_in_en__df_to_bag.if_isinstance_df_pd_Data.elif_isinstance_df_pd_Se.return.list_df_iteritems_if_i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_dask_array.for_i_chunk_ind_in_en__df_to_bag.if_isinstance_df_pd_Data.elif_isinstance_df_pd_Se.return.list_df_iteritems_if_i", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/io.py", "file_name": "io.py", "file_type": "text/x-python", "category": "implementation", "start_line": 475, "end_line": 500, "span_ids": ["_link", "_df_to_bag", "from_dask_array"], "tokens": 240}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def from_dask_array(x, columns=None, index=None, meta=None):\n # ... 
other code\n for i, (chunk, ind) in enumerate(zip(x.__dask_keys__(), index)):\n if x.ndim == 2:\n chunk = chunk[0]\n if is_series_like(meta):\n dsk[name, i] = (type(meta), chunk, ind, x.dtype, meta.name)\n else:\n dsk[name, i] = (type(meta), chunk, ind, meta.columns)\n\n to_merge.extend([ensure_dict(x.dask), dsk])\n return new_dd_object(merge(*to_merge), name, meta, divisions)\n\n\ndef _link(token, result):\n \"\"\"A dummy function to link results together in a graph\n\n We use this to enforce an artificial sequential ordering on tasks that\n don't explicitly pass around a shared resource\n \"\"\"\n return None\n\n\ndef _df_to_bag(df, index=False):\n if isinstance(df, pd.DataFrame):\n return list(map(tuple, df.itertuples(index)))\n elif isinstance(df, pd.Series):\n return list(df.iteritems()) if index else list(df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_to_bag_to_bag.return.Bag_dsk_name_df_npartit": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_to_bag_to_bag.return.Bag_dsk_name_df_npartit", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/io.py", "file_name": "io.py", "file_type": "text/x-python", "category": "implementation", "start_line": 503, "end_line": 526, "span_ids": ["to_bag"], "tokens": 207}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_bag(df, index=False):\n \"\"\"Create Dask Bag from a Dask DataFrame\n\n Parameters\n ----------\n index : bool, optional\n If True, the elements are tuples of ``(index, value)``, otherwise\n they're just the ``value``. 
Default is False.\n\n Examples\n --------\n >>> bag = df.to_bag() # doctest: +SKIP\n \"\"\"\n from ...bag.core import Bag\n\n if not isinstance(df, (DataFrame, Series)):\n raise TypeError(\"df must be either DataFrame or Series\")\n name = \"to_bag-\" + tokenize(df, index)\n dsk = dict(\n ((name, i), (_df_to_bag, block, index))\n for (i, block) in enumerate(df.__dask_keys__())\n )\n dsk.update(df.__dask_optimize__(df.__dask_graph__(), df.__dask_keys__()))\n return Bag(dsk, name, df.npartitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_to_records_to_records.return.df_map_partitions_M_to_re": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_to_records_to_records.return.df_map_partitions_M_to_re", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/io.py", "file_name": "io.py", "file_type": "text/x-python", "category": "implementation", "start_line": 529, "end_line": 546, "span_ids": ["to_records"], "tokens": 162}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_records(df):\n \"\"\"Create Dask Array from a Dask Dataframe\n\n Warning: This creates a dask.array without precise shape information.\n Operations that depend on shape information, like slicing or reshaping,\n will not work.\n\n Examples\n --------\n >>> df.to_records() # doctest: +SKIP\n dask.array # noqa: E501\n\n See Also\n --------\n dask.dataframe._Frame.values\n dask.dataframe.from_dask_array\n \"\"\"\n return df.map_partitions(M.to_records)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_delayed_from_delayed.return.df": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_delayed_from_delayed.return.df", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/io.py", "file_name": "io.py", "file_type": "text/x-python", "category": "implementation", "start_line": 549, "end_line": 613, "span_ids": ["from_delayed"], "tokens": 548}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@insert_meta_param_description\ndef from_delayed(\n dfs, meta=None, divisions=None, prefix=\"from-delayed\", verify_meta=True\n):\n \"\"\"Create Dask DataFrame from many Dask Delayed objects\n\n Parameters\n ----------\n dfs : list of Delayed\n An iterable of ``dask.delayed.Delayed`` objects, such as come from\n ``dask.delayed``. These comprise the individual partitions of the\n resulting 
dataframe.\n $META\n divisions : tuple, str, optional\n Partition boundaries along the index.\n For tuple, see https://docs.dask.org/en/latest/dataframe-design.html#partitions\n For string 'sorted' will compute the delayed values to find index\n values. Assumes that the indexes are mutually sorted.\n If None, then won't use index information\n prefix : str, optional\n Prefix to prepend to the keys.\n verify_meta : bool, optional\n If True check that the partitions have consistent metadata, defaults to True.\n \"\"\"\n from dask.delayed import Delayed\n\n if isinstance(dfs, Delayed):\n dfs = [dfs]\n dfs = [\n delayed(df) if not isinstance(df, Delayed) and hasattr(df, \"key\") else df\n for df in dfs\n ]\n for df in dfs:\n if not isinstance(df, Delayed):\n raise TypeError(\"Expected Delayed object, got %s\" % type(df).__name__)\n\n if meta is None:\n meta = delayed(make_meta)(dfs[0]).compute()\n else:\n meta = make_meta(meta)\n\n name = prefix + \"-\" + tokenize(*dfs)\n dsk = merge(df.dask for df in dfs)\n if verify_meta:\n for (i, df) in enumerate(dfs):\n dsk[(name, i)] = (check_meta, df.key, meta, \"from_delayed\")\n else:\n for (i, df) in enumerate(dfs):\n dsk[(name, i)] = df.key\n\n if divisions is None or divisions == \"sorted\":\n divs = [None] * (len(dfs) + 1)\n else:\n divs = tuple(divisions)\n if len(divs) != len(dfs) + 1:\n raise ValueError(\"divisions should be a tuple of len(dfs) + 1\")\n\n df = new_dd_object(dsk, name, meta, divs)\n\n if divisions == \"sorted\":\n from ..shuffle import compute_and_set_divisions\n\n df = compute_and_set_divisions(df)\n\n return df", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_sorted_division_locations_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_sorted_division_locations_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/io.py", "file_name": "io.py", "file_type": "text/x-python", "category": "implementation", "start_line": 616, "end_line": 666, "span_ids": ["sorted_division_locations", "impl:4"], "tokens": 450}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def sorted_division_locations(seq, npartitions=None, chunksize=None):\n \"\"\"Find division locations and values in sorted list\n\n Examples\n --------\n\n >>> L = ['A', 'B', 'C', 'D', 'E', 'F']\n >>> sorted_division_locations(L, chunksize=2)\n (['A', 'C', 'E', 'F'], [0, 2, 4, 6])\n\n >>> sorted_division_locations(L, chunksize=3)\n (['A', 'D', 'F'], [0, 3, 6])\n\n >>> L = ['A', 'A', 'A', 'A', 'B', 'B', 'B', 'C']\n >>> sorted_division_locations(L, chunksize=3)\n (['A', 'B', 'C'], [0, 4, 8])\n\n >>> sorted_division_locations(L, chunksize=2)\n (['A', 'B', 'C'], [0, 4, 8])\n\n >>> sorted_division_locations(['A'], chunksize=2)\n (['A', 'A'], [0, 1])\n \"\"\"\n if (npartitions is None) == (chunksize is None):\n raise ValueError(\"Exactly one of npartitions and chunksize must be specified.\")\n\n if npartitions:\n chunksize = ceil(len(seq) / npartitions)\n\n 
positions = [0]\n values = [seq[0]]\n for pos in range(0, len(seq), chunksize):\n if pos <= positions[-1]:\n continue\n while pos + 1 < len(seq) and seq[pos - 1] == seq[pos]:\n pos += 1\n values.append(seq[pos])\n if pos == len(seq) - 1:\n pos += 1\n positions.append(pos)\n\n if positions[-1] != len(seq):\n positions.append(len(seq))\n values.append(seq[-1])\n\n return values, positions\n\n\nDataFrame.to_records.__doc__ = to_records.__doc__\nDataFrame.to_bag.__doc__ = to_bag.__doc__", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/json.py_io_write_json_partition.with_openfile_as_f_.df_to_json_f_kwargs_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/json.py_io_write_json_partition.with_openfile_as_f_.df_to_json_f_kwargs_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/json.py", "file_name": "json.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 90, "span_ids": ["to_json", "imports", "write_json_partition"], "tokens": 664}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import io\nimport pandas as pd\nfrom dask.bytes import open_files, read_bytes\nimport dask\nfrom ..utils import insert_meta_param_description, make_meta\n\n\ndef to_json(\n df,\n url_path,\n orient=\"records\",\n lines=None,\n storage_options=None,\n compute=True,\n encoding=\"utf-8\",\n errors=\"strict\",\n compression=None,\n compute_kwargs=None,\n **kwargs\n):\n \"\"\"Write dataframe into JSON text files\n\n This utilises ``pandas.DataFrame.to_json()``, and most parameters are\n passed through - see its docstring.\n\n Differences: orient is 'records' by default, with lines=True; this\n produces the kind of JSON output that is most common in big-data\n applications, and which can be chunked when reading (see ``read_json()``).\n\n Parameters\n ----------\n df: dask.DataFrame\n Data to save\n url_path: str, list of str\n Location to write to. If a string, and there are more than one\n partitions in df, should include a glob character to expand into a\n set of file names, or provide a ``name_function=`` parameter.\n Supports protocol specifications such as ``\"s3://\"``.\n encoding, errors:\n The text encoding to implement, e.g., \"utf-8\" and how to respond\n to errors in the conversion (see ``str.encode()``).\n orient, lines, kwargs\n passed to pandas; if not specified, lines=True when orient='records',\n False otherwise.\n storage_options: dict\n Passed to backend file-system implementation\n compute: bool\n If true, immediately executes. 
If False, returns a set of delayed\n objects, which can be computed at a later time.\n compute_kwargs : dict, optional\n Options to be passed in to the compute method\n compression : string or None\n String like 'gzip' or 'xz'.\n \"\"\"\n if lines is None:\n lines = orient == \"records\"\n if orient != \"records\" and lines:\n raise ValueError(\n \"Line-delimited JSON is only available with \" 'orient=\"records\".'\n )\n kwargs[\"orient\"] = orient\n kwargs[\"lines\"] = lines and orient == \"records\"\n outfiles = open_files(\n url_path,\n \"wt\",\n encoding=encoding,\n errors=errors,\n name_function=kwargs.pop(\"name_function\", None),\n num=df.npartitions,\n compression=compression,\n **(storage_options or {})\n )\n parts = [\n dask.delayed(write_json_partition)(d, outfile, kwargs)\n for outfile, d in zip(outfiles, df.to_delayed())\n ]\n if compute:\n if compute_kwargs is None:\n compute_kwargs = dict()\n dask.compute(parts, **compute_kwargs)\n return [f.path for f in outfiles]\n else:\n return parts\n\n\ndef write_json_partition(df, openfile, kwargs):\n with openfile as f:\n df.to_json(f, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/json.py_read_json_read_json.storage_options.storage_options_or_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/json.py_read_json_read_json.storage_options.storage_options_or_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/json.py", "file_name": "json.py", "file_type": "text/x-python", "category": "implementation", "start_line": 93, "end_line": 184, "span_ids": ["read_json"], "tokens": 743}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@insert_meta_param_description\ndef read_json(\n url_path,\n orient=\"records\",\n lines=None,\n storage_options=None,\n blocksize=None,\n sample=2 ** 20,\n encoding=\"utf-8\",\n errors=\"strict\",\n compression=\"infer\",\n meta=None,\n engine=pd.read_json,\n **kwargs\n):\n \"\"\"Create a dataframe from a set of JSON files\n\n This utilises ``pandas.read_json()``, and most parameters are\n passed through - see its docstring.\n\n Differences: orient is 'records' by default, with lines=True; this\n is appropriate for line-delimited \"JSON-lines\" data, the kind of JSON output\n that is most common in big-data scenarios, and which can be chunked when\n reading (see ``read_json()``). All other options require blocksize=None,\n i.e., one partition per input file.\n\n Parameters\n ----------\n url_path: str, list of str\n Location to read from. 
If a string, can include a glob character to\n find a set of file names.\n Supports protocol specifications such as ``\"s3://\"``.\n encoding, errors:\n The text encoding to implement, e.g., \"utf-8\" and how to respond\n to errors in the conversion (see ``str.encode()``).\n orient, lines, kwargs\n passed to pandas; if not specified, lines=True when orient='records',\n False otherwise.\n storage_options: dict\n Passed to backend file-system implementation\n blocksize: None or int\n If None, files are not blocked, and you get one partition per input\n file. If an int, which is only allowed for line-delimited JSON files,\n each partition will be approximately this size in bytes, to the nearest\n newline character.\n sample: int\n Number of bytes to pre-load, to provide an empty dataframe structure\n to any blocks without data. Only relevant if using blocksize.\n encoding, errors:\n Text conversion, ``see bytes.decode()``\n compression : string or None\n String like 'gzip' or 'xz'.\n engine : function object, default ``pd.read_json``\n The underlying function that dask will use to read JSON files. By\n default, this will be the pandas JSON reader (``pd.read_json``).\n $META\n\n Returns\n -------\n dask.DataFrame\n\n Examples\n --------\n Load single file\n\n >>> dd.read_json('myfile.1.json') # doctest: +SKIP\n\n Load multiple files\n\n >>> dd.read_json('myfile.*.json') # doctest: +SKIP\n\n >>> dd.read_json(['myfile.1.json', 'myfile.2.json']) # doctest: +SKIP\n\n Load large line-delimited JSON files using partitions of approx\n 256MB size\n\n >>> dd.read_json('data/file*.json', blocksize=2**28) # doctest: +SKIP\n \"\"\"\n import dask.dataframe as dd\n\n if lines is None:\n lines = orient == \"records\"\n if orient != \"records\" and lines:\n raise ValueError(\n \"Line-delimited JSON is only available with \" 'orient=\"records\".'\n )\n if blocksize and (orient != \"records\" or not lines):\n raise ValueError(\n \"JSON file chunking only allowed for JSON-lines \"\n \"input (orient='records', lines=True).\"\n )\n storage_options = storage_options or {}\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/json.py_read_json.if_blocksize__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/json.py_read_json.if_blocksize__", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/json.py", "file_name": "json.py", "file_type": "text/x-python", "category": "implementation", "start_line": 185, "end_line": 234, "span_ids": ["read_json_chunk", "read_json", "read_json_file"], "tokens": 405}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@insert_meta_param_description\ndef read_json(\n url_path,\n orient=\"records\",\n lines=None,\n storage_options=None,\n blocksize=None,\n sample=2 ** 20,\n encoding=\"utf-8\",\n errors=\"strict\",\n compression=\"infer\",\n meta=None,\n engine=pd.read_json,\n **kwargs\n):\n # ... 
other code\n if blocksize:\n first, chunks = read_bytes(\n url_path,\n b\"\\n\",\n blocksize=blocksize,\n sample=sample,\n compression=compression,\n **storage_options\n )\n chunks = list(dask.core.flatten(chunks))\n if meta is None:\n meta = read_json_chunk(first, encoding, errors, engine, kwargs)\n meta = make_meta(meta)\n parts = [\n dask.delayed(read_json_chunk)(\n chunk, encoding, errors, engine, kwargs, meta=meta\n )\n for chunk in chunks\n ]\n return dd.from_delayed(parts, meta=meta)\n else:\n files = open_files(\n url_path,\n \"rt\",\n encoding=encoding,\n errors=errors,\n compression=compression,\n **storage_options\n )\n parts = [\n dask.delayed(read_json_file)(f, orient, lines, engine, kwargs)\n for f in files\n ]\n return dd.from_delayed(parts, meta=meta)\n\n\ndef read_json_chunk(chunk, encoding, errors, engine, kwargs, meta=None):\n s = io.StringIO(chunk.decode(encoding, errors))\n s.seek(0)\n df = engine(s, orient=\"records\", lines=True, **kwargs)\n if meta is not None and df.empty:\n return meta\n else:\n return df\n\n\ndef read_json_file(f, orient, lines, engine, kwargs):\n with f as f:\n return engine(f, orient=orient, lines=lines, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/orc.py_from_distutils_version_im__read_orc_stripe.if_pa___version___Loose.else_.return.table_to_pandas_date_as_o": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/orc.py_from_distutils_version_im__read_orc_stripe.if_pa___version___Loose.else_.return.table_to_pandas_date_as_o", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/orc.py", "file_name": "orc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 25, "span_ids": ["imports", "_read_orc_stripe"], "tokens": 213}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from distutils.version import LooseVersion\n\nfrom .utils import _get_pyarrow_dtypes, _meta_from_dtypes\nfrom ..core import DataFrame\nfrom ...base import tokenize\nfrom ...blockwise import Blockwise\nfrom ...bytes.core import get_fs_token_paths\nfrom ...highlevelgraph import HighLevelGraph\nfrom ...utils import import_required\n\n__all__ = (\"read_orc\",)\n\n\ndef _read_orc_stripe(fs, path, stripe, columns=None):\n \"\"\"Pull out specific data from specific part of ORC file\"\"\"\n orc = import_required(\"pyarrow.orc\", \"Please install pyarrow >= 0.9.0\")\n import pyarrow as pa\n\n with fs.open(path, \"rb\") as f:\n o = orc.ORCFile(f)\n table = o.read_stripe(stripe, columns)\n if pa.__version__ < LooseVersion(\"0.11.0\"):\n return table.to_pandas()\n else:\n return table.to_pandas(date_as_object=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
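The two json.py chunks above pair ``to_json`` (one line-delimited file per partition) with ``read_json`` (optional byte-based re-chunking). A minimal round-trip sketch under those semantics; the frame contents, the ``data/`` directory, and the file names are illustrative assumptions, not taken from the stored source:

    import dask.dataframe as dd
    import pandas as pd

    # Two partitions -> two line-delimited JSON files; the "*" in the glob is
    # expanded to one name per partition, and the written paths are returned.
    df = dd.from_pandas(pd.DataFrame({"x": range(10)}), npartitions=2)
    paths = df.to_json("data/out-*.json")  # orient="records", lines=True by default

    # With the default orient="records"/lines=True, read_json may split files
    # on newline boundaries into ~1 MiB partitions; any other orient requires
    # blocksize=None, i.e. one partition per input file.
    df2 = dd.read_json("data/out-*.json", blocksize=2**20)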
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/orc.py_read_orc_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/orc.py_read_orc_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/orc.py", "file_name": "orc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 28, "end_line": 111, "span_ids": ["read_orc"], "tokens": 718}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def read_orc(path, columns=None, storage_options=None):\n \"\"\"Read dataframe from ORC file(s)\n\n Parameters\n ----------\n path: str or list(str)\n Location of file(s), which can be a full URL with protocol specifier,\n and may include glob character if a single string.\n columns: None or list(str)\n Columns to load. If None, loads all.\n storage_options: None or dict\n Further parameters to pass to the bytes backend.\n\n Returns\n -------\n Dask.DataFrame (even if there is only one column)\n\n Examples\n --------\n >>> df = dd.read_orc('https://github.com/apache/orc/raw/'\n ... 'master/examples/demo-11-zlib.orc') # doctest: +SKIP\n \"\"\"\n orc = import_required(\"pyarrow.orc\", \"Please install pyarrow >= 0.9.0\")\n import pyarrow as pa\n\n if LooseVersion(pa.__version__) == \"0.10.0\":\n raise RuntimeError(\n \"Due to a bug in pyarrow 0.10.0, the ORC reader is \"\n \"unavailable. Please either downgrade pyarrow to \"\n \"0.9.0, or use the pyarrow master branch (in which \"\n \"this issue is fixed).\\n\\n\"\n \"For more information see: \"\n \"https://issues.apache.org/jira/browse/ARROW-3009\"\n )\n\n storage_options = storage_options or {}\n fs, fs_token, paths = get_fs_token_paths(\n path, mode=\"rb\", storage_options=storage_options\n )\n schema = None\n nstripes_per_file = []\n for path in paths:\n with fs.open(path, \"rb\") as f:\n o = orc.ORCFile(f)\n if schema is None:\n schema = o.schema\n elif schema != o.schema:\n raise ValueError(\"Incompatible schemas while parsing ORC files\")\n nstripes_per_file.append(o.nstripes)\n schema = _get_pyarrow_dtypes(schema, categories=None)\n if columns is not None:\n ex = set(columns) - set(schema)\n if ex:\n raise ValueError(\n \"Requested columns (%s) not in schema (%s)\" % (ex, set(schema))\n )\n else:\n columns = list(schema)\n meta = _meta_from_dtypes(columns, schema, [], [])\n\n # Create IO subgraph\n output_name = \"read-orc-\" + tokenize(fs_token, path, columns)\n name = \"blockwise-io-\" + output_name\n dsk_io = {}\n N = 0\n for path, n in zip(paths, nstripes_per_file):\n for stripe in range(n):\n dsk_io[(name, N)] = (_read_orc_stripe, fs, path, stripe, columns)\n N += 1\n\n # Create Blockwise layer\n npartitions = len(dsk_io)\n layer = Blockwise(\n output_name,\n \"i\",\n None,\n [(name, \"i\")],\n {name: (npartitions,)},\n io_subgraph=(name, dsk_io),\n )\n graph = HighLevelGraph({output_name: layer}, {output_name: set()})\n\n return DataFrame(graph, output_name, meta, [None] * (npartitions + 1))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/__init__.py__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/__init__.py__", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 2, "span_ids": ["imports"], "tokens": 16}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from .core import read_parquet, to_parquet, read_parquet_part", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_from_functools_import_par__append_row_groups.try_.except_RuntimeError_as_er.if_requires_equal_schema.else_.raise_err": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_from_functools_import_par__append_row_groups.try_.except_RuntimeError_as_er.if_requires_equal_schema.else_.raise_err", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 44, "span_ids": ["imports", "_append_row_groups"], "tokens": 265}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from functools import partial\nfrom collections import defaultdict\nimport json\nimport warnings\nfrom distutils.version import LooseVersion\n\nimport pandas as pd\nimport pyarrow as pa\nimport pyarrow.parquet as pq\nfrom ....utils import getargspec\nfrom ..utils import _get_pyarrow_dtypes, _meta_from_dtypes\nfrom ...utils import clear_known_categories\nfrom ....core import flatten\nfrom dask import delayed\n\nfrom .utils import (\n _parse_pandas_metadata,\n _normalize_index_columns,\n Engine,\n _analyze_paths,\n)\n\npreserve_ind_supported = pa.__version__ >= LooseVersion(\"0.15.0\")\nschema_field_supported = pa.__version__ >= LooseVersion(\"0.15.0\")\n\n\n#\n# Private Helper Functions\n#\n\n\ndef _append_row_groups(metadata, md):\n try:\n metadata.append_row_groups(md)\n except RuntimeError as err:\n if \"requires equal schemas\" in str(err):\n raise RuntimeError(\n \"Schemas are inconsistent, try using \"\n '`to_parquet(..., schema=\"infer\")`, or pass an explicit '\n \"pyarrow schema. 
Such as \"\n '`to_parquet(..., schema={\"column1\": pa.string()})`'\n ) from err\n else:\n raise err", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__write_partitioned__index_in_schema.if_index_and_schema_is_no.else_._No_index_to_check": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__write_partitioned__index_in_schema.if_index_and_schema_is_no.else_._No_index_to_check", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 46, "end_line": 107, "span_ids": ["_write_partitioned", "_index_in_schema"], "tokens": 525}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _write_partitioned(\n table, root_path, filename, partition_cols, fs, index_cols=(), **kwargs\n):\n \"\"\"Write table to a partitioned dataset with pyarrow.\n\n Logic copied from pyarrow.parquet.\n (arrow/python/pyarrow/parquet.py::write_to_dataset)\n\n TODO: Remove this in favor of pyarrow's `write_to_dataset`\n once ARROW-8244 is addressed.\n \"\"\"\n fs.mkdirs(root_path, exist_ok=True)\n\n df = table.to_pandas(ignore_metadata=True)\n index_cols = list(index_cols) if index_cols else []\n preserve_index = False\n if index_cols and preserve_ind_supported:\n df.set_index(index_cols, inplace=True)\n preserve_index = True\n\n partition_keys = [df[col] for col in partition_cols]\n data_df = df.drop(partition_cols, axis=\"columns\")\n data_cols = df.columns.drop(partition_cols)\n if len(data_cols) == 0 and not index_cols:\n raise ValueError(\"No data left to save outside partition columns\")\n\n subschema = table.schema\n for col in table.schema.names:\n if col in partition_cols:\n subschema = subschema.remove(subschema.get_field_index(col))\n\n md_list = []\n for keys, subgroup in data_df.groupby(partition_keys):\n if not isinstance(keys, tuple):\n keys = (keys,)\n subdir = fs.sep.join(\n [\n \"{colname}={value}\".format(colname=name, value=val)\n for name, val in zip(partition_cols, keys)\n ]\n )\n subtable = pa.Table.from_pandas(\n subgroup, preserve_index=preserve_index, schema=subschema, safe=False\n )\n prefix = fs.sep.join([root_path, subdir])\n fs.mkdirs(prefix, exist_ok=True)\n full_path = fs.sep.join([prefix, filename])\n with fs.open(full_path, \"wb\") as f:\n pq.write_table(subtable, f, metadata_collector=md_list, **kwargs)\n md_list[-1].set_file_path(fs.sep.join([subdir, filename]))\n\n return md_list\n\n\ndef _index_in_schema(index, schema):\n if index and schema is not None:\n # Make sure all index columns are in user-defined schema\n return len(set(index).intersection(schema.names)) == len(index)\n elif index:\n return True # Schema is not user-specified, all good\n else:\n return False # No index to check", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", 
"metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__get_dataset_object__get_dataset_object.return.dataset_base_fns": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__get_dataset_object__get_dataset_object.return.dataset_base_fns", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 110, "end_line": 151, "span_ids": ["_get_dataset_object"], "tokens": 496}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _get_dataset_object(paths, fs, filters, dataset_kwargs):\n \"\"\"Generate a ParquetDataset object\"\"\"\n kwargs = dataset_kwargs.copy()\n if \"validate_schema\" not in kwargs:\n kwargs[\"validate_schema\"] = False\n if len(paths) > 1:\n # This is a list of files\n base, fns = _analyze_paths(paths, fs)\n proxy_metadata = None\n if \"_metadata\" in fns:\n # We have a _metadata file. PyArrow cannot handle\n # \"_metadata\" when `paths` is a list. So, we shuld\n # open \"_metadata\" separately.\n paths.remove(fs.sep.join([base, \"_metadata\"]))\n fns.remove(\"_metadata\")\n with fs.open(fs.sep.join([base, \"_metadata\"]), mode=\"rb\") as fil:\n proxy_metadata = pq.ParquetFile(fil).metadata\n # Create our dataset from the list of data files.\n # Note #1: that this will not parse all the files (yet)\n # Note #2: Cannot pass filters for legacy pyarrow API (see issue#6512).\n # We can handle partitions + filtering for list input after\n # adopting new pyarrow.dataset API.\n dataset = pq.ParquetDataset(paths, filesystem=fs, **kwargs)\n if proxy_metadata:\n dataset.metadata = proxy_metadata\n elif fs.isdir(paths[0]):\n # This is a directory. We can let pyarrow do its thing.\n # Note: In the future, it may be best to avoid listing the\n # directory if we can get away with checking for the\n # existence of _metadata. Listing may be much more\n # expensive in storage systems like S3.\n allpaths = fs.glob(paths[0] + fs.sep + \"*\")\n base, fns = _analyze_paths(allpaths, fs)\n dataset = pq.ParquetDataset(paths[0], filesystem=fs, filters=filters, **kwargs)\n else:\n # This is a single file. 
No danger in gathering statistics\n # and/or splitting row-groups without a \"_metadata\" file\n base = paths[0]\n fns = [None]\n dataset = pq.ParquetDataset(paths[0], filesystem=fs, **kwargs)\n\n return dataset, base, fns", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__gather_metadata__gather_metadata.if_dataset_metadata_.else_.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__gather_metadata__gather_metadata.if_dataset_metadata_.else_.return._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 154, "end_line": 257, "span_ids": ["_gather_metadata"], "tokens": 798}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _gather_metadata(\n paths, fs, split_row_groups, gather_statistics, filters, dataset_kwargs\n):\n \"\"\"Gather parquet metadata into a single data structure.\n\n Use _metadata or aggregate footer metadata into a single\n object. Also, collect other information necessary for\n parquet-to-ddf mapping (e.g. schema, partition_info).\n \"\"\"\n\n # Step 1: Create a ParquetDataset object\n dataset, base, fns = _get_dataset_object(paths, fs, filters, dataset_kwargs)\n if fns == [None]:\n # This is a single file. No danger in gathering statistics\n # and/or splitting row-groups without a \"_metadata\" file\n if gather_statistics is None:\n gather_statistics = True\n if split_row_groups is None:\n split_row_groups = True\n\n # Step 2: Construct necessary (parquet) partitioning information\n partition_info = {\"partitions\": None, \"partition_keys\": {}, \"partition_names\": []}\n fn_partitioned = False\n if dataset.partitions is not None:\n fn_partitioned = True\n partition_info[\"partition_names\"] = [\n n for n in dataset.partitions.partition_names if n is not None\n ]\n partition_info[\"partitions\"] = dataset.partitions\n for piece in dataset.pieces:\n partition_info[\"partition_keys\"][piece.path] = piece.partition_keys\n\n # Step 3: Construct a single `metadata` object. 
We can\n # directly use dataset.metadata if it is available.\n # Otherwise, if `gather_statistics` or `split_row_groups`,\n # we need to gather the footer metadata manually\n metadata = None\n if dataset.metadata:\n # We have a _metadata file.\n # PyArrow already did the work for us\n schema = dataset.metadata.schema.to_arrow_schema()\n if gather_statistics is None:\n gather_statistics = True\n if split_row_groups is None:\n split_row_groups = True\n return (\n schema,\n dataset.metadata,\n base,\n partition_info,\n split_row_groups,\n gather_statistics,\n )\n else:\n # No _metadata file.\n # May need to collect footer metadata manually\n if dataset.schema is not None:\n schema = dataset.schema.to_arrow_schema()\n else:\n schema = None\n if gather_statistics is None:\n gather_statistics = False\n if split_row_groups is None:\n split_row_groups = False\n metadata = None\n if not (split_row_groups or gather_statistics):\n # Don't need to construct real metadata if\n # we are not gathering statistics or splitting\n # by row-group\n metadata = [p.path for p in dataset.pieces]\n if schema is None:\n schema = dataset.pieces[0].get_metadata().schema.to_arrow_schema()\n return (\n schema,\n metadata,\n base,\n partition_info,\n split_row_groups,\n gather_statistics,\n )\n # We have not detected a _metadata file, and the user has specified\n # that they want to split by row-group and/or gather statistics.\n # This is the only case where we MUST scan all files to collect\n # metadata.\n for piece, fn in zip(dataset.pieces, fns):\n md = piece.get_metadata()\n if schema is None:\n schema = md.schema.to_arrow_schema()\n if fn_partitioned:\n md.set_file_path(piece.path.replace(base + fs.sep, \"\"))\n elif fn:\n md.set_file_path(fn)\n if metadata:\n _append_row_groups(metadata, md)\n else:\n metadata = md\n return (\n schema,\n metadata,\n base,\n partition_info,\n split_row_groups,\n gather_statistics,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__generate_dd_meta__generate_dd_meta.return.meta_index_cols_categor": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__generate_dd_meta__generate_dd_meta.return.meta_index_cols_categor", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 260, "end_line": 336, "span_ids": ["_generate_dd_meta"], "tokens": 595}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _generate_dd_meta(schema, index, categories, partition_info):\n partition_obj = partition_info[\"partitions\"]\n partitions = partition_info[\"partition_names\"]\n columns = None\n\n has_pandas_metadata = schema.metadata is not None and b\"pandas\" in schema.metadata\n\n if has_pandas_metadata:\n pandas_metadata = json.loads(schema.metadata[b\"pandas\"].decode(\"utf8\"))\n (\n index_names,\n column_names,\n storage_name_mapping,\n 
column_index_names,\n ) = _parse_pandas_metadata(pandas_metadata)\n if categories is None:\n categories = []\n for col in pandas_metadata[\"columns\"]:\n if (col[\"pandas_type\"] == \"categorical\") and (\n col[\"name\"] not in categories\n ):\n categories.append(col[\"name\"])\n else:\n # No pandas metadata implies no index, unless selected by the user\n index_names = []\n column_names = schema.names\n storage_name_mapping = {k: k for k in column_names}\n column_index_names = [None]\n\n if index is None and index_names:\n index = index_names\n\n if set(column_names).intersection(partitions):\n raise ValueError(\n \"partition(s) should not exist in columns.\\n\"\n \"categories: {} | partitions: {}\".format(column_names, partitions)\n )\n\n column_names, index_names = _normalize_index_columns(\n columns, column_names + partitions, index, index_names\n )\n\n all_columns = index_names + column_names\n\n # Check that categories are included in columns\n if categories and not set(categories).intersection(all_columns):\n raise ValueError(\n \"categories not in available columns.\\n\"\n \"categories: {} | columns: {}\".format(categories, list(all_columns))\n )\n\n dtypes = _get_pyarrow_dtypes(schema, categories)\n dtypes = {storage_name_mapping.get(k, k): v for k, v in dtypes.items()}\n\n index_cols = index or ()\n meta = _meta_from_dtypes(all_columns, dtypes, index_cols, column_index_names)\n meta = clear_known_categories(meta, cols=categories)\n\n if partition_obj:\n for partition in partition_obj:\n if isinstance(index, list) and partition.name == index[0]:\n # Index from directory structure\n meta.index = pd.CategoricalIndex(\n categories=partition.keys, name=index[0]\n )\n elif partition.name == meta.index.name:\n # Index created from a categorical column\n meta.index = pd.CategoricalIndex(\n categories=partition.keys, name=meta.index.name\n )\n elif partition.name in meta.columns:\n meta[partition.name] = pd.Series(\n pd.Categorical(categories=partition.keys, values=[]),\n index=meta.index,\n )\n\n return meta, index_cols, categories, index", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__aggregate_stats__aggregate_stats.if_len_file_row_group_sta.else_.return.s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__aggregate_stats__aggregate_stats.if_len_file_row_group_sta.else_.return.s", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 339, "end_line": 395, "span_ids": ["_aggregate_stats"], "tokens": 486}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _aggregate_stats(\n file_path, file_row_group_stats, file_row_group_column_stats, stat_col_indices\n):\n \"\"\"Utility to aggregate the statistics for N row-groups\n into a single dictionary.\n \"\"\"\n if len(file_row_group_stats) < 1:\n # Empty statistics\n return {}\n 
elif len(file_row_group_column_stats) == 0:\n assert len(file_row_group_stats) == 1\n return file_row_group_stats[0]\n else:\n # Note: It would be better to avoid df_rgs and df_cols\n # construction altogether. It makes it fast to aggregate\n # the statistics for many row groups, but isn't\n # worthwhile for a small number of row groups.\n if len(file_row_group_stats) > 1:\n df_rgs = pd.DataFrame(file_row_group_stats)\n s = {\n \"file_path_0\": file_path,\n \"num-rows\": df_rgs[\"num-rows\"].sum(),\n \"total_byte_size\": df_rgs[\"total_byte_size\"].sum(),\n \"columns\": [],\n }\n else:\n s = {\n \"file_path_0\": file_path,\n \"num-rows\": file_row_group_stats[0][\"num-rows\"],\n \"total_byte_size\": file_row_group_stats[0][\"total_byte_size\"],\n \"columns\": [],\n }\n\n df_cols = None\n if len(file_row_group_column_stats) > 1:\n df_cols = pd.DataFrame(file_row_group_column_stats)\n for ind, name in enumerate(stat_col_indices):\n i = ind * 3\n if df_cols is None:\n s[\"columns\"].append(\n {\n \"name\": name,\n \"min\": file_row_group_column_stats[0][i],\n \"max\": file_row_group_column_stats[0][i + 1],\n \"null_count\": file_row_group_column_stats[0][i + 2],\n }\n )\n else:\n s[\"columns\"].append(\n {\n \"name\": name,\n \"min\": df_cols.iloc[:, i].min(),\n \"max\": df_cols.iloc[:, i + 1].max(),\n \"null_count\": df_cols.iloc[:, i + 2].sum(),\n }\n )\n return s", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__process_metadata__process_metadata.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__process_metadata__process_metadata.return._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 398, "end_line": 492, "span_ids": ["_process_metadata"], "tokens": 719}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _process_metadata(\n metadata, single_rg_parts, gather_statistics, stat_col_indices, no_filters\n):\n # Get the number of row groups per file\n file_row_groups = defaultdict(list)\n file_row_group_stats = defaultdict(list)\n file_row_group_column_stats = defaultdict(list)\n cmax_last = {}\n for rg in range(metadata.num_row_groups):\n row_group = metadata.row_group(rg)\n fpath = row_group.column(0).file_path\n if fpath is None:\n raise ValueError(\n \"Global metadata structure is missing a file_path string. 
\"\n \"If the dataset includes a _metadata file, that file may \"\n \"have one or more missing file_path fields.\"\n )\n if file_row_groups[fpath]:\n file_row_groups[fpath].append(file_row_groups[fpath][-1] + 1)\n else:\n file_row_groups[fpath].append(0)\n if gather_statistics:\n if single_rg_parts:\n s = {\n \"file_path_0\": fpath,\n \"num-rows\": row_group.num_rows,\n \"total_byte_size\": row_group.total_byte_size,\n \"columns\": [],\n }\n else:\n s = {\n \"num-rows\": row_group.num_rows,\n \"total_byte_size\": row_group.total_byte_size,\n }\n cstats = []\n for name, i in stat_col_indices.items():\n column = row_group.column(i)\n if column.statistics:\n cmin = column.statistics.min\n cmax = column.statistics.max\n cnull = column.statistics.null_count\n last = cmax_last.get(name, None)\n if no_filters:\n # Only think about bailing if we don't need\n # stats for filtering\n if cmin is None or (last and cmin < last):\n # We are collecting statistics for divisions\n # only (no filters) - Column isn't sorted, or\n # we have an all-null partition, so lets bail.\n #\n # Note: This assumes ascending order.\n #\n gather_statistics = False\n file_row_group_stats = {}\n file_row_group_column_stats = {}\n break\n\n if single_rg_parts:\n to_ts = column.statistics.logical_type.type == \"TIMESTAMP\"\n s[\"columns\"].append(\n {\n \"name\": name,\n \"min\": cmin if not to_ts else pd.Timestamp(cmin),\n \"max\": cmax if not to_ts else pd.Timestamp(cmax),\n \"null_count\": cnull,\n }\n )\n else:\n cstats += [cmin, cmax, cnull]\n cmax_last[name] = cmax\n else:\n\n if no_filters and column.num_values > 0:\n # We are collecting statistics for divisions\n # only (no filters) - Lets bail.\n gather_statistics = False\n file_row_group_stats = {}\n file_row_group_column_stats = {}\n break\n\n if single_rg_parts:\n s[\"columns\"].append({\"name\": name})\n else:\n cstats += [None, None, None]\n if gather_statistics:\n file_row_group_stats[fpath].append(s)\n if not single_rg_parts:\n file_row_group_column_stats[fpath].append(tuple(cstats))\n\n return (\n file_row_groups,\n file_row_group_stats,\n file_row_group_column_stats,\n gather_statistics,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__construct_parts__construct_parts._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__construct_parts__construct_parts._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 495, "end_line": 556, "span_ids": ["_construct_parts"], "tokens": 395}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _construct_parts(\n fs,\n metadata,\n schema,\n filters,\n index_cols,\n data_path,\n partition_info,\n categories,\n split_row_groups,\n gather_statistics,\n):\n \"\"\"Construct ``parts`` for ddf construction\n\n Use metadata (along with other data) to define a 
tuple\n for each ddf partition. Also gather statistics if\n ``gather_statistics=True``, and other criteria are met.\n \"\"\"\n\n parts = []\n stats = []\n\n partition_keys = partition_info[\"partition_keys\"]\n partition_obj = partition_info[\"partitions\"]\n\n # Check if `metadata` is just a list of paths\n # (not splitting by row-group or collecting statistics)\n if isinstance(metadata, list) and isinstance(metadata[0], str):\n for full_path in metadata:\n part = {\n \"piece\": (full_path, None, partition_keys.get(full_path, None)),\n \"kwargs\": {\"partitions\": partition_obj, \"categories\": categories},\n }\n parts.append(part)\n return parts, stats\n\n # Determine which columns need statistics\n flat_filters = (\n set(flatten(tuple(flatten(filters, container=list)), container=tuple))\n if filters\n else []\n )\n stat_col_indices = {}\n for i, name in enumerate(schema.names):\n if name in index_cols or name in flat_filters:\n stat_col_indices[name] = i\n stat_cols = list(stat_col_indices.keys())\n gather_statistics = gather_statistics and len(stat_cols) > 0\n\n # Convert metadata into simple dictionary structures\n (\n file_row_groups,\n file_row_group_stats,\n file_row_group_column_stats,\n gather_statistics,\n ) = _process_metadata(\n metadata,\n int(split_row_groups) == 1,\n gather_statistics,\n stat_col_indices,\n flat_filters == [],\n )\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__construct_parts.if_split_row_groups___construct_parts.return.parts_stats": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__construct_parts.if_split_row_groups___construct_parts.return.parts_stats", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 558, "end_line": 623, "span_ids": ["_construct_parts"], "tokens": 509}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _construct_parts(\n fs,\n metadata,\n schema,\n filters,\n index_cols,\n data_path,\n partition_info,\n categories,\n split_row_groups,\n gather_statistics,\n):\n # ... 
other code\n\n if split_row_groups:\n # Create parts from each file,\n # limiting the number of row_groups in each piece\n split_row_groups = int(split_row_groups)\n for filename, row_groups in file_row_groups.items():\n row_group_count = len(row_groups)\n for i in range(0, row_group_count, split_row_groups):\n i_end = i + split_row_groups\n rg_list = row_groups[i:i_end]\n full_path = (\n fs.sep.join([data_path, filename])\n if filename != \"\"\n else data_path # This is a single file\n )\n pkeys = partition_keys.get(full_path, None)\n if partition_obj and pkeys is None:\n continue # This partition was filtered\n part = {\n \"piece\": (full_path, rg_list, pkeys),\n \"kwargs\": {\n \"partitions\": partition_obj,\n \"categories\": categories,\n \"filters\": filters,\n \"schema\": schema,\n },\n }\n parts.append(part)\n if gather_statistics:\n stat = _aggregate_stats(\n filename,\n file_row_group_stats[filename][i:i_end],\n file_row_group_column_stats[filename][i:i_end],\n stat_col_indices,\n )\n stats.append(stat)\n else:\n for filename, row_groups in file_row_groups.items():\n full_path = (\n fs.sep.join([data_path, filename])\n if filename != \"\"\n else data_path # This is a single file\n )\n pkeys = partition_keys.get(full_path, None)\n if partition_obj and pkeys is None:\n continue # This partition was filtered\n rgs = None\n part = {\n \"piece\": (full_path, rgs, pkeys),\n \"kwargs\": {\n \"partitions\": partition_obj,\n \"categories\": categories,\n \"filters\": filters,\n \"schema\": schema,\n },\n }\n parts.append(part)\n if gather_statistics:\n stat = _aggregate_stats(\n filename,\n file_row_group_stats[filename],\n file_row_group_column_stats[filename],\n stat_col_indices,\n )\n stats.append(stat)\n\n return parts, stats", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowEngine_ArrowEngine.read_metadata.return._meta_stats_parts_inde": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowEngine_ArrowEngine.read_metadata.return._meta_stats_parts_inde", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 627, "end_line": 704, "span_ids": ["ArrowEngine", "ArrowEngine.read_metadata"], "tokens": 507}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowEngine(Engine):\n @classmethod\n def read_metadata(\n cls,\n fs,\n paths,\n categories=None,\n index=None,\n gather_statistics=None,\n filters=None,\n split_row_groups=None,\n **kwargs,\n ):\n\n # Check if we are using pyarrow.dataset API\n dataset_kwargs = kwargs.get(\"dataset\", {})\n\n # Gather necessary metadata information. 
This includes\n # the schema and (parquet) partitioning information.\n # This may also set split_row_groups and gather_statistics,\n # depending on _metadata availability.\n (\n schema,\n metadata,\n base_path,\n partition_info,\n split_row_groups,\n gather_statistics,\n ) = _gather_metadata(\n paths, fs, split_row_groups, gather_statistics, filters, dataset_kwargs\n )\n\n # Process metadata to define `meta` and `index_cols`\n meta, index_cols, categories, index = _generate_dd_meta(\n schema, index, categories, partition_info\n )\n\n # Cannot gather_statistics if our `metadata` is a list\n # of paths, or if we are building a multiindex (for now).\n # We also don't \"need\" to gather statistics if we don't\n # want to apply any filters or calculate divisions\n if (isinstance(metadata, list) and isinstance(metadata[0], str)) or len(\n index_cols\n ) > 1:\n gather_statistics = False\n elif filters is None and len(index_cols) == 0:\n gather_statistics = False\n\n # Make sure gather_statistics allows filtering\n # (if filters are desired)\n if filters:\n # Filters may require us to gather statistics\n if gather_statistics is False and partition_info[\"partition_names\"]:\n warnings.warn(\n \"Filtering with gather_statistics=False. \"\n \"Only partition columns will be filtered correctly.\"\n )\n elif gather_statistics is False:\n raise ValueError(\"Cannot apply filters with gather_statistics=False\")\n elif not gather_statistics:\n gather_statistics = True\n\n # Finally, construct our list of `parts`\n # (and a corresponding list of statistics)\n parts, stats = _construct_parts(\n fs,\n metadata,\n schema,\n filters,\n index_cols,\n base_path,\n partition_info,\n categories,\n split_row_groups,\n gather_statistics,\n )\n\n return (meta, stats, parts, index)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowEngine.read_partition_ArrowEngine.read_partition.return.df": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowEngine.read_partition_ArrowEngine.read_partition.return.df", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 705, "end_line": 796, "span_ids": ["ArrowEngine.read_partition"], "tokens": 659}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowEngine(Engine):\n\n @classmethod\n def read_partition(\n cls,\n fs,\n piece,\n columns,\n index,\n categories=(),\n partitions=(),\n filters=None,\n schema=None,\n **kwargs,\n ):\n if isinstance(index, list):\n for level in index:\n # unclear if we can use set ops here. 
I think the order matters.\n # Need the membership test to avoid duplicating index when\n # we slice with `columns` later on.\n if level not in columns:\n columns.append(level)\n\n # Ensure `columns` and `partitions` do not overlap\n columns_and_parts = columns.copy()\n if columns_and_parts and partitions:\n for part_name in partitions.partition_names:\n if part_name in columns:\n columns.remove(part_name)\n else:\n columns_and_parts.append(part_name)\n columns = columns or None\n\n if isinstance(piece, str):\n # `piece` is a file-path string\n path = piece\n row_group = None\n partition_keys = None\n else:\n # `piece` contains (path, row_group, partition_keys)\n (path, row_group, partition_keys) = piece\n\n if not isinstance(row_group, list):\n row_group = [row_group]\n\n dfs = []\n for rg in row_group:\n piece = pq.ParquetDatasetPiece(\n path,\n row_group=rg,\n partition_keys=partition_keys,\n open_file_func=partial(fs.open, mode=\"rb\"),\n )\n arrow_table = cls._parquet_piece_as_arrow(\n piece, columns, partitions, **kwargs\n )\n df = cls._arrow_table_to_pandas(arrow_table, categories, **kwargs)\n\n if len(row_group) > 1:\n dfs.append(df)\n\n if len(row_group) > 1:\n df = pd.concat(dfs)\n\n # Note that `to_pandas(ignore_metadata=False)` means\n # pyarrow will use the pandas metadata to set the index.\n index_in_columns_and_parts = set(df.index.names).issubset(\n set(columns_and_parts)\n )\n if not index:\n if index_in_columns_and_parts:\n # User does not want to set index and a desired\n # column/partition has been set to the index\n df.reset_index(drop=False, inplace=True)\n else:\n # User does not want to set index and an\n # \"unwanted\" column has been set to the index\n df.reset_index(drop=True, inplace=True)\n else:\n if set(df.index.names) != set(index) and index_in_columns_and_parts:\n # The wrong index has been set and it contains\n # one or more desired columns/partitions\n df.reset_index(drop=False, inplace=True)\n elif index_in_columns_and_parts:\n # The correct index has already been set\n index = False\n columns_and_parts = list(\n set(columns_and_parts).difference(set(df.index.names))\n )\n df = df[list(columns_and_parts)]\n\n if index:\n df = df.set_index(index)\n return df", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowEngine._arrow_table_to_pandas_ArrowEngine._parquet_piece_as_arrow.return.arrow_table": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowEngine._arrow_table_to_pandas_ArrowEngine._parquet_piece_as_arrow.return.arrow_table", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 798, "end_line": 818, "span_ids": ["ArrowEngine._arrow_table_to_pandas", "ArrowEngine._parquet_piece_as_arrow"], "tokens": 164}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowEngine(Engine):\n\n 
@classmethod\n def _arrow_table_to_pandas(\n cls, arrow_table: pa.Table, categories, **kwargs\n ) -> pd.DataFrame:\n _kwargs = kwargs.get(\"arrow_to_pandas\", {})\n _kwargs.update({\"use_threads\": False, \"ignore_metadata\": False})\n\n return arrow_table.to_pandas(categories=categories, **_kwargs)\n\n @classmethod\n def _parquet_piece_as_arrow(\n cls, piece: pq.ParquetDatasetPiece, columns, partitions, **kwargs\n ) -> pa.Table:\n arrow_table = piece.read(\n columns=columns,\n partitions=partitions,\n use_pandas_metadata=True,\n use_threads=False,\n **kwargs.get(\"read\", {}),\n )\n return arrow_table", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowEngine.initialize_write_ArrowEngine.initialize_write.if_append_.try_.except_IOError_ValueErr.append.False": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowEngine.initialize_write_ArrowEngine.initialize_write.if_append_.try_.except_IOError_ValueErr.append.False", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 820, "end_line": 896, "span_ids": ["ArrowEngine.initialize_write"], "tokens": 595}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowEngine(Engine):\n\n @staticmethod\n def initialize_write(\n df,\n fs,\n path,\n append=False,\n partition_on=None,\n ignore_divisions=False,\n division_info=None,\n schema=None,\n index_cols=None,\n **kwargs,\n ):\n # Infer schema if \"infer\"\n # (also start with inferred schema if user passes a dict)\n if schema == \"infer\" or isinstance(schema, dict):\n\n # Start with schema from _meta_nonempty\n _schema = pa.Schema.from_pandas(\n df._meta_nonempty.set_index(index_cols)\n if index_cols\n else df._meta_nonempty\n )\n\n # Use dict to update our inferred schema\n if isinstance(schema, dict):\n schema = pa.schema(schema)\n for name in schema.names:\n i = _schema.get_field_index(name)\n j = schema.get_field_index(name)\n _schema = _schema.set(i, schema.field(j))\n\n # If we have object columns, we need to sample partitions\n # until we find non-null data for each column in `sample`\n sample = [col for col in df.columns if df[col].dtype == \"object\"]\n if schema_field_supported and sample and schema == \"infer\":\n delayed_schema_from_pandas = delayed(pa.Schema.from_pandas)\n for i in range(df.npartitions):\n # Keep data on worker\n _s = delayed_schema_from_pandas(\n df[sample].to_delayed()[i]\n ).compute()\n for name, typ in zip(_s.names, _s.types):\n if typ != \"null\":\n i = _schema.get_field_index(name)\n j = _s.get_field_index(name)\n _schema = _schema.set(i, _s.field(j))\n sample.remove(name)\n if not sample:\n break\n\n # Final (inferred) schema\n schema = _schema\n\n dataset = fmd = None\n i_offset = 0\n if append and division_info is None:\n ignore_divisions = True\n fs.mkdirs(path, exist_ok=True)\n\n if 
append:\n try:\n # Allow append if the dataset exists.\n # Also need dataset.metadata object if\n # ignore_divisions is False (to check divisions)\n dataset = pq.ParquetDataset(path, filesystem=fs)\n if not dataset.metadata and not ignore_divisions:\n # TODO: Be more flexible about existing metadata.\n raise NotImplementedError(\n \"_metadata file needed to `append` \"\n \"with `engine='pyarrow'` \"\n \"unless `ignore_divisions` is `True`\"\n )\n fmd = dataset.metadata\n except (IOError, ValueError, IndexError):\n # Original dataset does not exist - cannot append\n append = False\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowEngine.initialize_write.None_3_ArrowEngine.initialize_write.return.fmd_schema_i_offset": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowEngine.initialize_write.None_3_ArrowEngine.initialize_write.return.fmd_schema_i_offset", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 897, "end_line": 957, "span_ids": ["ArrowEngine.initialize_write"], "tokens": 539}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowEngine(Engine):\n\n @staticmethod\n def initialize_write(\n df,\n fs,\n path,\n append=False,\n partition_on=None,\n ignore_divisions=False,\n division_info=None,\n schema=None,\n index_cols=None,\n **kwargs,\n ):\n # ... 
other code\n if append:\n names = dataset.metadata.schema.names\n has_pandas_metadata = (\n dataset.schema.to_arrow_schema().metadata is not None\n and b\"pandas\" in dataset.schema.to_arrow_schema().metadata\n )\n if has_pandas_metadata:\n pandas_metadata = json.loads(\n dataset.schema.to_arrow_schema().metadata[b\"pandas\"].decode(\"utf8\")\n )\n categories = [\n c[\"name\"]\n for c in pandas_metadata[\"columns\"]\n if c[\"pandas_type\"] == \"categorical\"\n ]\n else:\n categories = None\n dtypes = _get_pyarrow_dtypes(dataset.schema.to_arrow_schema(), categories)\n if set(names) != set(df.columns) - set(partition_on):\n raise ValueError(\n \"Appended columns not the same.\\n\"\n \"Previous: {} | New: {}\".format(names, list(df.columns))\n )\n elif (pd.Series(dtypes).loc[names] != df[names].dtypes).any():\n # TODO Coerce values for compatible but different dtypes\n raise ValueError(\n \"Appended dtypes differ.\\n{}\".format(\n set(dtypes.items()) ^ set(df.dtypes.iteritems())\n )\n )\n i_offset = len(dataset.pieces)\n\n if division_info[\"name\"] not in names:\n ignore_divisions = True\n if not ignore_divisions:\n old_end = None\n row_groups = [\n dataset.metadata.row_group(i)\n for i in range(dataset.metadata.num_row_groups)\n ]\n for row_group in row_groups:\n for i, name in enumerate(names):\n if name != division_info[\"name\"]:\n continue\n column = row_group.column(i)\n if column.statistics:\n if not old_end:\n old_end = column.statistics.max\n else:\n old_end = max(old_end, column.statistics.max)\n break\n\n divisions = division_info[\"divisions\"]\n if divisions[0] < old_end:\n raise ValueError(\n \"Appended divisions overlapping with the previous ones\"\n \" (set ignore_divisions=True to append anyway).\\n\"\n \"Previous: {} | New: {}\".format(old_end, divisions[0])\n )\n\n return fmd, schema, i_offset", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowEngine.write_metadata_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowEngine.write_metadata_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1026, "end_line": 1050, "span_ids": ["ArrowEngine.write_metadata"], "tokens": 266}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowEngine(Engine):\n\n @staticmethod\n def write_metadata(parts, fmd, fs, path, append=False, **kwargs):\n parts = [p for p in parts if p[0][\"meta\"] is not None]\n if parts:\n if not append:\n # Get only arguments specified in the function\n common_metadata_path = fs.sep.join([path, \"_common_metadata\"])\n keywords = getargspec(pq.write_metadata).args\n kwargs_meta = {k: v for k, v in kwargs.items() if k in keywords}\n with fs.open(common_metadata_path, \"wb\") as fil:\n pq.write_metadata(parts[0][0][\"schema\"], fil, **kwargs_meta)\n\n # Aggregate metadata and write to 
_metadata file\n metadata_path = fs.sep.join([path, \"_metadata\"])\n if append and fmd is not None:\n _meta = fmd\n i_start = 0\n else:\n _meta = parts[0][0][\"meta\"]\n i_start = 1\n for i in range(i_start, len(parts)):\n _append_row_groups(_meta, parts[i][0][\"meta\"])\n with fs.open(metadata_path, \"wb\") as fil:\n _meta.write_metadata_file(fil)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_from_distutils_version_im_NONE_LABEL.___null_dask_index___": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_from_distutils_version_im_NONE_LABEL.___null_dask_index___", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 27, "span_ids": ["imports"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from distutils.version import LooseVersion\n\nimport tlz as toolz\nimport warnings\nfrom ....bytes import core # noqa\nfrom fsspec.core import get_fs_token_paths\nfrom fsspec.utils import stringify_path\n\nfrom ...core import DataFrame, new_dd_object\nfrom ....base import tokenize\nfrom ....utils import import_required, natural_sort_key, parse_bytes\nfrom ...methods import concat\nfrom ....highlevelgraph import Layer\nfrom ....blockwise import Blockwise\n\n\ntry:\n import snappy\n\n snappy.compress\nexcept (ImportError, AttributeError):\n snappy = None\n\n\n__all__ = (\"read_parquet\", \"to_parquet\")\n\nNONE_LABEL = \"__null_dask_index__\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py___ParquetSubgraph.__repr__.return._ParquetSubgraph_name_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py___ParquetSubgraph.__repr__.return._ParquetSubgraph_name_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 29, "end_line": 56, "span_ids": ["ParquetSubgraph.__init__", "imports", "ParquetSubgraph", "ParquetSubgraph.__repr__"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# ----------------------------------------------------------------------\n# User API\n\n\nclass ParquetSubgraph(Layer):\n \"\"\"\n Subgraph for reading Parquet files.\n\n 
Enables optimizations (see optimize_read_parquet_getitem).\n \"\"\"\n\n def __init__(\n self, name, engine, fs, meta, columns, index, parts, kwargs, part_ids=None\n ):\n self.name = name\n self.engine = engine\n self.fs = fs\n self.meta = meta\n self.columns = columns\n self.index = index\n self.parts = parts\n self.kwargs = kwargs\n self.part_ids = list(range(len(parts))) if part_ids is None else part_ids\n\n def __repr__(self):\n return \"ParquetSubgraph<name='{}', n_parts={}, columns={}>\".format(\n self.name, len(self.part_ids), list(self.columns)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_read_parquet_read_parquet._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_read_parquet_read_parquet._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 90, "end_line": 186, "span_ids": ["read_parquet"], "tokens": 1101}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def read_parquet(\n path,\n columns=None,\n filters=None,\n categories=None,\n index=None,\n storage_options=None,\n engine=\"auto\",\n gather_statistics=None,\n split_row_groups=None,\n chunksize=None,\n **kwargs,\n):\n \"\"\"\n Read a Parquet file into a Dask DataFrame\n\n This reads a directory of Parquet data into a Dask.dataframe, one file per\n partition. It selects the index among the sorted columns if any exist.\n\n Parameters\n ----------\n path : string or list\n Source directory for data, or path(s) to individual parquet files.\n Prefix with a protocol like ``s3://`` to read from alternative\n filesystems. To read from multiple files you can pass a globstring or a\n list of paths, with the caveat that they must all have the same\n protocol.\n columns : string, list or None (default)\n Field name(s) to read in as columns in the output. By default all\n non-index fields will be read (as determined by the pandas parquet\n metadata, if present). Provide a single field name instead of a list to\n read in the data as a Series.\n filters : Union[List[Tuple[str, str, Any]], List[List[Tuple[str, str, Any]]]]\n List of filters to apply, like ``[[('x', '=', 0), ...], ...]``. This\n implements partition-level (hive) filtering only, i.e., to prevent the\n loading of some row-groups and/or files.\n\n Predicates can be expressed in disjunctive normal form (DNF). This means\n that the innermost tuple describes a single column predicate. These\n inner predicates are combined with an AND conjunction into a larger\n predicate. The outer-most list then combines all of the combined\n filters with an OR disjunction.\n\n Predicates can also be expressed as a List[Tuple]. These are evaluated\n as an AND conjunction. To express OR in predicates, one must use the\n (preferred) List[List[Tuple]] notation.\n index : string, list, False or None (default)\n Field name(s) to use as the output frame index. 
By default will be\n inferred from the pandas parquet file metadata (if present). Use False\n to read all fields as columns.\n categories : list, dict or None\n For any fields listed here, if the parquet encoding is Dictionary,\n the column will be created with dtype category. Use only if it is\n guaranteed that the column is encoded as dictionary in all row-groups.\n If a list, assumes up to 2**16-1 labels; if a dict, specify the number\n of labels expected; if None, will load categories automatically for\n data written by dask/fastparquet, not otherwise.\n storage_options : dict\n Key/value pairs to be passed on to the file-system backend, if any.\n engine : {'auto', 'fastparquet', 'pyarrow'}, default 'auto'\n Parquet reader library to use. If only one library is installed, it\n will use that one; if both, it will use 'fastparquet'\n gather_statistics : bool or None (default)\n Gather the statistics for each dataset partition. By default,\n this will only be done if the _metadata file is available. Otherwise,\n statistics will only be gathered if True, because the footer of\n every file will be parsed (which is very slow on some systems).\n split_row_groups : bool or int\n Default is True if a _metadata file is available or if\n the dataset is composed of a single file (otherwise default is False).\n If True, then each output dataframe partition will correspond to a single\n parquet-file row-group. If False, each partition will correspond to a\n complete file. If a positive integer value is given, each dataframe\n partition will correspond to that number of parquet row-groups (or fewer).\n Only the \"pyarrow\" engine supports this argument.\n chunksize : int, str\n The target task partition size. If set, consecutive row-groups\n from the same file will be aggregated into the same output\n partition until the aggregate size reaches this value.\n **kwargs: dict (of dicts)\n Passthrough keyword arguments for the read backend.\n The top-level keys correspond to the appropriate operation type, and\n the second level corresponds to the kwargs that will be passed on to\n the underlying `pyarrow` or `fastparquet` function.\n Supported top-level keys: 'dataset' (for opening a `pyarrow` dataset),\n 'file' (for opening a `fastparquet` `ParquetFile`), 'read' (for the\n backend read function), 'arrow_to_pandas' (for controlling the arguments\n passed to convert from a `pyarrow.Table.to_pandas()`)\n\n Examples\n --------\n >>> df = dd.read_parquet('s3://bucket/my-parquet-data') # doctest: +SKIP\n\n See Also\n --------\n to_parquet\n \"\"\"\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_read_parquet.if_isinstance_columns_st_read_parquet.return.new_dd_object_subgraph_n": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_read_parquet.if_isinstance_columns_st_read_parquet.return.new_dd_object_subgraph_n", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 264, "end_line": 344, "span_ids": ["read_parquet"], "tokens": 548}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def read_parquet(\n path,\n columns=None,\n filters=None,\n categories=None,\n index=None,\n storage_options=None,\n engine=\"auto\",\n gather_statistics=None,\n split_row_groups=None,\n chunksize=None,\n **kwargs,\n):\n\n if isinstance(columns, str):\n df = read_parquet(\n path,\n [columns],\n filters,\n categories,\n index,\n storage_options,\n engine,\n gather_statistics,\n )\n return df[columns]\n\n if columns is not None:\n columns = list(columns)\n\n name = \"read-parquet-\" + tokenize(\n path,\n columns,\n filters,\n categories,\n index,\n storage_options,\n engine,\n gather_statistics,\n )\n\n if isinstance(engine, str):\n engine = get_engine(engine)\n\n if hasattr(path, \"name\"):\n path = stringify_path(path)\n fs, _, paths = get_fs_token_paths(path, mode=\"rb\", storage_options=storage_options)\n\n paths = sorted(paths, key=natural_sort_key) # numeric rather than glob ordering\n\n auto_index_allowed = False\n if index is None:\n # User is allowing auto-detected index\n auto_index_allowed = True\n if index and isinstance(index, str):\n index = [index]\n\n meta, statistics, parts, index = engine.read_metadata(\n fs,\n paths,\n categories=categories,\n index=index,\n gather_statistics=gather_statistics,\n filters=filters,\n split_row_groups=split_row_groups,\n **kwargs,\n )\n\n # Parse dataset statistics from metadata (if available)\n parts, divisions, index, index_in_columns = process_statistics(\n parts, statistics, filters, index, chunksize\n )\n\n # Account for index and columns arguments.\n # Modify `meta` dataframe accordingly\n meta, index, columns = set_index_columns(\n meta, index, columns, index_in_columns, auto_index_allowed\n )\n if meta.index.name == NONE_LABEL:\n meta.index.name = None\n\n subgraph = BlockwiseParquet(name, engine, fs, meta, columns, index, parts, kwargs)\n\n # Set the index that was previously treated as a column\n if index_in_columns:\n meta = meta.set_index(index)\n if meta.index.name == NONE_LABEL:\n meta.index.name = None\n\n if len(divisions) < 2:\n # empty dataframe - just use meta\n subgraph = {(name, 0): meta}\n divisions = (None, None)\n\n return new_dd_object(subgraph, name, meta, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": 
"\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_read_parquet_part_read_parquet_part.return.df": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_read_parquet_part_read_parquet_part.return.df", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 347, "end_line": 365, "span_ids": ["read_parquet_part"], "tokens": 161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def read_parquet_part(fs, func, meta, part, columns, index, kwargs):\n \"\"\"Read a part of a parquet dataset\n\n This function is used by `read_parquet`.\"\"\"\n\n if isinstance(part, list):\n dfs = [func(fs, rg, columns.copy(), index, **kwargs) for rg in part]\n df = concat(dfs, axis=0)\n else:\n df = func(fs, part, columns, index, **kwargs)\n\n if meta.columns.name:\n df.columns.name = meta.columns.name\n columns = columns or []\n index = index or []\n df = df[[c for c in columns if c not in index]]\n if index == [NONE_LABEL]:\n df.index.name = None\n return df", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_to_parquet_to_parquet._Store_Dask_dataframe_t": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_to_parquet_to_parquet._Store_Dask_dataframe_t", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 291, "end_line": 370, "span_ids": ["to_parquet"], "tokens": 783}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_parquet(\n df,\n path,\n engine=\"auto\",\n compression=\"default\",\n write_index=True,\n append=False,\n ignore_divisions=False,\n partition_on=None,\n storage_options=None,\n write_metadata_file=True,\n compute=True,\n compute_kwargs=None,\n schema=None,\n **kwargs,\n):\n \"\"\"Store Dask.dataframe to Parquet files\n\n Notes\n -----\n Each partition will be written to a separate file.\n\n Parameters\n ----------\n df : dask.dataframe.DataFrame\n path : string or pathlib.Path\n Destination directory for data. Prepend with protocol like ``s3://``\n or ``hdfs://`` for remote data.\n engine : {'auto', 'fastparquet', 'pyarrow'}, default 'auto'\n Parquet library to use. 
If only one library is installed, it will use\n that one; if both, it will use 'fastparquet'.\n compression : string or dict, optional\n Either a string like ``\"snappy\"`` or a dictionary mapping column names\n to compressors like ``{\"name\": \"gzip\", \"values\": \"snappy\"}``. The\n default is ``\"default\"``, which uses the default compression for\n whichever engine is selected.\n write_index : boolean, optional\n Whether or not to write the index. Defaults to True.\n append : bool, optional\n If False (default), construct data-set from scratch. If True, add new\n row-group(s) to an existing data-set. In the latter case, the data-set\n must exist, and the schema must match the input data.\n ignore_divisions : bool, optional\n If False (default) raises error when previous divisions overlap with\n the new appended divisions. Ignored if append=False.\n partition_on : list, optional\n Construct directory-based partitioning by splitting on these fields'\n values. Each dask partition will result in one or more datafiles,\n there will be no global groupby.\n storage_options : dict, optional\n Key/value pairs to be passed on to the file-system backend, if any.\n write_metadata_file : bool, optional\n Whether to write the special \"_metadata\" file.\n compute : bool, optional\n If True (default) then the result is computed immediately. If False\n then a ``dask.delayed`` object is returned for future computation.\n compute_kwargs : dict, optional\n Options to be passed in to the compute method\n schema : Schema object, dict, or {\"infer\", None}, optional\n Global schema to use for the output dataset. Alternatively, a `dict`\n of pyarrow types can be specified (e.g. `schema={\"id\": pa.string()}`).\n For this case, fields excluded from the dictionary will be inferred\n from `_meta_nonempty`. If \"infer\", the first non-empty and non-null\n partition will be used to infer the type for \"object\" columns. If\n None (default), we let the backend infer the schema for each distinct\n output partition. If the partitions produce inconsistent schemas,\n pyarrow will throw an error when writing the shared _metadata file.\n Note that this argument is ignored by the \"fastparquet\" engine.\n **kwargs :\n Extra options to be passed on to the specific backend.\n\n Examples\n --------\n >>> df = dd.read_csv(...) # doctest: +SKIP\n >>> dd.to_parquet(df, '/path/to/output/',...) # doctest: +SKIP\n\n See Also\n --------\n read_parquet: Read parquet data to dask.dataframe\n \"\"\"\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_to_parquet.from_dask_import_delayed_to_parquet.if_write_index_.else_.df.df_reset_index_drop_True_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_to_parquet.from_dask_import_delayed_to_parquet.if_write_index_.else_.df.df_reset_index_drop_True_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 371, "end_line": 446, "span_ids": ["to_parquet"], "tokens": 805}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_parquet(\n df,\n path,\n engine=\"auto\",\n compression=\"default\",\n write_index=True,\n append=False,\n ignore_divisions=False,\n partition_on=None,\n storage_options=None,\n write_metadata_file=True,\n compute=True,\n compute_kwargs=None,\n schema=None,\n **kwargs,\n):\n from dask import delayed\n\n if compression == \"default\":\n if snappy is not None:\n compression = \"snappy\"\n else:\n compression = None\n\n partition_on = partition_on or []\n if isinstance(partition_on, str):\n partition_on = [partition_on]\n\n if set(partition_on) - set(df.columns):\n raise ValueError(\n \"Partitioning on non-existent column. \"\n \"partition_on=%s .\"\n \"columns=%s\" % (str(partition_on), str(list(df.columns)))\n )\n\n if isinstance(engine, str):\n engine = get_engine(engine)\n\n if hasattr(path, \"name\"):\n path = stringify_path(path)\n fs, _, _ = get_fs_token_paths(path, mode=\"wb\", storage_options=storage_options)\n # Trim any protocol information from the path before forwarding\n path = fs._strip_protocol(path)\n\n # Save divisions and corresponding index name. This is necessary,\n # because we may be resetting the index to write the file\n division_info = {\"divisions\": df.divisions, \"name\": df.index.name}\n if division_info[\"name\"] is None:\n # As of 0.24.2, pandas will rename an index with name=None\n # when df.reset_index() is called. The default name is \"index\",\n # but dask will always change the name to the NONE_LABEL constant\n if NONE_LABEL not in df.columns:\n division_info[\"name\"] = NONE_LABEL\n elif write_index:\n raise ValueError(\n \"Index must have a name if __null_dask_index__ is a column.\"\n )\n else:\n warnings.warn(\n \"If read back by Dask, column named __null_dask_index__ \"\n \"will be set to the index (and renamed to None).\"\n )\n\n # There are some \"resrved\" names that may be used as the default column\n # name after resetting the index. 
However, we don't want to treat it as\n # a \"special\" name if the string is already used as a \"real\" column name.\n reserved_names = []\n for name in [\"index\", \"level_0\"]:\n if name not in df.columns:\n reserved_names.append(name)\n\n # If write_index==True (default), reset the index and record the\n # name of the original index in `index_cols` (we will set the name\n # to the NONE_LABEL constant if it is originally `None`).\n # `fastparquet` will use `index_cols` to specify the index column(s)\n # in the metadata. `pyarrow` will revert the `reset_index` call\n # below if `index_cols` is populated (because pyarrow will want to handle\n # index preservation itself). For both engines, the column index\n # will be written to \"pandas metadata\" if write_index=True\n index_cols = []\n if write_index:\n real_cols = set(df.columns)\n none_index = list(df._meta.index.names) == [None]\n df = df.reset_index()\n if none_index:\n df.columns = [\n c if c not in reserved_names else NONE_LABEL for c in df.columns\n ]\n index_cols = [c for c in set(df.columns).difference(real_cols)]\n else:\n # Not writing index - might as well drop it\n df = df.reset_index(drop=True)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_to_parquet._to_parquet_kwargs_to_parquet.return.out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_to_parquet._to_parquet_kwargs_to_parquet.return.out", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 448, "end_line": 509, "span_ids": ["to_parquet"], "tokens": 458}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_parquet(\n df,\n path,\n engine=\"auto\",\n compression=\"default\",\n write_index=True,\n append=False,\n ignore_divisions=False,\n partition_on=None,\n storage_options=None,\n write_metadata_file=True,\n compute=True,\n compute_kwargs=None,\n schema=None,\n **kwargs,\n):\n # ... 
other code\n\n _to_parquet_kwargs = {\n \"engine\",\n \"compression\",\n \"write_index\",\n \"append\",\n \"ignore_divisions\",\n \"partition_on\",\n \"storage_options\",\n \"write_metadata_file\",\n \"compute\",\n }\n kwargs_pass = {k: v for k, v in kwargs.items() if k not in _to_parquet_kwargs}\n\n # Engine-specific initialization steps to write the dataset.\n # Possibly create parquet metadata, and load existing stuff if appending\n meta, schema, i_offset = engine.initialize_write(\n df,\n fs,\n path,\n append=append,\n ignore_divisions=ignore_divisions,\n partition_on=partition_on,\n division_info=division_info,\n index_cols=index_cols,\n schema=schema,\n **kwargs_pass,\n )\n\n # Use i_offset and df.npartitions to define file-name list\n filenames = [\"part.%i.parquet\" % (i + i_offset) for i in range(df.npartitions)]\n\n # write parts\n dwrite = delayed(engine.write_partition)\n parts = [\n dwrite(\n d,\n path,\n fs,\n filename,\n partition_on,\n write_metadata_file,\n fmd=meta,\n compression=compression,\n index_cols=index_cols,\n schema=schema,\n **kwargs_pass,\n )\n for d, filename in zip(df.to_delayed(), filenames)\n ]\n\n # single task to complete\n out = delayed(lambda x: None)(parts)\n if write_metadata_file:\n out = delayed(engine.write_metadata)(\n parts, meta, fs, path, append=append, compression=compression\n )\n\n if compute:\n if compute_kwargs is None:\n compute_kwargs = dict()\n out = out.compute(**compute_kwargs)\n return out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py__ENGINES_get_engine.if_engine_auto_.else_.raise_ValueError_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py__ENGINES_get_engine.if_engine_auto_.else_.raise_ValueError_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 512, "end_line": 561, "span_ids": ["get_engine", "impl:10"], "tokens": 348}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "_ENGINES = {}\n\n\ndef get_engine(engine):\n \"\"\"Get the parquet engine backend implementation.\n\n Parameters\n ----------\n engine : {'auto', 'fastparquet', 'pyarrow'}, default 'auto'\n Parquet reader library to use. 
Defaults to fastparquet if both are\n installed\n\n Returns\n -------\n A dict containing a ``'read'`` and ``'write'`` function.\n \"\"\"\n if engine in _ENGINES:\n return _ENGINES[engine]\n\n if engine == \"auto\":\n for eng in [\"fastparquet\", \"pyarrow\"]:\n try:\n return get_engine(eng)\n except RuntimeError:\n pass\n else:\n raise RuntimeError(\"Please install either fastparquet or pyarrow\")\n\n elif engine == \"fastparquet\":\n import_required(\"fastparquet\", \"`fastparquet` not installed\")\n from .fastparquet import FastParquetEngine\n\n _ENGINES[\"fastparquet\"] = eng = FastParquetEngine\n return eng\n\n elif engine == \"pyarrow\" or engine == \"arrow\":\n pa = import_required(\"pyarrow\", \"`pyarrow` not installed\")\n from .arrow import ArrowEngine\n\n if LooseVersion(pa.__version__) < \"0.13.1\":\n raise RuntimeError(\"PyArrow version >= 0.13.1 required\")\n\n _ENGINES[\"pyarrow\"] = eng = ArrowEngine\n return eng\n\n else:\n raise ValueError(\n 'Unsupported engine: \"{0}\".'.format(engine)\n + ' Valid choices include \"pyarrow\" and \"fastparquet\".'\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_None_3_sorted_columns.return.out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_None_3_sorted_columns.return.out", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 564, "end_line": 608, "span_ids": ["sorted_columns", "get_engine"], "tokens": 259}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "#####################\n# Utility Functions #\n#####################\n\n\ndef sorted_columns(statistics):\n \"\"\"Find sorted columns given row-group statistics\n\n This finds all columns that are sorted, along with appropriate divisions\n values for those columns\n\n Returns\n -------\n out: List of {'name': str, 'divisions': List[str]} dictionaries\n \"\"\"\n if not statistics:\n return []\n\n out = []\n for i, c in enumerate(statistics[0][\"columns\"]):\n if not all(\n \"min\" in s[\"columns\"][i] and \"max\" in s[\"columns\"][i] for s in statistics\n ):\n continue\n divisions = [c[\"min\"]]\n max = c[\"max\"]\n success = True\n for stats in statistics[1:]:\n c = stats[\"columns\"][i]\n if c[\"min\"] is None:\n success = False\n break\n if c[\"min\"] >= max:\n divisions.append(c[\"min\"])\n max = c[\"max\"]\n else:\n success = False\n break\n\n if success:\n divisions.append(max)\n assert divisions == sorted(divisions)\n out.append({\"name\": c[\"name\"], \"divisions\": divisions})\n\n return out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_apply_filters_apply_filters._Apply_filters_onto_par": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_apply_filters_apply_filters._Apply_filters_onto_par", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 611, "end_line": 638, "span_ids": ["apply_filters"], "tokens": 281}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def apply_filters(parts, statistics, filters):\n \"\"\"Apply filters onto parts/statistics pairs\n\n Parameters\n ----------\n parts: list\n Tokens corresponding to row groups to read in the future\n statistics: List[dict]\n List of statistics for each part, including min and max values\n filters: Union[List[Tuple[str, str, Any]], List[List[Tuple[str, str, Any]]]]\n List of filters to apply, like ``[[('x', '=', 0), ...], ...]``. This\n implements partition-level (hive) filtering only, i.e., to prevent the\n loading of some row-groups and/or files.\n\n Predicates can be expressed in disjunctive normal form (DNF). This means\n that the innermost tuple describes a single column predicate. These\n inner predicates are combined with an AND conjunction into a larger\n predicate. The outer-most list then combines all of the combined\n filters with an OR disjunction.\n\n Predicates can also be expressed as a List[Tuple]. These are evaluated\n as an AND conjunction. To express OR in predictates, one must use the\n (preferred) List[List[Tuple]] notation.\n\n Returns\n -------\n parts, statistics: the same as the input, but possibly a subset\n \"\"\"\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_apply_filters.apply_conjunction_apply_filters.return.out_parts_out_statistics": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_apply_filters.apply_conjunction_apply_filters.return.out_parts_out_statistics", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 640, "end_line": 685, "span_ids": ["apply_filters"], "tokens": 328}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def apply_filters(parts, statistics, filters):\n\n def apply_conjunction(parts, statistics, conjunction):\n for column, operator, value in conjunction:\n out_parts = []\n out_statistics = []\n for part, stats in zip(parts, statistics):\n if \"filter\" in stats and stats[\"filter\"]:\n continue # Filtered by engine\n try:\n c = toolz.groupby(\"name\", stats[\"columns\"])[column][0]\n min = c[\"min\"]\n max = c[\"max\"]\n except KeyError:\n out_parts.append(part)\n out_statistics.append(stats)\n else:\n if (\n operator == \"==\"\n and min <= value <= max\n or operator == \"<\"\n and min < value\n or operator == \"<=\"\n and min <= value\n or operator == \">\"\n and max > value\n or operator == \">=\"\n and max >= value\n or operator == \"in\"\n and any(min <= item <= max for item in value)\n ):\n out_parts.append(part)\n out_statistics.append(stats)\n\n parts, statistics = out_parts, out_statistics\n\n return parts, statistics\n\n conjunction, *disjunction = filters if isinstance(filters[0], list) else [filters]\n\n out_parts, out_statistics = apply_conjunction(parts, statistics, conjunction)\n for conjunction in disjunction:\n for part, stats in zip(*apply_conjunction(parts, statistics, conjunction)):\n if part not in out_parts:\n out_parts.append(part)\n out_statistics.append(stats)\n\n return out_parts, out_statistics", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_process_statistics_process_statistics.return.parts_divisions_index_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_process_statistics_process_statistics.return.parts_divisions_index_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 765, "end_line": 832, "span_ids": ["process_statistics"], "tokens": 576}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", 
"end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def process_statistics(parts, statistics, filters, index, chunksize):\n \"\"\"Process row-group column statistics in metadata\n Used in read_parquet.\n \"\"\"\n index_in_columns = False\n if statistics:\n result = list(\n zip(\n *[\n (part, stats)\n for part, stats in zip(parts, statistics)\n if stats[\"num-rows\"] > 0\n ]\n )\n )\n parts, statistics = result or [[], []]\n if filters:\n parts, statistics = apply_filters(parts, statistics, filters)\n\n # Aggregate parts/statistics if we are splitting by row-group\n if chunksize:\n parts, statistics = aggregate_row_groups(parts, statistics, chunksize)\n\n out = sorted_columns(statistics)\n\n if index and isinstance(index, str):\n index = [index]\n if index and out:\n # Only one valid column\n out = [o for o in out if o[\"name\"] in index]\n if index is not False and len(out) == 1:\n # Use only sorted column with statistics as the index\n divisions = out[0][\"divisions\"]\n if index is None:\n index_in_columns = True\n index = [out[0][\"name\"]]\n elif index != [out[0][\"name\"]]:\n raise ValueError(\"Specified index is invalid.\\nindex: {}\".format(index))\n elif index is not False and len(out) > 1:\n if any(o[\"name\"] == NONE_LABEL for o in out):\n # Use sorted column matching NONE_LABEL as the index\n [o] = [o for o in out if o[\"name\"] == NONE_LABEL]\n divisions = o[\"divisions\"]\n if index is None:\n index = [o[\"name\"]]\n index_in_columns = True\n elif index != [o[\"name\"]]:\n raise ValueError(\n \"Specified index is invalid.\\nindex: {}\".format(index)\n )\n else:\n # Multiple sorted columns found, cannot autodetect the index\n warnings.warn(\n \"Multiple sorted columns found %s, cannot\\n \"\n \"autodetect index. 
Will continue without an index.\\n\"\n \"To pick an index column, use the index= keyword; to \\n\"\n \"silence this warning use index=False.\"\n \"\" % [o[\"name\"] for o in out],\n RuntimeWarning,\n )\n index = False\n divisions = [None] * (len(parts) + 1)\n else:\n divisions = [None] * (len(parts) + 1)\n else:\n divisions = [None] * (len(parts) + 1)\n\n return parts, divisions, index, index_in_columns", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_set_index_columns_set_index_columns.return.meta_index_columns": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_set_index_columns_set_index_columns.return.meta_index_columns", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 758, "end_line": 808, "span_ids": ["set_index_columns"], "tokens": 386}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def set_index_columns(meta, index, columns, index_in_columns, auto_index_allowed):\n \"\"\"Handle index/column arguments, and modify `meta`\n Used in read_parquet.\n \"\"\"\n ignore_index_column_intersection = False\n if columns is None:\n # User didn't specify columns, so ignore any intersection\n # of auto-detected values with the index (if necessary)\n ignore_index_column_intersection = True\n columns = [c for c in meta.columns]\n\n if not set(columns).issubset(set(meta.columns)):\n raise ValueError(\n \"The following columns were not found in the dataset %s\\n\"\n \"The following columns were found %s\"\n % (set(columns) - set(meta.columns), meta.columns)\n )\n\n if index:\n if isinstance(index, str):\n index = [index]\n if isinstance(columns, str):\n columns = [columns]\n\n if ignore_index_column_intersection:\n columns = [col for col in columns if col not in index]\n if set(index).intersection(columns):\n if auto_index_allowed:\n raise ValueError(\n \"Specified index and column arguments must not intersect\"\n \" (set index=False or remove the detected index from columns).\\n\"\n \"index: {} | column: {}\".format(index, columns)\n )\n else:\n raise ValueError(\n \"Specified index and column arguments must not intersect.\\n\"\n \"index: {} | column: {}\".format(index, columns)\n )\n\n # Leaving index as a column in `meta`, because the index\n # will be reset below (in case the index was detected after\n # meta was created)\n if index_in_columns:\n meta = meta[columns + index]\n else:\n meta = meta[columns]\n\n else:\n meta = meta[list(columns)]\n\n return meta, index, columns", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_aggregate_row_groups_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_aggregate_row_groups_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 811, "end_line": 851, "span_ids": ["aggregate_row_groups", "impl:12"], "tokens": 360}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def aggregate_row_groups(parts, stats, chunksize):\n if not stats[0].get(\"file_path_0\", None):\n return parts, stats\n\n parts_agg = []\n stats_agg = []\n chunksize = parse_bytes(chunksize)\n next_part, next_stat = [parts[0].copy()], stats[0].copy()\n for i in range(1, len(parts)):\n stat, part = stats[i], parts[i]\n if (stat[\"file_path_0\"] == next_stat[\"file_path_0\"]) and (\n (next_stat[\"total_byte_size\"] + stat[\"total_byte_size\"]) <= chunksize\n ):\n # Update part list\n next_part.append(part)\n\n # Update Statistics\n next_stat[\"total_byte_size\"] += stat[\"total_byte_size\"]\n next_stat[\"num-rows\"] += stat[\"num-rows\"]\n for col, col_add in zip(next_stat[\"columns\"], stat[\"columns\"]):\n if col[\"name\"] != col_add[\"name\"]:\n raise ValueError(\"Columns are different!!\")\n if \"null_count\" in col:\n col[\"null_count\"] += col_add[\"null_count\"]\n if \"min\" in col:\n col[\"min\"] = min(col[\"min\"], col_add[\"min\"])\n if \"max\" in col:\n col[\"max\"] = max(col[\"max\"], col_add[\"max\"])\n else:\n parts_agg.append(next_part)\n stats_agg.append(next_stat)\n next_part, next_stat = [part.copy()], stat.copy()\n\n parts_agg.append(next_part)\n stats_agg.append(next_stat)\n\n return parts_agg, stats_agg\n\n\nDataFrame.to_parquet.__doc__ = to_parquet.__doc__", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_from_distutils_version_im_Engine": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_from_distutils_version_im_Engine", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/fastparquet.py", "file_name": "fastparquet.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 29, "span_ids": ["imports"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from distutils.version import LooseVersion\n\nfrom collections import OrderedDict\nimport copy\nimport json\nimport warnings\n\nimport tlz as toolz\n\nimport numpy as np\nimport pandas as pd\n\ntry:\n import fastparquet\n from fastparquet import ParquetFile\n from fastparquet.util import get_file_scheme\n from fastparquet.util import ex_from_sep, val_to_num, groupby_types\n from 
fastparquet.writer import partition_on_columns, make_part_file\nexcept ImportError:\n pass\n\nfrom .utils import _parse_pandas_metadata, _normalize_index_columns, _analyze_paths\nfrom ..utils import _meta_from_dtypes\nfrom ...utils import UNKNOWN_CATEGORIES\n\n#########################\n# Fastparquet interface #\n#########################\nfrom .utils import Engine", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py__paths_to_cats__paths_to_cats.return.cats": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py__paths_to_cats__paths_to_cats.return.cats", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/fastparquet.py", "file_name": "fastparquet.py", "file_type": "text/x-python", "category": "implementation", "start_line": 32, "end_line": 91, "span_ids": ["_paths_to_cats"], "tokens": 538}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _paths_to_cats(paths, file_scheme):\n \"\"\"\n Extract categorical fields and labels from hive- or drill-style paths.\n FixMe: This has been pasted from https://github.com/dask/fastparquet/pull/471\n Use fastparquet.api.paths_to_cats from fastparquet>0.3.2 instead.\n\n Parameters\n ----------\n paths (Iterable[str]): file paths relative to root\n file_scheme (str):\n\n Returns\n -------\n cats (OrderedDict[str, List[Any]]): a dict of field names and their values\n \"\"\"\n if file_scheme in [\"simple\", \"flat\", \"other\"]:\n cats = {}\n return cats\n\n cats = OrderedDict()\n raw_cats = OrderedDict()\n s = ex_from_sep(\"/\")\n paths = toolz.unique(paths)\n if file_scheme == \"hive\":\n partitions = toolz.unique((k, v) for path in paths for k, v in s.findall(path))\n for key, val in partitions:\n cats.setdefault(key, set()).add(val_to_num(val))\n raw_cats.setdefault(key, set()).add(val)\n else:\n i_val = toolz.unique(\n (i, val) for path in paths for i, val in enumerate(path.split(\"/\")[:-1])\n )\n for i, val in i_val:\n key = \"dir%i\" % i\n cats.setdefault(key, set()).add(val_to_num(val))\n raw_cats.setdefault(key, set()).add(val)\n\n for key, v in cats.items():\n # Check that no partition names map to the same value after transformation by val_to_num\n raw = raw_cats[key]\n if len(v) != len(raw):\n conflicts_by_value = OrderedDict()\n for raw_val in raw_cats[key]:\n conflicts_by_value.setdefault(val_to_num(raw_val), set()).add(raw_val)\n conflicts = [\n c for k in conflicts_by_value.values() if len(k) > 1 for c in k\n ]\n raise ValueError(\"Partition names map to the same value: %s\" % conflicts)\n vals_by_type = groupby_types(v)\n\n # Check that all partition names map to the same type after transformation by val_to_num\n if len(vals_by_type) > 1:\n examples = [x[0] for x in vals_by_type.values()]\n warnings.warn(\n \"Partition names coerce to values of different types, e.g. 
%s\"\n % examples\n )\n\n cats = OrderedDict([(key, list(v)) for key, v in cats.items()])\n return cats", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_paths_to_cats__determine_pf_parts.fast_metadata.True": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_paths_to_cats__determine_pf_parts.fast_metadata.True", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/fastparquet.py", "file_name": "fastparquet.py", "file_type": "text/x-python", "category": "implementation", "start_line": 94, "end_line": 116, "span_ids": ["_determine_pf_parts", "impl:4"], "tokens": 259}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "paths_to_cats = (\n _paths_to_cats # FixMe: use fastparquet.api.paths_to_cats for fastparquet>0.3.2\n)\n\n\ndef _determine_pf_parts(fs, paths, gather_statistics, **kwargs):\n \"\"\"Determine how to access metadata and break read into ``parts``\n\n This logic is mostly to handle `gather_statistics=False` cases,\n because this also means we should avoid scanning every file in the\n dataset. If _metadata is available, set `gather_statistics=True`\n (if `gather_statistics=None`).\n\n The `fast_metadata` output specifies that ParquetFile metadata parsing\n is fast enough for each worker to perform during `read_partition`. The\n value will be set to True if: (1) The path is a directory containing\n _metadta, (2) the path is a list of files containing _metadata, (3)\n there is only one file to read, or (4) `gather_statistics` is False.\n In other cases, the ParquetFile object will need to be stored in the\n task graph, because metadata parsing is too expensive.\n \"\"\"\n parts = []\n fast_metadata = True\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py__determine_pf_parts.if_len_paths_1___determine_pf_parts.return.parts_pf_gather_statist": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py__determine_pf_parts.if_len_paths_1___determine_pf_parts.return.parts_pf_gather_statist", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/fastparquet.py", "file_name": "fastparquet.py", "file_type": "text/x-python", "category": "implementation", "start_line": 117, "end_line": 188, "span_ids": ["_determine_pf_parts"], "tokens": 652}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _determine_pf_parts(fs, paths, gather_statistics, **kwargs):\n # ... other code\n if len(paths) > 1:\n base, fns = _analyze_paths(paths, fs)\n if gather_statistics is not False:\n # This scans all the files, allowing index/divisions\n # and filtering\n if \"_metadata\" not in fns:\n paths_use = paths\n fast_metadata = False\n else:\n paths_use = base + fs.sep + \"_metadata\"\n pf = ParquetFile(\n paths_use, open_with=fs.open, sep=fs.sep, **kwargs.get(\"file\", {})\n )\n else:\n if \"_metadata\" in fns:\n # We have a _metadata file, lets use it\n pf = ParquetFile(\n base + fs.sep + \"_metadata\",\n open_with=fs.open,\n sep=fs.sep,\n **kwargs.get(\"file\", {})\n )\n else:\n # Rely on metadata for 0th file.\n # Will need to pass a list of paths to read_partition\n scheme = get_file_scheme(fns)\n pf = ParquetFile(paths[0], open_with=fs.open, **kwargs.get(\"file\", {}))\n pf.file_scheme = scheme\n pf.cats = paths_to_cats(fns, scheme)\n parts = paths.copy()\n elif fs.isdir(paths[0]):\n # This is a directory, check for _metadata, then _common_metadata\n paths = fs.glob(paths[0] + fs.sep + \"*\")\n base, fns = _analyze_paths(paths, fs)\n if \"_metadata\" in fns:\n # Using _metadata file (best-case scenario)\n pf = ParquetFile(\n base + fs.sep + \"_metadata\",\n open_with=fs.open,\n sep=fs.sep,\n **kwargs.get(\"file\", {})\n )\n if gather_statistics is None:\n gather_statistics = True\n\n elif gather_statistics is not False:\n # Scan every file\n pf = ParquetFile(paths, open_with=fs.open, **kwargs.get(\"file\", {}))\n fast_metadata = False\n else:\n # Use _common_metadata file if it is available.\n # Otherwise, just use 0th file\n if \"_common_metadata\" in fns:\n pf = ParquetFile(\n base + fs.sep + \"_common_metadata\",\n open_with=fs.open,\n **kwargs.get(\"file\", {})\n )\n else:\n pf = ParquetFile(paths[0], open_with=fs.open, **kwargs.get(\"file\", {}))\n scheme = get_file_scheme(fns)\n pf.file_scheme = scheme\n pf.cats = paths_to_cats(fns, scheme)\n parts = paths.copy()\n else:\n # There is only one file to read\n base = None\n pf = ParquetFile(\n paths[0], open_with=fs.open, sep=fs.sep, **kwargs.get(\"file\", {})\n )\n\n return parts, pf, gather_statistics, fast_metadata, base", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine_FastParquetEngine.read_metadata.for_catcol_in_pf_cats_.if_catcol_in_meta_columns.elif_meta_index_name_c.meta.index.meta_index_set_categories": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine_FastParquetEngine.read_metadata.for_catcol_in_pf_cats_.if_catcol_in_meta_columns.elif_meta_index_name_c.meta.index.meta_index_set_categories", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/fastparquet.py", "file_name": "fastparquet.py", "file_type": "text/x-python", "category": "implementation", "start_line": 191, "end_line": 290, "span_ids": ["FastParquetEngine", "FastParquetEngine.read_metadata"], "tokens": 733}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FastParquetEngine(Engine):\n @classmethod\n def read_metadata(\n cls,\n fs,\n paths,\n categories=None,\n index=None,\n gather_statistics=None,\n filters=None,\n **kwargs\n ):\n # Define the parquet-file (pf) object to use for metadata,\n # Also, initialize `parts`. If `parts` is populated here,\n # then each part will correspond to a file. Otherwise, each part will\n # correspond to a row group (populated below).\n parts, pf, gather_statistics, fast_metadata, base_path = _determine_pf_parts(\n fs, paths, gather_statistics, **kwargs\n )\n\n columns = None\n if pf.fmd.key_value_metadata:\n pandas_md = [\n x.value for x in pf.fmd.key_value_metadata if x.key == \"pandas\"\n ]\n else:\n pandas_md = []\n\n if len(pandas_md) == 0:\n index_names = []\n column_names = pf.columns + list(pf.cats)\n storage_name_mapping = {k: k for k in column_names}\n column_index_names = [None]\n\n elif len(pandas_md) == 1:\n (\n index_names,\n column_names,\n storage_name_mapping,\n column_index_names,\n ) = _parse_pandas_metadata(json.loads(pandas_md[0]))\n # auto-ranges should not be created by fastparquet\n column_names.extend(pf.cats)\n\n else:\n raise ValueError(\"File has multiple entries for 'pandas' metadata\")\n\n if index is None and len(index_names) > 0:\n if len(index_names) == 1 and index_names[0] is not None:\n index = index_names[0]\n else:\n index = index_names\n\n # Normalize user inputs\n column_names, index_names = _normalize_index_columns(\n columns, column_names, index, index_names\n )\n\n all_columns = index_names + column_names\n\n categories_dict = None\n if isinstance(categories, dict):\n categories_dict = categories\n\n if categories is None:\n categories = pf.categories\n elif isinstance(categories, str):\n categories = [categories]\n else:\n categories = list(categories)\n\n # Check that categories are included in columns\n if categories and not set(categories).intersection(all_columns):\n raise ValueError(\n \"categories not in available columns.\\n\"\n \"categories: {} | columns: {}\".format(categories, list(all_columns))\n )\n\n dtypes = pf._dtypes(categories)\n dtypes = {storage_name_mapping.get(k, k): v for k, v in dtypes.items()}\n\n 
index_cols = index or ()\n meta = _meta_from_dtypes(all_columns, dtypes, index_cols, column_index_names)\n\n # fastparquet doesn't handle multiindex\n if len(index_names) > 1:\n raise ValueError(\"Cannot read DataFrame with MultiIndex.\")\n\n for cat in categories:\n if cat in meta:\n meta[cat] = pd.Series(\n pd.Categorical([], categories=[UNKNOWN_CATEGORIES]),\n index=meta.index,\n )\n\n for catcol in pf.cats:\n if catcol in meta.columns:\n meta[catcol] = meta[catcol].cat.set_categories(pf.cats[catcol])\n elif meta.index.name == catcol:\n meta.index = meta.index.set_categories(pf.cats[catcol])\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.read_metadata.if_gather_statistics_and__FastParquetEngine.read_metadata._if_we_have_a_list_of_fi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.read_metadata.if_gather_statistics_and__FastParquetEngine.read_metadata._if_we_have_a_list_of_fi", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/fastparquet.py", "file_name": "fastparquet.py", "file_type": "text/x-python", "category": "implementation", "start_line": 291, "end_line": 366, "span_ids": ["FastParquetEngine.read_metadata"], "tokens": 842}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FastParquetEngine(Engine):\n @classmethod\n def read_metadata(\n cls,\n fs,\n paths,\n categories=None,\n index=None,\n gather_statistics=None,\n filters=None,\n **kwargs\n ):\n # ... 
other code\n if gather_statistics and pf.row_groups:\n stats = []\n if filters is None:\n filters = []\n\n skip_cols = set() # Columns with min/max = None detected\n # make statistics conform in layout\n for (i, row_group) in enumerate(pf.row_groups):\n s = {\"num-rows\": row_group.num_rows, \"columns\": []}\n for i_col, col in enumerate(pf.columns):\n if col not in skip_cols:\n d = {\"name\": col}\n cs_min = None\n cs_max = None\n if pf.statistics[\"min\"][col][0] is not None:\n cs_min = pf.statistics[\"min\"][col][i]\n cs_max = pf.statistics[\"max\"][col][i]\n elif (\n dtypes[col] == \"object\"\n and row_group.columns[i_col].meta_data.statistics\n ):\n cs_min = row_group.columns[\n i_col\n ].meta_data.statistics.min_value\n cs_max = row_group.columns[\n i_col\n ].meta_data.statistics.max_value\n if isinstance(cs_min, (bytes, bytearray)):\n cs_min = cs_min.decode(\"utf-8\")\n cs_max = cs_max.decode(\"utf-8\")\n if None in [cs_min, cs_max] and i == 0:\n skip_cols.add(col)\n continue\n if isinstance(cs_min, np.datetime64):\n cs_min = pd.Timestamp(cs_min)\n cs_max = pd.Timestamp(cs_max)\n d.update(\n {\n \"min\": cs_min,\n \"max\": cs_max,\n \"null_count\": pf.statistics[\"null_count\"][col][i],\n }\n )\n s[\"columns\"].append(d)\n # Need this to filter out partitioned-on categorical columns\n s[\"filter\"] = fastparquet.api.filter_out_cats(row_group, filters)\n s[\"total_byte_size\"] = row_group.total_byte_size\n s[\"file_path_0\"] = row_group.columns[0].file_path # 0th column only\n stats.append(s)\n\n else:\n stats = None\n\n pf._dtypes = lambda *args: pf.dtypes # ugly patch, could be fixed\n pf.fmd.row_groups = None\n\n # Constructing \"piece\" and \"pf\" for each dask partition. We will\n # need to consider the following output scenarios:\n #\n # 1) Each \"piece\" is a file path, and \"pf\" is `None`\n # - `gather_statistics==False` and no \"_metadata\" available\n # 2) Each \"piece\" is a row-group index, and \"pf\" is ParquetFile object\n # - We have parquet partitions and no \"_metadata\" available\n # 3) Each \"piece\" is a row-group index, and \"pf\" is a `tuple`\n # - The value of the 0th tuple element depends on the following:\n # A) Dataset is partitioned and \"_metadata\" exists\n # - 0th tuple element will be the path to \"_metadata\"\n # B) Dataset is not partitioned\n # - 0th tuple element will be the path to the data\n # C) Dataset is partitioned and \"_metadata\" does not exist\n # - 0th tuple element will be the original `paths` argument\n # (We will let the workers use `_determine_pf_parts`)\n\n # Create `parts`\n # This is a list of row-group-descriptor dicts, or file-paths\n # if we have a list of files and gather_statistics=False\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.read_metadata.base_path_FastParquetEngine.read_metadata.return._meta_stats_parts_inde": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.read_metadata.base_path_FastParquetEngine.read_metadata.return._meta_stats_parts_inde", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/fastparquet.py", "file_name": "fastparquet.py", "file_type": "text/x-python", "category": "implementation", "start_line": 367, "end_line": 443, "span_ids": ["FastParquetEngine.read_metadata"], "tokens": 673}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FastParquetEngine(Engine):\n @classmethod\n def read_metadata(\n cls,\n fs,\n paths,\n categories=None,\n index=None,\n gather_statistics=None,\n filters=None,\n **kwargs\n ):\n # ... other code\n base_path = (base_path or \"\") + fs.sep\n if parts:\n # Case (1)\n # `parts` is just a list of path names.\n pqpartitions = None\n pf_deps = None\n partsin = parts\n else:\n # Populate `partsin` with dataset row-groups\n partsin = pf.row_groups\n pqpartitions = pf.info.get(\"partitions\", None)\n if pqpartitions and not fast_metadata:\n # Case (2)\n # We have parquet partitions, and do not have\n # a \"_metadata\" file for the worker to read.\n # Therefore, we need to pass the pf object in\n # the task graph\n pf_deps = pf\n else:\n # Case (3)\n # We don't need to pass a pf object in the task graph.\n # Instead, we can try to pass the path for each part.\n pf_deps = \"tuple\"\n\n parts = []\n i_path = 0\n path_last = None\n\n # Loop over DataFrame partitions.\n # Each `part` will be a row-group or path ()\n for i, part in enumerate(partsin):\n if pqpartitions and fast_metadata:\n # Case (3A)\n # We can pass a \"_metadata\" path\n file_path = base_path + \"_metadata\"\n i_path = i\n elif (\n pf_deps\n and isinstance(part.columns[0].file_path, str)\n and not pqpartitions\n ):\n # Case (3B)\n # We can pass a specific file/part path\n path_curr = part.columns[0].file_path\n if path_last and path_curr == path_last:\n i_path += 1\n else:\n i_path = 0\n path_last = path_curr\n file_path = base_path + path_curr\n else:\n # Case (3C)\n # We cannot pass a specific file/part path\n file_path = paths\n i_path = i\n\n # Strip down pf object\n if pf_deps and pf_deps != \"tuple\":\n # Case (2)\n for col in part.columns:\n col.meta_data.statistics = None\n col.meta_data.encoding_stats = None\n\n # Final definition of \"piece\" and \"pf\" for this output partition\n piece = i_path if pf_deps else part\n pf_piece = (file_path, gather_statistics) if pf_deps == \"tuple\" else pf_deps\n part_item = {\n \"piece\": piece,\n \"kwargs\": {\"pf\": pf_piece, \"categories\": categories_dict or categories},\n }\n parts.append(part_item)\n\n # Cannot allow `None` in columns if the user has specified index=False\n if index is False and 
None in meta.columns:\n        meta.drop(columns=[None], inplace=True)\n\n        return (meta, stats, parts, index)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.read_partition_FastParquetEngine.read_partition.if_pf_is_None_.else_.return.pf_read_row_group_file_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.read_partition_FastParquetEngine.read_partition.if_pf_is_None_.else_.return.pf_read_row_group_file_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/fastparquet.py", "file_name": "fastparquet.py", "file_type": "text/x-python", "category": "implementation", "start_line": 445, "end_line": 501, "span_ids": ["FastParquetEngine.read_partition"], "tokens": 555}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FastParquetEngine(Engine):\n\n    @classmethod\n    def read_partition(\n        cls, fs, piece, columns, index, categories=(), pf=None, **kwargs\n    ):\n\n        null_index_name = False\n        if isinstance(index, list):\n            if index == [None]:\n                # Handling a None-labeled index...\n                # The pandas metadata told us to read in an index\n                # labeled `None`. If this corresponds to a `RangeIndex`,\n                # fastparquet will need to use the pandas metadata to\n                # construct the index.  Otherwise, the index will correspond\n                # to a column named \"__index_level_0__\".  
We will need to\n                # check the `ParquetFile` object for this column below.\n                index = []\n                null_index_name = True\n            columns += index\n\n        if pf is None:\n            base, fns = _analyze_paths([piece], fs)\n            scheme = get_file_scheme(fns)\n            pf = ParquetFile(piece, open_with=fs.open)\n            relpath = piece.replace(base, \"\").lstrip(\"/\")\n            for rg in pf.row_groups:\n                for ch in rg.columns:\n                    ch.file_path = relpath\n            pf.file_scheme = scheme\n            pf.cats = paths_to_cats(fns, scheme)\n            pf.fn = base\n            if null_index_name and \"__index_level_0__\" in pf.columns:\n                # See \"Handling a None-labeled index\" comment above\n                index = [\"__index_level_0__\"]\n                columns += index\n            return pf.to_pandas(columns, categories, index=index)\n        else:\n            if isinstance(pf, tuple):\n                if isinstance(pf[0], list):\n                    pf = _determine_pf_parts(fs, pf[0], pf[1], **kwargs)[1]\n                else:\n                    pf = ParquetFile(\n                        pf[0], open_with=fs.open, sep=fs.sep, **kwargs.get(\"file\", {})\n                    )\n                pf._dtypes = lambda *args: pf.dtypes  # ugly patch, could be fixed\n                pf.fmd.row_groups = None\n            rg_piece = pf.row_groups[piece]\n            if null_index_name:\n                if \"__index_level_0__\" in pf.columns:\n                    # See \"Handling a None-labeled index\" comment above\n                    index = [\"__index_level_0__\"]\n                    columns += index\n                    pf.fmd.key_value_metadata = None\n            else:\n                pf.fmd.key_value_metadata = None\n            return pf.read_row_group_file(\n                rg_piece, columns, categories, index=index, **kwargs.get(\"read\", {})\n            )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.initialize_write_FastParquetEngine.initialize_write.return._fmd_schema_i_offset_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.initialize_write_FastParquetEngine.initialize_write.return._fmd_schema_i_offset_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/fastparquet.py", "file_name": "fastparquet.py", "file_type": "text/x-python", "category": "implementation", "start_line": 503, "end_line": 584, "span_ids": ["FastParquetEngine.initialize_write"], "tokens": 640}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FastParquetEngine(Engine):\n\n    @classmethod\n    def initialize_write(\n        cls,\n        df,\n        fs,\n        path,\n        append=False,\n        partition_on=None,\n        ignore_divisions=False,\n        division_info=None,\n        schema=None,\n        **kwargs\n    ):\n        if append and division_info is None:\n            ignore_divisions = True\n        fs.mkdirs(path, exist_ok=True)\n        object_encoding = kwargs.pop(\"object_encoding\", \"utf8\")\n        index_cols = kwargs.pop(\"index_cols\", [])\n        if object_encoding == \"infer\" or (\n            isinstance(object_encoding, dict) and \"infer\" in object_encoding.values()\n        ):\n            raise ValueError(\n                '\"infer\" not allowed as object encoding, '\n                \"because this requires data in memory.\"\n            )\n\n        if append:\n            try:\n                # to append to a dataset without _metadata, need to load\n                # _common_metadata or any data file here\n                pf = fastparquet.api.ParquetFile(path, open_with=fs.open, 
sep=fs.sep)\n except (IOError, ValueError):\n # append for create\n append = False\n if append:\n if pf.file_scheme not in [\"hive\", \"empty\", \"flat\"]:\n raise ValueError(\n \"Requested file scheme is hive, but existing file scheme is not.\"\n )\n elif (set(pf.columns) != set(df.columns) - set(partition_on)) or (\n set(partition_on) != set(pf.cats)\n ):\n raise ValueError(\n \"Appended columns not the same.\\n\"\n \"Previous: {} | New: {}\".format(pf.columns, list(df.columns))\n )\n elif (pd.Series(pf.dtypes).loc[pf.columns] != df[pf.columns].dtypes).any():\n raise ValueError(\n \"Appended dtypes differ.\\n{}\".format(\n set(pf.dtypes.items()) ^ set(df.dtypes.iteritems())\n )\n )\n else:\n df = df[pf.columns + partition_on]\n\n fmd = pf.fmd\n i_offset = fastparquet.writer.find_max_part(fmd.row_groups)\n if not ignore_divisions:\n if not set(index_cols).intersection([division_info[\"name\"]]):\n ignore_divisions = True\n if not ignore_divisions:\n minmax = fastparquet.api.sorted_partitioned_columns(pf)\n old_end = minmax[index_cols[0]][\"max\"][-1]\n divisions = division_info[\"divisions\"]\n if divisions[0] < old_end:\n raise ValueError(\n \"Appended divisions overlapping with previous ones.\"\n \"\\n\"\n \"Previous: {} | New: {}\".format(old_end, divisions[0])\n )\n else:\n fmd = fastparquet.writer.make_metadata(\n df._meta,\n object_encoding=object_encoding,\n index_cols=index_cols,\n ignore_columns=partition_on,\n **kwargs\n )\n i_offset = 0\n\n schema = None # ArrowEngine compatibility\n return (fmd, schema, i_offset)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.write_partition_FastParquetEngine.write_partition.if_return_metadata_.else_.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.write_partition_FastParquetEngine.write_partition.if_return_metadata_.else_.return._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/fastparquet.py", "file_name": "fastparquet.py", "file_type": "text/x-python", "category": "implementation", "start_line": 586, "end_line": 633, "span_ids": ["FastParquetEngine.write_partition"], "tokens": 292}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FastParquetEngine(Engine):\n\n @classmethod\n def write_partition(\n cls,\n df,\n path,\n fs,\n filename,\n partition_on,\n return_metadata,\n fmd=None,\n compression=None,\n **kwargs\n ):\n fmd = copy.copy(fmd)\n if not len(df):\n # Write nothing for empty partitions\n rgs = []\n elif partition_on:\n mkdirs = lambda x: fs.mkdirs(x, exist_ok=True)\n if LooseVersion(fastparquet.__version__) >= \"0.1.4\":\n rgs = partition_on_columns(\n df, partition_on, path, filename, fmd, compression, fs.open, mkdirs\n )\n else:\n rgs = partition_on_columns(\n df,\n partition_on,\n path,\n filename,\n fmd,\n fs.sep,\n compression,\n fs.open,\n mkdirs,\n )\n else:\n with fs.open(fs.sep.join([path, 
filename]), \"wb\") as fil:\n fmd.num_rows = len(df)\n rg = make_part_file(\n fil, df, fmd.schema, compression=compression, fmd=fmd\n )\n for chunk in rg.columns:\n chunk.file_path = filename\n rgs = [rg]\n if return_metadata:\n return rgs\n else:\n return []", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.write_metadata_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.write_metadata_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/fastparquet.py", "file_name": "fastparquet.py", "file_type": "text/x-python", "category": "implementation", "start_line": 635, "end_line": 654, "span_ids": ["FastParquetEngine.write_metadata"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FastParquetEngine(Engine):\n\n @classmethod\n def write_metadata(cls, parts, fmd, fs, path, append=False, **kwargs):\n _meta = copy.copy(fmd)\n if parts:\n for rg in parts:\n if rg is not None:\n if isinstance(rg, list):\n for r in rg:\n _meta.row_groups.append(r)\n else:\n _meta.row_groups.append(rg)\n fn = fs.sep.join([path, \"_metadata\"])\n fastparquet.writer.write_common_metadata(\n fn, _meta, open_with=fs.open, no_row_groups=False\n )\n\n # if appending, could skip this, but would need to check existence\n fn = fs.sep.join([path, \"_common_metadata\"])\n fastparquet.writer.write_common_metadata(fn, _meta, open_with=fs.open)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py_re_Engine.read_metadata.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py_re_Engine.read_metadata.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 68, "span_ids": ["imports", "Engine.read_metadata", "Engine"], "tokens": 542}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import re\n\n\nclass Engine:\n \"\"\" The API necessary to provide a new Parquet reader/writer \"\"\"\n\n @classmethod\n def read_metadata(\n cls,\n fs,\n paths,\n categories=None,\n index=None,\n gather_statistics=None,\n filters=None,\n **kwargs\n ):\n \"\"\"Gather metadata about a Parquet Dataset to prepare for a 
read\n\n This function is called once in the user's Python session to gather\n important metadata about the parquet dataset.\n\n Parameters\n ----------\n fs: FileSystem\n paths: List[str]\n A list of paths to files (or their equivalents)\n categories: list, dict or None\n Column(s) containing categorical data.\n index: str, List[str], or False\n The column name(s) to be used as the index.\n If set to ``None``, pandas metadata (if available) can be used\n to reset the value in this function\n gather_statistics: bool\n Whether or not to gather statistics data. If ``None``, we only\n gather statistics data if there is a _metadata file available to\n query (cheaply)\n filters: list\n List of filters to apply, like ``[('x', '>', 0), ...]``.\n **kwargs: dict (of dicts)\n User-specified arguments to pass on to backend.\n Top level key can be used by engine to select appropriate dict.\n\n Returns\n -------\n meta: pandas.DataFrame\n An empty DataFrame object to use for metadata.\n Should have appropriate column names and dtypes but need not have\n any actual data\n statistics: Optional[List[Dict]]\n Either None, if no statistics were found, or a list of dictionaries\n of statistics data, one dict for every partition (see the next\n return value). The statistics should look like the following:\n\n [\n {'num-rows': 1000, 'columns': [\n {'name': 'id', 'min': 0, 'max': 100, 'null-count': 0},\n {'name': 'x', 'min': 0.0, 'max': 1.0, 'null-count': 5},\n ]},\n ...\n ]\n parts: List[object]\n A list of objects to be passed to ``Engine.read_partition``.\n Each object should represent a piece of data (usually a row-group).\n The type of each object can be anything, as long as the\n engine's read_partition function knows how to interpret it.\n \"\"\"\n raise NotImplementedError()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py_Engine.read_partition_Engine.read_partition.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py_Engine.read_partition_Engine.read_partition.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 70, "end_line": 95, "span_ids": ["Engine.read_partition"], "tokens": 195}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Engine:\n\n @classmethod\n def read_partition(cls, fs, piece, columns, index, **kwargs):\n \"\"\"Read a single piece of a Parquet dataset into a Pandas DataFrame\n\n This function is called many times in individual tasks\n\n Parameters\n ----------\n fs: FileSystem\n piece: object\n This is some token that is returned by Engine.read_metadata.\n Typically it represents a row group in a Parquet dataset\n columns: List[str]\n List of column names to pull out of that row group\n index: str, List[str], or False\n The index name(s).\n **kwargs:\n Includes 
`\"kwargs\"` values stored within the `parts` output\n of `engine.read_metadata`. May also include arguments to be\n passed to the backend (if stored under a top-level `\"read\"` key).\n\n Returns\n -------\n A Pandas DataFrame\n \"\"\"\n raise NotImplementedError()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py_Engine.initialize_write_Engine.initialize_write.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py_Engine.initialize_write_Engine.initialize_write.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 97, "end_line": 137, "span_ids": ["Engine.initialize_write"], "tokens": 255}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Engine:\n\n @classmethod\n def initialize_write(\n cls,\n df,\n fs,\n path,\n append=False,\n partition_on=None,\n ignore_divisions=False,\n division_info=None,\n **kwargs\n ):\n \"\"\"Perform engine-specific initialization steps for this dataset\n\n Parameters\n ----------\n df: dask.dataframe.DataFrame\n fs: FileSystem\n path: str\n Destination directory for data. Prepend with protocol like ``s3://``\n or ``hdfs://`` for remote data.\n append: bool\n If True, may use existing metadata (if any) and perform checks\n against the new data being stored.\n partition_on: List(str)\n Column(s) to use for dataset partitioning in parquet.\n ignore_divisions: bool\n Whether or not to ignore old divisions when appending. 
Otherwise,\n overlapping divisions will lead to an error being raised.\n division_info: dict\n Dictionary containing the divisions and corresponding column name.\n **kwargs: dict\n Other keyword arguments (including `index_cols`)\n\n Returns\n -------\n tuple:\n engine-specific instance\n list of filenames, one per partition\n \"\"\"\n raise NotImplementedError", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py_Engine.write_partition_Engine.write_partition.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py_Engine.write_partition_Engine.write_partition.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 139, "end_line": 170, "span_ids": ["Engine.write_partition"], "tokens": 254}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Engine:\n\n @classmethod\n def write_partition(\n cls, df, path, fs, filename, partition_on, return_metadata, **kwargs\n ):\n \"\"\"\n Output a partition of a dask.DataFrame. This will correspond to\n one output file, unless partition_on is set, in which case, it will\n correspond to up to one file in each sub-directory.\n\n Parameters\n ----------\n df: dask.dataframe.DataFrame\n path: str\n Destination directory for data. Prepend with protocol like ``s3://``\n or ``hdfs://`` for remote data.\n fs: FileSystem\n filename: str\n partition_on: List(str)\n Column(s) to use for dataset partitioning in parquet.\n return_metadata : bool\n Whether to return list of instances from this write, one for each\n output file. 
These will be passed to write_metadata if an output\n            metadata file is requested.\n        **kwargs: dict\n            Other keyword arguments (including `fmd` and `index_cols`)\n\n        Returns\n        -------\n        List of metadata-containing instances (if `return_metadata` is `True`)\n        or empty list\n        \"\"\"\n        raise NotImplementedError", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py_Engine.write_metadata_Engine.write_metadata.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py_Engine.write_metadata_Engine.write_metadata.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 172, "end_line": 196, "span_ids": ["Engine.write_metadata"], "tokens": 191}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Engine:\n\n    @classmethod\n    def write_metadata(cls, parts, meta, fs, path, append=False, **kwargs):\n        \"\"\"\n        Write the shared metadata file for a parquet dataset.\n\n        Parameters\n        ----------\n        parts: List\n            Contains metadata objects to write, of the type understood by the\n            specific implementation\n        meta: non-chunk metadata\n            Details that do not depend on the specifics of each chunk write,\n            typically the schema and pandas metadata, in a format the writer\n            can use.\n        fs: FileSystem\n        path: str\n            Output file to write to, usually ``\"_metadata\"`` in the root of\n            the output dataset\n        append: boolean\n            Whether or not to consolidate new metadata with existing (True)\n            or start from scratch (False)\n        **kwargs: dict\n            Other keyword arguments (including `compression`)\n        \"\"\"\n        raise NotImplementedError()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__parse_pandas_metadata__parse_pandas_metadata._0_8_0_allows_for_dupli": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__parse_pandas_metadata__parse_pandas_metadata._0_8_0_allows_for_dupli", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 199, "end_line": 257, "span_ids": ["_parse_pandas_metadata"], "tokens": 573}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def 
_parse_pandas_metadata(pandas_metadata):\n    \"\"\"Get the set of names from the pandas metadata section\n\n    Parameters\n    ----------\n    pandas_metadata : dict\n        Should conform to the pandas parquet metadata spec\n\n    Returns\n    -------\n    index_names : list\n        List of strings indicating the actual index names\n    column_names : list\n        List of strings indicating the actual column names\n    storage_name_mapping : dict\n        Pairs of storage names (e.g. the field names for\n        PyArrow) and actual names. The storage and field names will\n        differ for index names for certain writers (pyarrow > 0.8).\n    column_indexes_names : list\n        The names for ``df.columns.name`` or ``df.columns.names`` for\n        a MultiIndex in the columns\n\n    Notes\n    -----\n    This should support metadata written by at least\n\n    * fastparquet>=0.1.3\n    * pyarrow>=0.7.0\n    \"\"\"\n    index_storage_names = [\n        n[\"name\"] if isinstance(n, dict) else n\n        for n in pandas_metadata[\"index_columns\"]\n    ]\n    index_name_xpr = re.compile(r\"__index_level_\\d+__\")\n\n    # older metadata will not have a 'field_name' field so we fall back\n    # to the 'name' field\n    pairs = [\n        (x.get(\"field_name\", x[\"name\"]), x[\"name\"]) for x in pandas_metadata[\"columns\"]\n    ]\n\n    # Need to reconcile storage and real names. These will differ for\n    # pyarrow, which uses __index_level_\\d+__ for the storage name of indexes.\n    # The real name may be None (e.g. `df.index.name` is None).\n    pairs2 = []\n    for storage_name, real_name in pairs:\n        if real_name and index_name_xpr.match(real_name):\n            real_name = None\n        pairs2.append((storage_name, real_name))\n    index_names = [name for (storage_name, name) in pairs2 if name != storage_name]\n\n    # column_indexes represents df.columns.name\n    # It was added to the spec after pandas 0.21.0+, and implemented\n    # in PyArrow 0.8. It was added to fastparquet in 0.3.1.\n    column_index_names = pandas_metadata.get(\"column_indexes\", [{\"name\": None}])\n    column_index_names = [x[\"name\"] for x in column_index_names]\n\n    # Now we need to disambiguate between columns and index names. PyArrow\n    # 0.8.0+ allows for duplicates between df.index.names and df.columns\n    # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__parse_pandas_metadata.if_not_index_names___parse_pandas_metadata.return.index_names_column_names": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__parse_pandas_metadata.if_not_index_names___parse_pandas_metadata.return.index_names_column_names", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 258, "end_line": 280, "span_ids": ["_parse_pandas_metadata"], "tokens": 282}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _parse_pandas_metadata(pandas_metadata):\n    # ... other code\n    if not index_names:\n        # For PyArrow < 0.8, and any fastparquet version. 
This relies on the facts that\n # 1. Those versions used the real index name as the index storage name\n # 2. Those versions did not allow for duplicate index / column names\n # So we know that if a name is in index_storage_names, it must be an\n # index name\n if index_storage_names and isinstance(index_storage_names[0], dict):\n # Cannot handle dictionary case\n index_storage_names = []\n index_names = list(index_storage_names) # make a copy\n index_storage_names2 = set(index_storage_names)\n column_names = [\n name for (storage_name, name) in pairs if name not in index_storage_names2\n ]\n else:\n # For newer PyArrows the storage names differ from the index names\n # iff it's an index level. Though this is a fragile assumption for\n # other systems...\n column_names = [name for (storage_name, name) in pairs2 if name == storage_name]\n\n storage_name_mapping = dict(pairs2) # TODO: handle duplicates gracefully\n\n return index_names, column_names, storage_name_mapping, column_index_names", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__normalize_index_columns__normalize_index_columns.return.column_names_index_names": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__normalize_index_columns__normalize_index_columns.return.column_names_index_names", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 283, "end_line": 342, "span_ids": ["_normalize_index_columns"], "tokens": 478}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _normalize_index_columns(user_columns, data_columns, user_index, data_index):\n \"\"\"Normalize user and file-provided column and index names\n\n Parameters\n ----------\n user_columns : None, str or list of str\n data_columns : list of str\n user_index : None, str, or list of str\n data_index : list of str\n\n Returns\n -------\n column_names : list of str\n index_names : list of str\n \"\"\"\n specified_columns = user_columns is not None\n specified_index = user_index is not None\n\n if user_columns is None:\n user_columns = list(data_columns)\n elif isinstance(user_columns, str):\n user_columns = [user_columns]\n else:\n user_columns = list(user_columns)\n\n if user_index is None:\n user_index = data_index\n elif user_index is False:\n # When index is False, use no index and all fields should be treated as\n # columns (unless `columns` provided).\n user_index = []\n data_columns = data_index + data_columns\n elif isinstance(user_index, str):\n user_index = [user_index]\n else:\n user_index = list(user_index)\n\n if specified_index and not specified_columns:\n # Only `index` provided. 
Use specified index, and all column fields\n # that weren't specified as indices\n index_names = user_index\n column_names = [x for x in data_columns if x not in index_names]\n elif specified_columns and not specified_index:\n # Only `columns` provided. Use specified columns, and all index fields\n # that weren't specified as columns\n column_names = user_columns\n index_names = [x for x in data_index if x not in column_names]\n elif specified_index and specified_columns:\n # Both `index` and `columns` provided. Use as specified, but error if\n # they intersect.\n column_names = user_columns\n index_names = user_index\n if set(column_names).intersection(index_names):\n raise ValueError(\"Specified index and column names must not intersect\")\n else:\n # Use default columns and index from the metadata\n column_names = data_columns\n index_names = data_index\n\n return column_names, index_names", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__analyze_paths__analyze_paths._join_path._scrub.return.p": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__analyze_paths__analyze_paths._join_path._scrub.return.p", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 345, "end_line": 362, "span_ids": ["_analyze_paths"], "tokens": 199}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _analyze_paths(file_list, fs, root=False):\n \"\"\"Consolidate list of file-paths into parquet relative paths\n\n Note: This function was mostly copied from dask/fastparquet to\n use in both `FastParquetEngine` and `ArrowEngine`.\"\"\"\n\n def _join_path(*path):\n def _scrub(i, p):\n # Convert path to standard form\n # this means windows path separators are converted to linux\n p = p.replace(fs.sep, \"/\")\n if p == \"\": # empty path is assumed to be a relative path\n return \".\"\n if p[-1] == \"/\": # trailing slashes are not allowed\n p = p[:-1]\n if i > 0 and p[0] == \"/\": # only the first path can start with /\n p = p[1:]\n return p\n # ... other code\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__analyze_paths._join_path.abs_prefix__analyze_paths._join_path.return.joined": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__analyze_paths._join_path.abs_prefix__analyze_paths._join_path.return.joined", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 364, "end_line": 403, "span_ids": ["_analyze_paths"], "tokens": 300}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _analyze_paths(file_list, fs, root=False):\n\n def _join_path(*path):\n # ... other code\n\n abs_prefix = \"\"\n if path and path[0]:\n if path[0][0] == \"/\":\n abs_prefix = \"/\"\n path = list(path)\n path[0] = path[0][1:]\n elif fs.sep == \"\\\\\" and path[0][1:].startswith(\":/\"):\n # If windows, then look for the \"c:/\" prefix\n abs_prefix = path[0][0:3]\n path = list(path)\n path[0] = path[0][3:]\n\n _scrubbed = []\n for i, p in enumerate(path):\n _scrubbed.extend(_scrub(i, p).split(\"/\"))\n simpler = []\n for s in _scrubbed:\n if s == \".\":\n pass\n elif s == \"..\":\n if simpler:\n if simpler[-1] == \"..\":\n simpler.append(s)\n else:\n simpler.pop()\n elif abs_prefix:\n raise Exception(\"can not get parent of root\")\n else:\n simpler.append(s)\n else:\n simpler.append(s)\n\n if not simpler:\n if abs_prefix:\n joined = abs_prefix\n else:\n joined = \".\"\n else:\n joined = abs_prefix + (\"/\".join(simpler))\n return joined\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__analyze_paths.path_parts_list_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__analyze_paths.path_parts_list_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 405, "end_line": 434, "span_ids": ["_analyze_paths"], "tokens": 271}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _analyze_paths(file_list, fs, root=False):\n # ... 
other code\n\n    path_parts_list = [_join_path(fn).split(\"/\") for fn in file_list]\n    if root is False:\n        basepath = path_parts_list[0][:-1]\n        for i, path_parts in enumerate(path_parts_list):\n            j = len(path_parts) - 1\n            for k, (base_part, path_part) in enumerate(zip(basepath, path_parts)):\n                if base_part != path_part:\n                    j = k\n                    break\n            basepath = basepath[:j]\n        l = len(basepath)\n\n    else:\n        basepath = _join_path(root).split(\"/\")\n        l = len(basepath)\n        assert all(\n            p[:l] == basepath for p in path_parts_list\n        ), \"All paths must begin with the given root\"\n    l = len(basepath)\n    out_list = []\n    for path_parts in path_parts_list:\n        out_list.append(\n            \"/\".join(path_parts[l:])\n        )  # use '/'.join() instead of _join_path to be consistent with split('/')\n\n    return (\n        \"/\".join(basepath),\n        out_list,\n    )  # use '/'.join() instead of _join_path to be consistent with split('/')", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py_np_read_sql_table._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py_np_read_sql_table._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/sql.py", "file_name": "sql.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 101, "span_ids": ["imports", "read_sql_table"], "tokens": 907}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\nimport pandas as pd\n\nimport dask\nfrom dask.dataframe.utils import PANDAS_GT_0240, PANDAS_VERSION\nfrom dask.delayed import tokenize\nfrom .io import from_delayed, from_pandas\nfrom ... import delayed\nfrom .. import methods\n\n\ndef read_sql_table(\n    table,\n    uri,\n    index_col,\n    divisions=None,\n    npartitions=None,\n    limits=None,\n    columns=None,\n    bytes_per_chunk=\"256 MiB\",\n    head_rows=5,\n    schema=None,\n    meta=None,\n    engine_kwargs=None,\n    **kwargs,\n):\n    \"\"\"\n    Create dataframe from an SQL table.\n\n    If neither divisions nor npartitions is given, the memory footprint of the\n    first few rows will be determined, and partitions of size ~256MB will\n    be used.\n\n    Parameters\n    ----------\n    table : string or sqlalchemy expression\n        Select columns from here.\n    uri : string\n        Full sqlalchemy URI for the database connection\n    index_col : string\n        Column which becomes the index, and defines the partitioning. Should\n        be an indexed column in the SQL server, and any orderable type. 
If the\n        type is number or time, then partition boundaries can be inferred from\n        npartitions or bytes_per_chunk; otherwise you must supply explicit\n        ``divisions=``.\n        ``index_col`` could be a function to return a value, e.g.,\n        ``sql.func.abs(sql.column('value')).label('abs(value)')``.\n        ``index_col=sql.func.abs(sql.column(\"value\")).label(\"abs(value)\")``, or\n        ``index_col=cast(sql.column(\"id\"),types.BigInteger).label(\"id\")`` to convert\n        the textfield ``id`` to ``BigInteger``.\n\n        Note ``sql``, ``cast``, ``types`` methods come from the ``sqlalchemy`` module.\n\n        Labeling columns created by functions or arithmetic operations is\n        required.\n    divisions: sequence\n        Values of the index column to split the table by. If given, this will\n        override npartitions and bytes_per_chunk. The divisions are the value\n        boundaries of the index column used to define the partitions. For\n        example, ``divisions=list('acegikmoqsuwz')`` could be used to partition\n        a string column lexicographically into 12 partitions, with the implicit\n        assumption that each partition contains similar numbers of records.\n    npartitions : int\n        Number of partitions, if divisions is not given. Will split the values\n        of the index column linearly between limits, if given, or the column\n        max/min. The index column must be numeric or time for this to work.\n    limits: 2-tuple or None\n        Manually give upper and lower range of values for use with npartitions;\n        if None, first fetches max/min from the DB. Upper limit, if\n        given, is inclusive.\n    columns : list of strings or None\n        Which columns to select; if None, gets all; can include sqlalchemy\n        functions, e.g.,\n        ``sql.func.abs(sql.column('value')).label('abs(value)')``.\n        Labeling columns created by functions or arithmetic operations is\n        recommended.\n    bytes_per_chunk : str, int\n        If both divisions and npartitions are None, this is the target size of\n        each partition, in bytes\n    head_rows : int\n        How many rows to load for inferring the data-types, unless passing meta\n    meta : empty DataFrame or None\n        If provided, do not attempt to infer dtypes, but use these, coercing\n        all chunks on load\n    schema : str or None\n        If using a table name, pass this to sqlalchemy to select which DB\n        schema to use within the URI connection\n    engine_kwargs : dict or None\n        Specific db engine parameters for sqlalchemy\n    kwargs : dict\n        Additional parameters to pass to `pd.read_sql()`\n\n    Returns\n    -------\n    dask.dataframe\n\n    Examples\n    --------\n    >>> df = dd.read_sql_table('accounts', 'sqlite:///path/to/bank.db',\n    ...                  npartitions=10, index_col='id')  # doctest: +SKIP\n    \"\"\"\n    # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py_read_sql_table.if_divisions_is_None___read_sql_chunk.if_df_empty_.else_.return.df_astype_meta_dtypes_to_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py_read_sql_table.if_divisions_is_None___read_sql_chunk.if_df_empty_.else_.return.df_astype_meta_dtypes_to_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/sql.py", "file_name": "sql.py", "file_type": "text/x-python", "category": "implementation", "start_line": 162, "end_line": 230, "span_ids": ["_read_sql_chunk", "read_sql_table"], "tokens": 610}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def read_sql_table(\n table,\n uri,\n index_col,\n divisions=None,\n npartitions=None,\n limits=None,\n columns=None,\n bytes_per_chunk=\"256 MiB\",\n head_rows=5,\n schema=None,\n meta=None,\n engine_kwargs=None,\n **kwargs,\n):\n # ... other code\n\n if divisions is None:\n if limits is None:\n # calculate max and min for given index\n q = sql.select([sql.func.max(index), sql.func.min(index)]).select_from(\n table\n )\n minmax = pd.read_sql(q, engine)\n maxi, mini = minmax.iloc[0]\n dtype = minmax.dtypes[\"max_1\"]\n else:\n mini, maxi = limits\n dtype = pd.Series(limits).dtype\n\n if npartitions is None:\n q = sql.select([sql.func.count(index)]).select_from(table)\n count = pd.read_sql(q, engine)[\"count_1\"][0]\n npartitions = (\n int(\n round(\n count * bytes_per_row / dask.utils.parse_bytes(bytes_per_chunk)\n )\n )\n or 1\n )\n if dtype.kind == \"M\":\n divisions = methods.tolist(\n pd.date_range(\n start=mini,\n end=maxi,\n freq=\"%iS\" % ((maxi - mini).total_seconds() / npartitions),\n )\n )\n divisions[0] = mini\n divisions[-1] = maxi\n elif dtype.kind in [\"i\", \"u\", \"f\"]:\n divisions = np.linspace(mini, maxi, npartitions + 1).tolist()\n else:\n raise TypeError(\n 'Provided index column is of type \"{}\". 
If divisions is not provided the '\n \"index column type must be numeric or datetime.\".format(dtype)\n )\n\n parts = []\n lowers, uppers = divisions[:-1], divisions[1:]\n for i, (lower, upper) in enumerate(zip(lowers, uppers)):\n cond = index <= upper if i == len(lowers) - 1 else index < upper\n q = sql.select(columns).where(sql.and_(index >= lower, cond)).select_from(table)\n parts.append(\n delayed(_read_sql_chunk)(\n q, uri, meta, engine_kwargs=engine_kwargs, **kwargs\n )\n )\n\n engine.dispose()\n\n return from_delayed(parts, meta, divisions=divisions)\n\n\ndef _read_sql_chunk(q, uri, meta, engine_kwargs=None, **kwargs):\n import sqlalchemy as sa\n\n engine_kwargs = engine_kwargs or {}\n engine = sa.create_engine(uri, **engine_kwargs)\n df = pd.read_sql(q, engine, **kwargs)\n engine.dispose()\n if df.empty:\n return meta\n else:\n return df.astype(meta.dtypes.to_dict(), copy=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py_to_sql_to_sql._Store_Dask_Dataframe_t": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py_to_sql_to_sql._Store_Dask_Dataframe_t", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/sql.py", "file_name": "sql.py", "file_type": "text/x-python", "category": "implementation", "start_line": 233, "end_line": 353, "span_ids": ["to_sql"], "tokens": 1129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_sql(\n df,\n name: str,\n uri: str,\n schema=None,\n if_exists: str = \"fail\",\n index: bool = True,\n index_label=None,\n chunksize=None,\n dtype=None,\n method=None,\n compute=True,\n parallel=False,\n):\n \"\"\"Store Dask Dataframe to a SQL table\n\n An empty table is created based on the \"meta\" DataFrame (and conforming to the caller's \"if_exists\" preference), and\n then each block calls pd.DataFrame.to_sql (with `if_exists=\"append\"`).\n\n Databases supported by SQLAlchemy [1]_ are supported. Tables can be\n newly created, appended to, or overwritten.\n\n Parameters\n ----------\n name : str\n Name of SQL table.\n uri : string\n Full sqlalchemy URI for the database connection\n schema : str, optional\n Specify the schema (if database flavor supports this). If None, use\n default schema.\n if_exists : {'fail', 'replace', 'append'}, default 'fail'\n How to behave if the table already exists.\n\n * fail: Raise a ValueError.\n * replace: Drop the table before inserting new values.\n * append: Insert new values to the existing table.\n\n index : bool, default True\n Write DataFrame index as a column. Uses `index_label` as the column\n name in the table.\n index_label : str or sequence, default None\n Column label for index column(s). 
If None is given (default) and\n `index` is True, then the index names are used.\n A sequence should be given if the DataFrame uses MultiIndex.\n chunksize : int, optional\n Specify the number of rows in each batch to be written at a time.\n By default, all rows will be written at once.\n dtype : dict or scalar, optional\n Specifying the datatype for columns. If a dictionary is used, the\n keys should be the column names and the values should be the\n SQLAlchemy types or strings for the sqlite3 legacy mode. If a\n scalar is provided, it will be applied to all columns.\n method : {None, 'multi', callable}, optional\n Controls the SQL insertion clause used:\n\n * None : Uses standard SQL ``INSERT`` clause (one per row).\n * 'multi': Pass multiple values in a single ``INSERT`` clause.\n * callable with signature ``(pd_table, conn, keys, data_iter)``.\n\n Details and a sample callable implementation can be found in the\n section :ref:`insert method `.\n compute : bool, default True\n When true, call dask.compute and perform the load into SQL; otherwise, return a Dask object (or array of\n per-block objects when parallel=True)\n parallel : bool, default False\n When true, have each block append itself to the DB table concurrently. This can result in DB rows being in a\n different order than the source DataFrame's corresponding rows. When false, load each block into the SQL DB in\n sequence.\n\n Raises\n ------\n ValueError\n When the table already exists and `if_exists` is 'fail' (the\n default).\n\n See Also\n --------\n read_sql : Read a DataFrame from a table.\n\n Notes\n -----\n Timezone aware datetime columns will be written as\n ``Timestamp with timezone`` type with SQLAlchemy if supported by the\n database. Otherwise, the datetimes will be stored as timezone unaware\n timestamps local to the original timezone.\n\n .. versionadded:: 0.24.0\n\n References\n ----------\n .. [1] https://docs.sqlalchemy.org\n .. [2] https://www.python.org/dev/peps/pep-0249/\n\n Examples\n --------\n Create a table from scratch with 4 rows.\n\n >>> import pandas as pd\n >>> df = pd.DataFrame([ {'i':i, 's':str(i)*2 } for i in range(4) ])\n >>> from dask.dataframe import from_pandas\n >>> ddf = from_pandas(df, npartitions=2)\n >>> ddf # doctest: +SKIP\n Dask DataFrame Structure:\n i s\n npartitions=2\n 0 int64 object\n 2 ... ...\n 3 ... ...\n Dask Name: from_pandas, 2 tasks\n\n >>> from dask.utils import tmpfile\n >>> from sqlalchemy import create_engine # doctest: +SKIP\n >>> with tmpfile() as f: # doctest: +SKIP\n ... db = 'sqlite:///%s' %f # doctest: +SKIP\n ... ddf.to_sql('test', db) # doctest: +SKIP\n ... engine = create_engine(db, echo=False) # doctest: +SKIP\n ... result = engine.execute(\"SELECT * FROM test\").fetchall() # doctest: +SKIP\n >>> result # doctest: +SKIP\n [(0, 0, '00'), (1, 1, '11'), (2, 2, '22'), (3, 3, '33')]\n \"\"\"\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_from_io_import_BytesIO_timeseries._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_from_io_import_BytesIO_timeseries._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 89, "span_ids": ["parse_filename", "imports", "normalize_text", "impl:7"], "tokens": 749}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from io import BytesIO\nimport os\nimport gzip\nfrom time import sleep\nfrom unittest import mock\n\nimport pytest\n\npd = pytest.importorskip(\"pandas\")\ndd = pytest.importorskip(\"dask.dataframe\")\n\nfrom tlz import partition_all, valmap\n\nimport dask\nimport dask.dataframe as dd\nfrom dask.dataframe._compat import tm\nfrom dask.base import compute_as_if_collection\nfrom dask.core import flatten\nfrom dask.dataframe.io.csv import (\n text_blocks_to_pandas,\n pandas_read_text,\n auto_blocksize,\n block_mask,\n)\nfrom dask.dataframe.utils import assert_eq, has_known_categories\nfrom dask.bytes.core import read_bytes\nfrom dask.bytes.utils import compress\nfrom dask.utils import filetexts, filetext, tmpfile, tmpdir\nfrom fsspec.compression import compr\n\nfmt_bs = [(fmt, None) for fmt in compr] + [(fmt, 10) for fmt in compr]\n\n\ndef normalize_text(s):\n return \"\\n\".join(map(str.strip, s.strip().split(\"\\n\")))\n\n\ndef parse_filename(path):\n return os.path.split(path)[1]\n\n\ncsv_text = \"\"\"\nname,amount\nAlice,100\nBob,-200\nCharlie,300\nDennis,400\nEdith,-500\nFrank,600\nAlice,200\nFrank,-200\nBob,600\nAlice,400\nFrank,200\nAlice,300\nEdith,600\n\"\"\".strip()\n\ntsv_text = csv_text.replace(\",\", \"\\t\")\n\ntsv_text2 = \"\"\"\nname amount\nAlice 100\nBob -200\nCharlie 300\nDennis 400\nEdith -500\nFrank 600\nAlice 200\nFrank -200\nBob 600\nAlice 400\nFrank 200\nAlice 300\nEdith 600\n\"\"\".strip()\n\ntimeseries = \"\"\"\nDate,Open,High,Low,Close,Volume,Adj Close\n2015-08-28,198.50,199.839996,197.919998,199.240005,143298900,199.240005\n2015-08-27,197.020004,199.419998,195.210007,199.160004,266244700,199.160004\n2015-08-26,192.080002,194.789993,188.369995,194.679993,328058100,194.679993\n2015-08-25,195.429993,195.449997,186.919998,187.229996,353966700,187.229996\n2015-08-24,197.630005,197.630005,182.399994,189.550003,478672400,189.550003\n2015-08-21,201.729996,203.940002,197.520004,197.630005,328271500,197.630005\n2015-08-20,206.509995,208.289993,203.899994,204.009995,185865600,204.009995\n2015-08-19,209.089996,210.009995,207.350006,208.279999,167316300,208.279999\n2015-08-18,210.259995,210.679993,209.699997,209.929993,70043800,209.929993\n\"\"\".strip()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": 
"TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_csv_files_test_pandas_read_text_with_header.assert_df_id_sum_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_csv_files_test_pandas_read_text_with_header.assert_df_id_sum_1_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 91, "end_line": 175, "span_ids": ["test_pandas_read_text_dtype_coercion", "test_pandas_read_text_with_header", "test_pandas_read_text_kwargs", "test_pandas_read_text", "impl:15"], "tokens": 770}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "csv_files = {\n \"2014-01-01.csv\": (\n b\"name,amount,id\\n\" b\"Alice,100,1\\n\" b\"Bob,200,2\\n\" b\"Charlie,300,3\\n\"\n ),\n \"2014-01-02.csv\": b\"name,amount,id\\n\",\n \"2014-01-03.csv\": (\n b\"name,amount,id\\n\" b\"Dennis,400,4\\n\" b\"Edith,500,5\\n\" b\"Frank,600,6\\n\"\n ),\n}\n\ntsv_files = {k: v.replace(b\",\", b\"\\t\") for (k, v) in csv_files.items()}\n\nfwf_files = {\n \"2014-01-01.csv\": (\n b\" name amount id\\n\"\n b\" Alice 100 1\\n\"\n b\" Bob 200 2\\n\"\n b\" Charlie 300 3\\n\"\n ),\n \"2014-01-02.csv\": b\" name amount id\\n\",\n \"2014-01-03.csv\": (\n b\" name amount id\\n\"\n b\" Dennis 400 4\\n\"\n b\" Edith 500 5\\n\"\n b\" Frank 600 6\\n\"\n ),\n}\n\nexpected = pd.concat([pd.read_csv(BytesIO(csv_files[k])) for k in sorted(csv_files)])\n\ncomment_header = b\"\"\"# some header lines\n# that may be present\n# in a data file\n# before any data\"\"\"\n\ncsv_units_row = b\"str, int, int\\n\"\ntsv_units_row = csv_units_row.replace(b\",\", b\"\\t\")\n\n\n# Pandas has deprecated read_table\nread_table_mark = pytest.mark.filterwarnings(\"ignore:read_table:FutureWarning\")\n\n\ncsv_and_table = pytest.mark.parametrize(\n \"reader,files\",\n [\n (pd.read_csv, csv_files),\n pytest.param(pd.read_table, tsv_files, marks=read_table_mark),\n (pd.read_fwf, fwf_files),\n ],\n)\n\n\n@csv_and_table\ndef test_pandas_read_text(reader, files):\n b = files[\"2014-01-01.csv\"]\n df = pandas_read_text(reader, b, b\"\", {})\n assert list(df.columns) == [\"name\", \"amount\", \"id\"]\n assert len(df) == 3\n assert df.id.sum() == 1 + 2 + 3\n\n\n@csv_and_table\ndef test_pandas_read_text_kwargs(reader, files):\n b = files[\"2014-01-01.csv\"]\n df = pandas_read_text(reader, b, b\"\", {\"usecols\": [\"name\", \"id\"]})\n assert list(df.columns) == [\"name\", \"id\"]\n\n\n@csv_and_table\ndef test_pandas_read_text_dtype_coercion(reader, files):\n b = files[\"2014-01-01.csv\"]\n df = pandas_read_text(reader, b, b\"\", {}, {\"amount\": \"float\"})\n assert df.amount.dtype == \"float\"\n\n\n@csv_and_table\ndef test_pandas_read_text_with_header(reader, files):\n b = files[\"2014-01-01.csv\"]\n header, b = b.split(b\"\\n\", 1)\n header = header + b\"\\n\"\n df = pandas_read_text(reader, b, header, {})\n assert list(df.columns) == [\"name\", \"amount\", \"id\"]\n assert len(df) == 3\n assert df.id.sum() == 1 + 2 + 3", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_text_blocks_to_pandas_simple_test_text_blocks_to_pandas_simple.assert_eq_df_amount_sum_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_text_blocks_to_pandas_simple_test_text_blocks_to_pandas_simple.assert_eq_df_amount_sum_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 178, "end_line": 194, "span_ids": ["test_text_blocks_to_pandas_simple"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@csv_and_table\ndef test_text_blocks_to_pandas_simple(reader, files):\n blocks = [[files[k]] for k in sorted(files)]\n kwargs = {}\n head = pandas_read_text(reader, files[\"2014-01-01.csv\"], b\"\", {})\n header = files[\"2014-01-01.csv\"].split(b\"\\n\")[0] + b\"\\n\"\n\n df = text_blocks_to_pandas(reader, blocks, header, head, kwargs)\n assert isinstance(df, dd.DataFrame)\n assert list(df.columns) == [\"name\", \"amount\", \"id\"]\n\n values = text_blocks_to_pandas(reader, blocks, header, head, kwargs)\n assert isinstance(values, dd.DataFrame)\n assert hasattr(values, \"dask\")\n assert len(values.dask) == 3\n\n assert_eq(df.amount.sum(), 100 + 200 + 300 + 400 + 500 + 600)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_text_blocks_to_pandas_kwargs_test_text_blocks_to_pandas_kwargs.assert_result_columns_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_text_blocks_to_pandas_kwargs_test_text_blocks_to_pandas_kwargs.assert_result_columns_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 197, "end_line": 208, "span_ids": ["test_text_blocks_to_pandas_kwargs"], "tokens": 149}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@csv_and_table\ndef test_text_blocks_to_pandas_kwargs(reader, files):\n blocks = [files[k] for k in sorted(files)]\n blocks = [[b] for b in blocks]\n kwargs = {\"usecols\": [\"name\", \"id\"]}\n head = pandas_read_text(reader, files[\"2014-01-01.csv\"], b\"\", kwargs)\n header = files[\"2014-01-01.csv\"].split(b\"\\n\")[0] + b\"\\n\"\n\n df = text_blocks_to_pandas(reader, blocks, header, head, kwargs)\n assert list(df.columns) == 
[\"name\", \"id\"]\n result = df.compute()\n assert (result.columns == df.columns).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_text_blocks_to_pandas_blocked_test_text_blocks_to_pandas_blocked.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_text_blocks_to_pandas_blocked_test_text_blocks_to_pandas_blocked.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 211, "end_line": 235, "span_ids": ["test_text_blocks_to_pandas_blocked"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@csv_and_table\ndef test_text_blocks_to_pandas_blocked(reader, files):\n header = files[\"2014-01-01.csv\"].split(b\"\\n\")[0] + b\"\\n\"\n blocks = []\n for k in sorted(files):\n b = files[k]\n lines = b.split(b\"\\n\")\n blocks.append([b\"\\n\".join(bs) for bs in partition_all(2, lines)])\n\n df = text_blocks_to_pandas(reader, blocks, header, expected.head(), {})\n assert_eq(\n df.compute().reset_index(drop=True),\n expected.reset_index(drop=True),\n check_dtype=False,\n )\n\n expected2 = expected[[\"name\", \"id\"]]\n df = text_blocks_to_pandas(\n reader, blocks, header, expected2.head(), {\"usecols\": [\"name\", \"id\"]}\n )\n assert_eq(\n df.compute().reset_index(drop=True),\n expected2.reset_index(drop=True),\n check_dtype=False,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_skiprows_test_skiprows.with_filetexts_files_mod.assert_eq_df_expected_df": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_skiprows_test_skiprows.with_filetexts_files_mod.assert_eq_df_expected_df", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 238, "end_line": 249, "span_ids": ["test_skiprows"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"dd_read,pd_read,files\",\n [(dd.read_csv, pd.read_csv, csv_files), (dd.read_table, pd.read_table, tsv_files)],\n)\n@read_table_mark\ndef test_skiprows(dd_read, pd_read, files):\n files = {name: comment_header + b\"\\n\" + content for name, content in 
files.items()}\n skip = len(comment_header.splitlines())\n with filetexts(files, mode=\"b\"):\n df = dd_read(\"2014-01-*.csv\", skiprows=skip)\n expected_df = pd.concat([pd_read(n, skiprows=skip) for n in sorted(files)])\n assert_eq(df, expected_df, check_dtype=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_skiprows_as_list_test_skiprows_as_list.with_filetexts_files_mod.assert_eq_df_expected_df": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_skiprows_as_list_test_skiprows_as_list.with_filetexts_files_mod.assert_eq_df_expected_df", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 252, "end_line": 269, "span_ids": ["test_skiprows_as_list"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"dd_read,pd_read,files,units\",\n [\n (dd.read_csv, pd.read_csv, csv_files, csv_units_row),\n (dd.read_table, pd.read_table, tsv_files, tsv_units_row),\n ],\n)\n@read_table_mark\ndef test_skiprows_as_list(dd_read, pd_read, files, units):\n files = {\n name: (comment_header + b\"\\n\" + content.replace(b\"\\n\", b\"\\n\" + units, 1))\n for name, content in files.items()\n }\n skip = [0, 1, 2, 3, 5]\n with filetexts(files, mode=\"b\"):\n df = dd_read(\"2014-01-*.csv\", skiprows=skip)\n expected_df = pd.concat([pd_read(n, skiprows=skip) for n in sorted(files)])\n assert_eq(df, expected_df, check_dtype=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_csv_blocks_tsv_blocks._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_csv_blocks_tsv_blocks._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 272, "end_line": 280, "span_ids": ["impl:33"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "csv_blocks = [\n [b\"aa,bb\\n1,1.0\\n2,2.0\", b\"10,20\\n30,40\"],\n [b\"aa,bb\\n1,1.0\\n2,2.0\", b\"10,20\\n30,40\"],\n]\n\ntsv_blocks = [\n [b\"aa\\tbb\\n1\\t1.0\\n2\\t2.0\", b\"10\\t20\\n30\\t40\"],\n [b\"aa\\tbb\\n1\\t1.0\\n2\\t2.0\", b\"10\\t20\\n30\\t40\"],\n]", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_enforce_dtypes_test_enforce_dtypes.assert_all_df_dtypes_to_d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_enforce_dtypes_test_enforce_dtypes.assert_all_df_dtypes_to_d", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 283, "end_line": 292, "span_ids": ["test_enforce_dtypes"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"reader,blocks\", [(pd.read_csv, csv_blocks), (pd.read_table, tsv_blocks)]\n)\n@read_table_mark\ndef test_enforce_dtypes(reader, blocks):\n head = reader(BytesIO(blocks[0][0]), header=0)\n header = blocks[0][0].split(b\"\\n\")[0] + b\"\\n\"\n dfs = text_blocks_to_pandas(reader, blocks, header, head, {})\n dfs = dask.compute(dfs, scheduler=\"sync\")\n assert all(df.dtypes.to_dict() == head.dtypes.to_dict() for df in dfs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_enforce_columns_test_enforce_columns.with_pytest_raises_ValueE.dask_compute_dfs_schedu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_enforce_columns_test_enforce_columns.with_pytest_raises_ValueE.dask_compute_dfs_schedu", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 295, "end_line": 306, "span_ids": ["test_enforce_columns"], "tokens": 155}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"reader,blocks\", [(pd.read_csv, csv_blocks), (pd.read_table, tsv_blocks)]\n)\n@read_table_mark\ndef test_enforce_columns(reader, blocks):\n # Replace second header with different column name\n blocks = [blocks[0], [blocks[1][0].replace(b\"a\", b\"A\"), blocks[1][1]]]\n head = reader(BytesIO(blocks[0][0]), header=0)\n header = blocks[0][0].split(b\"\\n\")[0] + b\"\\n\"\n with pytest.raises(ValueError):\n dfs = text_blocks_to_pandas(reader, blocks, header, head, {}, enforce=True)\n dask.compute(*dfs, scheduler=\"sync\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": 
"TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py___test_read_csv.with_filetext_text_as_fn.assert_eq_result_pd_read": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py___test_read_csv.with_filetext_text_as_fn.assert_eq_result_pd_read", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 309, "end_line": 329, "span_ids": ["test_read_csv", "test_enforce_columns"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "#############################\n# read_csv and read_table #\n#############################\n\n\n@pytest.mark.parametrize(\n \"dd_read,pd_read,text,sep\",\n [\n (dd.read_csv, pd.read_csv, csv_text, \",\"),\n (dd.read_table, pd.read_table, tsv_text, \"\\t\"),\n (dd.read_table, pd.read_table, tsv_text2, r\"\\s+\"),\n ],\n)\n@read_table_mark\ndef test_read_csv(dd_read, pd_read, text, sep):\n with filetext(text) as fn:\n f = dd_read(fn, blocksize=30, lineterminator=os.linesep, sep=sep)\n assert list(f.columns) == [\"name\", \"amount\"]\n # index may be different\n result = f.compute(scheduler=\"sync\").reset_index(drop=True)\n assert_eq(result, pd_read(fn, sep=sep))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_large_skiprows_test_read_csv_large_skiprows.with_filetext_text_as_fn.assert_eq_actual_pd_read": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_large_skiprows_test_read_csv_large_skiprows.with_filetext_text_as_fn.assert_eq_actual_pd_read", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 332, "end_line": 344, "span_ids": ["test_read_csv_large_skiprows"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"dd_read,pd_read,text,skip\",\n [\n (dd.read_csv, pd.read_csv, csv_text, 7),\n (dd.read_table, pd.read_table, tsv_text, [1, 13]),\n ],\n)\n@read_table_mark\ndef test_read_csv_large_skiprows(dd_read, pd_read, text, skip):\n names = [\"name\", \"amount\"]\n with filetext(text) as fn:\n actual = dd_read(fn, skiprows=skip, names=names)\n assert_eq(actual, pd_read(fn, skiprows=skip, names=names))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", 
"class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_skiprows_only_in_first_partition_test_read_csv_skiprows_only_in_first_partition.with_filetext_text_as_fn.None_1.with_pytest_raises_ValueE.dd_read_fn_blocksize_30_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_skiprows_only_in_first_partition_test_read_csv_skiprows_only_in_first_partition.with_filetext_text_as_fn.None_1.with_pytest_raises_ValueE.dd_read_fn_blocksize_30_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 347, "end_line": 365, "span_ids": ["test_read_csv_skiprows_only_in_first_partition"], "tokens": 204}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"dd_read,pd_read,text,skip\",\n [\n (dd.read_csv, pd.read_csv, csv_text, 7),\n (dd.read_table, pd.read_table, tsv_text, [1, 12]),\n ],\n)\n@read_table_mark\ndef test_read_csv_skiprows_only_in_first_partition(dd_read, pd_read, text, skip):\n names = [\"name\", \"amount\"]\n with filetext(text) as fn:\n with pytest.warns(UserWarning, match=\"sample=blocksize\"):\n actual = dd_read(fn, blocksize=200, skiprows=skip, names=names).compute()\n assert_eq(actual, pd_read(fn, skiprows=skip, names=names))\n\n with pytest.warns(UserWarning):\n # if new sample does not contain all the skiprows, raise error\n with pytest.raises(ValueError):\n dd_read(fn, blocksize=30, skiprows=skip, names=names)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_files_test_read_csv_files.with_filetexts_files_mod.assert_eq_df_expected2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_files_test_read_csv_files.with_filetexts_files_mod.assert_eq_df_expected2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 368, "end_line": 381, "span_ids": ["test_read_csv_files"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"dd_read,pd_read,files\",\n [(dd.read_csv, pd.read_csv, csv_files), (dd.read_table, pd.read_table, tsv_files)],\n)\n@read_table_mark\ndef test_read_csv_files(dd_read, pd_read, files):\n with filetexts(files, mode=\"b\"):\n df = dd_read(\"2014-01-*.csv\")\n assert_eq(df, expected, check_dtype=False)\n\n fn = 
\"2014-01-01.csv\"\n df = dd_read(fn)\n expected2 = pd_read(BytesIO(files[fn]))\n assert_eq(df, expected2, check_dtype=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_files_list_test_read_csv_files_list.with_filetexts_files_mod.with_pytest_raises_ValueE.dd_read_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_files_list_test_read_csv_files_list.with_filetexts_files_mod.with_pytest_raises_ValueE.dd_read_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 384, "end_line": 397, "span_ids": ["test_read_csv_files_list"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"dd_read,pd_read,files\",\n [(dd.read_csv, pd.read_csv, csv_files), (dd.read_table, pd.read_table, tsv_files)],\n)\n@read_table_mark\ndef test_read_csv_files_list(dd_read, pd_read, files):\n with filetexts(files, mode=\"b\"):\n subset = sorted(files)[:2] # Just first 2\n sol = pd.concat([pd_read(BytesIO(files[k])) for k in subset])\n res = dd_read(subset)\n assert_eq(res, sol, check_dtype=False)\n\n with pytest.raises(ValueError):\n dd_read([])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_include_path_column_test_read_csv_include_path_column.with_filetexts_files_mod.assert_2014_01_03_csv_i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_include_path_column_test_read_csv_include_path_column.with_filetexts_files_mod.assert_2014_01_03_csv_i", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 400, "end_line": 414, "span_ids": ["test_read_csv_include_path_column"], "tokens": 139}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"dd_read,files\", [(dd.read_csv, csv_files), (dd.read_table, tsv_files)]\n)\n@read_table_mark\ndef test_read_csv_include_path_column(dd_read, files):\n with filetexts(files, mode=\"b\"):\n df = dd_read(\n \"2014-01-*.csv\",\n include_path_column=True,\n converters={\"path\": parse_filename},\n )\n filenames = df.path.compute().unique()\n assert 
\"2014-01-01.csv\" in filenames\n assert \"2014-01-02.csv\" not in filenames\n assert \"2014-01-03.csv\" in filenames", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_include_path_column_as_str_test_read_csv_include_path_column_as_str.with_filetexts_files_mod.assert_2014_01_03_csv_i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_include_path_column_as_str_test_read_csv_include_path_column_as_str.with_filetexts_files_mod.assert_2014_01_03_csv_i", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 417, "end_line": 431, "span_ids": ["test_read_csv_include_path_column_as_str"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"dd_read,files\", [(dd.read_csv, csv_files), (dd.read_table, tsv_files)]\n)\n@read_table_mark\ndef test_read_csv_include_path_column_as_str(dd_read, files):\n with filetexts(files, mode=\"b\"):\n df = dd_read(\n \"2014-01-*.csv\",\n include_path_column=\"filename\",\n converters={\"filename\": parse_filename},\n )\n filenames = df.filename.compute().unique()\n assert \"2014-01-01.csv\" in filenames\n assert \"2014-01-02.csv\" not in filenames\n assert \"2014-01-03.csv\" in filenames", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_include_path_column_with_duplicate_name_test_read_csv_include_path_column_is_dtype_category.with_filetexts_files_mod.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_include_path_column_with_duplicate_name_test_read_csv_include_path_column_is_dtype_category.with_filetexts_files_mod.None_3", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 434, "end_line": 457, "span_ids": ["test_read_csv_include_path_column_with_duplicate_name", "test_read_csv_include_path_column_is_dtype_category"], "tokens": 220}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"dd_read,files\", [(dd.read_csv, csv_files), (dd.read_table, tsv_files)]\n)\n@read_table_mark\ndef 
test_read_csv_include_path_column_with_duplicate_name(dd_read, files):\n with filetexts(files, mode=\"b\"):\n with pytest.raises(ValueError):\n dd_read(\"2014-01-*.csv\", include_path_column=\"name\")\n\n\n@pytest.mark.parametrize(\n \"dd_read,files\", [(dd.read_csv, csv_files), (dd.read_table, tsv_files)]\n)\n@read_table_mark\ndef test_read_csv_include_path_column_is_dtype_category(dd_read, files):\n with filetexts(files, mode=\"b\"):\n df = dd_read(\"2014-01-*.csv\", include_path_column=True)\n assert df.path.dtype == \"category\"\n assert has_known_categories(df.path)\n\n dfs = dd_read(\"2014-01-*.csv\", include_path_column=True)\n result = dfs.compute()\n assert result.path.dtype == \"category\"\n assert has_known_categories(result.path)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py__After_this_point_we_te_test_read_csv_index.with_filetext_csv_text_a.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py__After_this_point_we_te_test_read_csv_index.with_filetext_csv_text_a.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 460, "end_line": 480, "span_ids": ["test_read_csv_index", "test_read_csv_include_path_column_is_dtype_category"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# After this point, we test just using read_csv, as all functionality\n# for both is implemented using the same code.\n\n\ndef test_read_csv_index():\n with filetext(csv_text) as fn:\n f = dd.read_csv(fn, blocksize=20).set_index(\"amount\")\n result = f.compute(scheduler=\"sync\")\n assert result.index.name == \"amount\"\n\n blocks = compute_as_if_collection(\n dd.DataFrame, f.dask, f.__dask_keys__(), scheduler=\"sync\"\n )\n for i, block in enumerate(blocks):\n if i < len(f.divisions) - 2:\n assert (block.index < f.divisions[i + 1]).all()\n if i > 0:\n assert (block.index >= f.divisions[i]).all()\n\n expected = pd.read_csv(fn).set_index(\"amount\")\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_skiprows_range_test_consistent_dtypes.with_filetext_text_as_fn.assert_df_amount_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_skiprows_range_test_consistent_dtypes.with_filetext_text_as_fn.assert_df_amount_compute_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", 
"category": "test", "start_line": 483, "end_line": 543, "span_ids": ["test_consistent_dtypes", "test_read_csv_skiprows_range", "test_string_blocksize", "test_usecols", "test_skipinitialspace"], "tokens": 397}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_csv_skiprows_range():\n with filetext(csv_text) as fn:\n f = dd.read_csv(fn, skiprows=range(5))\n result = f\n expected = pd.read_csv(fn, skiprows=range(5))\n assert_eq(result, expected)\n\n\ndef test_usecols():\n with filetext(timeseries) as fn:\n df = dd.read_csv(fn, blocksize=30, usecols=[\"High\", \"Low\"])\n expected = pd.read_csv(fn, usecols=[\"High\", \"Low\"])\n assert (df.compute().values == expected.values).all()\n\n\ndef test_string_blocksize():\n with filetext(timeseries) as fn:\n a = dd.read_csv(fn, blocksize=\"30B\")\n b = dd.read_csv(fn, blocksize=\"30\")\n assert a.npartitions == b.npartitions\n\n c = dd.read_csv(fn, blocksize=\"64MiB\")\n assert c.npartitions == 1\n\n\ndef test_skipinitialspace():\n text = normalize_text(\n \"\"\"\n name, amount\n Alice,100\n Bob,-200\n Charlie,300\n Dennis,400\n Edith,-500\n Frank,600\n \"\"\"\n )\n\n with filetext(text) as fn:\n df = dd.read_csv(fn, skipinitialspace=True, blocksize=20)\n\n assert \"amount\" in df.columns\n assert df.amount.max().compute() == 600\n\n\ndef test_consistent_dtypes():\n text = normalize_text(\n \"\"\"\n name,amount\n Alice,100.5\n Bob,-200.5\n Charlie,300\n Dennis,400\n Edith,-500\n Frank,600\n \"\"\"\n )\n\n with filetext(text) as fn:\n df = dd.read_csv(fn, blocksize=30)\n assert df.amount.compute().dtype == float", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_consistent_dtypes_2_test_consistent_dtypes_2.with_filetexts_foo_1_cs.assert_df_name_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_consistent_dtypes_2_test_consistent_dtypes_2.with_filetexts_foo_1_cs.assert_df_name_compute_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 546, "end_line": 568, "span_ids": ["test_consistent_dtypes_2"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_consistent_dtypes_2():\n text1 = normalize_text(\n \"\"\"\n name,amount\n Alice,100\n Bob,-200\n Charlie,300\n \"\"\"\n )\n\n text2 = normalize_text(\n \"\"\"\n name,amount\n 1,400\n 2,-500\n Frank,600\n \"\"\"\n )\n\n with filetexts({\"foo.1.csv\": text1, \"foo.2.csv\": text2}):\n df = dd.read_csv(\"foo.*.csv\", blocksize=25)\n assert df.name.dtype == object\n assert df.name.compute().dtype == object", 
"start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_categorical_dtypes_test_categorical_dtypes.with_filetexts_foo_1_cs.assert_sorted_res_fruit_c": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_categorical_dtypes_test_categorical_dtypes.with_filetexts_foo_1_cs.assert_sorted_res_fruit_c", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 571, "end_line": 598, "span_ids": ["test_categorical_dtypes"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_categorical_dtypes():\n text1 = normalize_text(\n \"\"\"\n fruit,count\n apple,10\n apple,25\n pear,100\n orange,15\n \"\"\"\n )\n\n text2 = normalize_text(\n \"\"\"\n fruit,count\n apple,200\n banana,300\n orange,400\n banana,10\n \"\"\"\n )\n\n with filetexts({\"foo.1.csv\": text1, \"foo.2.csv\": text2}):\n df = dd.read_csv(\"foo.*.csv\", dtype={\"fruit\": \"category\"}, blocksize=25)\n assert df.fruit.dtype == \"category\"\n assert not has_known_categories(df.fruit)\n res = df.compute()\n assert res.fruit.dtype == \"category\"\n assert sorted(res.fruit.cat.categories) == [\"apple\", \"banana\", \"orange\", \"pear\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_categorical_known_test_categorical_known.with_filetexts_foo_1_cs.None_10": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_categorical_known_test_categorical_known.with_filetexts_foo_1_cs.None_10", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 601, "end_line": 661, "span_ids": ["test_categorical_known"], "tokens": 512}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_categorical_known():\n text1 = normalize_text(\n \"\"\"\n A,B\n a,a\n b,b\n a,a\n \"\"\"\n )\n text2 = normalize_text(\n \"\"\"\n A,B\n a,a\n b,b\n c,c\n \"\"\"\n )\n dtype = pd.api.types.CategoricalDtype([\"a\", \"b\", \"c\"], ordered=False)\n with filetexts({\"foo.1.csv\": text1, \"foo.2.csv\": text2}):\n result = dd.read_csv(\"foo.*.csv\", dtype={\"A\": \"category\", \"B\": \"category\"})\n assert result.A.cat.known is False\n assert 
result.B.cat.known is False\n expected = pd.DataFrame(\n {\n \"A\": pd.Categorical(\n [\"a\", \"b\", \"a\", \"a\", \"b\", \"c\"], categories=dtype.categories\n ),\n \"B\": pd.Categorical(\n [\"a\", \"b\", \"a\", \"a\", \"b\", \"c\"], categories=dtype.categories\n ),\n },\n index=[0, 1, 2, 0, 1, 2],\n )\n assert_eq(result, expected)\n\n # Specify a dtype\n result = dd.read_csv(\"foo.*.csv\", dtype={\"A\": dtype, \"B\": \"category\"})\n assert result.A.cat.known is True\n assert result.B.cat.known is False\n tm.assert_index_equal(result.A.cat.categories, dtype.categories)\n assert result.A.cat.ordered is False\n assert_eq(result, expected)\n\n # ordered\n dtype = pd.api.types.CategoricalDtype([\"a\", \"b\", \"c\"], ordered=True)\n result = dd.read_csv(\"foo.*.csv\", dtype={\"A\": dtype, \"B\": \"category\"})\n expected[\"A\"] = expected[\"A\"].cat.as_ordered()\n assert result.A.cat.known is True\n assert result.B.cat.known is False\n assert result.A.cat.ordered is True\n\n assert_eq(result, expected)\n\n # Specify \"unknown\" categories\n result = dd.read_csv(\n \"foo.*.csv\", dtype=pd.api.types.CategoricalDtype(ordered=False)\n )\n assert result.A.cat.known is False\n\n result = dd.read_csv(\"foo.*.csv\", dtype=\"category\")\n assert result.A.cat.known is False", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_compression_multiple_files_test_compression_multiple_files.with_tmpdir_as_tdir_.assert_len_df_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_compression_multiple_files_test_compression_multiple_files.with_tmpdir_as_tdir_.assert_len_df_compute_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 664, "end_line": 678, "span_ids": ["test_compression_multiple_files"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.slow\ndef test_compression_multiple_files():\n with tmpdir() as tdir:\n f = gzip.open(os.path.join(tdir, \"a.csv.gz\"), \"wb\")\n f.write(csv_text.encode())\n f.close()\n\n f = gzip.open(os.path.join(tdir, \"b.csv.gz\"), \"wb\")\n f.write(csv_text.encode())\n f.close()\n\n with pytest.warns(UserWarning):\n df = dd.read_csv(os.path.join(tdir, \"*.csv.gz\"), compression=\"gzip\")\n\n assert len(df.compute()) == (len(csv_text.split(\"\\n\")) - 1) * 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_empty_csv_file_test_read_csv_sensitive_to_enforce.with_filetexts_csv_files_.assert_a__name_b__name": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_empty_csv_file_test_read_csv_sensitive_to_enforce.with_filetexts_csv_files_.assert_a__name_b__name", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 681, "end_line": 698, "span_ids": ["test_empty_csv_file", "test_read_csv_no_sample", "test_read_csv_sensitive_to_enforce"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_empty_csv_file():\n with filetext(\"a,b\") as fn:\n df = dd.read_csv(fn, header=0)\n assert len(df.compute()) == 0\n assert list(df.columns) == [\"a\", \"b\"]\n\n\ndef test_read_csv_no_sample():\n with filetexts(csv_files, mode=\"b\") as fn:\n df = dd.read_csv(fn, sample=False)\n assert list(df.columns) == [\"name\", \"amount\", \"id\"]\n\n\ndef test_read_csv_sensitive_to_enforce():\n with filetexts(csv_files, mode=\"b\"):\n a = dd.read_csv(\"2014-01-*.csv\", enforce=True)\n b = dd.read_csv(\"2014-01-*.csv\", enforce=False)\n assert a._name != b._name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_compression_test_read_csv_compression.with_filetexts_files2_mo.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_compression_test_read_csv_compression.with_filetexts_files2_mo.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 701, "end_line": 716, "span_ids": ["test_read_csv_compression"], "tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"fmt,blocksize\", fmt_bs)\ndef test_read_csv_compression(fmt, blocksize):\n if fmt not in compress:\n pytest.skip(\"compress function not provided for %s\" % fmt)\n files2 = valmap(compress[fmt], csv_files)\n with filetexts(files2, mode=\"b\"):\n if fmt and blocksize:\n with pytest.warns(UserWarning):\n df = dd.read_csv(\"2014-01-*.csv\", compression=fmt, blocksize=blocksize)\n else:\n df = dd.read_csv(\"2014-01-*.csv\", compression=fmt, blocksize=blocksize)\n assert_eq(\n df.compute(scheduler=\"sync\").reset_index(drop=True),\n expected.reset_index(drop=True),\n check_dtype=False,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_warn_non_seekable_files_test_warn_non_seekable_files.with_filetexts_files2_mo.with_pytest_raises_NotImp.with_pytest_warns_UserWar.df.dd_read_csv_2014_01_cs": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_warn_non_seekable_files_test_warn_non_seekable_files.with_filetexts_files2_mo.with_pytest_raises_NotImp.with_pytest_warns_UserWar.df.dd_read_csv_2014_01_cs", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 719, "end_line": 739, "span_ids": ["test_warn_non_seekable_files"], "tokens": 199}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skip\ndef test_warn_non_seekable_files():\n files2 = valmap(compress[\"gzip\"], csv_files)\n with filetexts(files2, mode=\"b\"):\n\n with pytest.warns(UserWarning) as w:\n df = dd.read_csv(\"2014-01-*.csv\", compression=\"gzip\")\n assert df.npartitions == 3\n\n assert len(w) == 1\n msg = str(w[0].message)\n assert \"gzip\" in msg\n assert \"blocksize=None\" in msg\n\n with pytest.warns(None) as w:\n df = dd.read_csv(\"2014-01-*.csv\", compression=\"gzip\", blocksize=None)\n assert len(w) == 0\n\n with pytest.raises(NotImplementedError):\n with pytest.warns(UserWarning): # needed for pytest\n df = dd.read_csv(\"2014-01-*.csv\", compression=\"foo\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_windows_line_terminator_test_windows_line_terminator.with_filetext_text_as_fn.assert_df_a_sum_compute": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_windows_line_terminator_test_windows_line_terminator.with_filetext_text_as_fn.assert_df_a_sum_compute", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 742, "end_line": 747, "span_ids": ["test_windows_line_terminator"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_windows_line_terminator():\n text = \"a,b\\r\\n1,2\\r\\n2,3\\r\\n3,4\\r\\n4,5\\r\\n5,6\\r\\n6,7\"\n with filetext(text) as fn:\n df = dd.read_csv(fn, blocksize=5, lineterminator=\"\\r\\n\")\n assert df.b.sum().compute() == 2 + 3 + 4 + 5 + 6 + 7\n assert df.a.sum().compute() == 1 + 2 + 3 + 4 + 5 + 6", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: 
{value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_header_None_test_auto_blocksize_max64mb.assert_isinstance_blocksi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_header_None_test_auto_blocksize_max64mb.assert_isinstance_blocksi", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 750, "end_line": 766, "span_ids": ["test_auto_blocksize", "test_header_None", "test_auto_blocksize_max64mb"], "tokens": 189}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_header_None():\n with filetexts({\".tmp.1.csv\": \"1,2\", \".tmp.2.csv\": \"\", \".tmp.3.csv\": \"3,4\"}):\n df = dd.read_csv(\".tmp.*.csv\", header=None)\n expected = pd.DataFrame({0: [1, 3], 1: [2, 4]})\n assert_eq(df.compute().reset_index(drop=True), expected)\n\n\ndef test_auto_blocksize():\n assert isinstance(auto_blocksize(3000, 15), int)\n assert auto_blocksize(3000, 3) == 100\n assert auto_blocksize(5000, 2) == 250\n\n\ndef test_auto_blocksize_max64mb():\n blocksize = auto_blocksize(1000000000000, 3)\n assert blocksize == int(64e6)\n assert isinstance(blocksize, int)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_auto_blocksize_csv_test_auto_blocksize_csv.with_filetexts_csv_files_.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_auto_blocksize_csv_test_auto_blocksize_csv.with_filetexts_csv_files_.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 769, "end_line": 780, "span_ids": ["test_auto_blocksize_csv"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_auto_blocksize_csv(monkeypatch):\n psutil = pytest.importorskip(\"psutil\")\n total_memory = psutil.virtual_memory().total\n cpu_count = psutil.cpu_count()\n mock_read_bytes = mock.Mock(wraps=read_bytes)\n monkeypatch.setattr(dask.dataframe.io.csv, \"read_bytes\", mock_read_bytes)\n\n expected_block_size = auto_blocksize(total_memory, cpu_count)\n with filetexts(csv_files, mode=\"b\"):\n dd.read_csv(\"2014-01-01.csv\")\n assert mock_read_bytes.called\n assert mock_read_bytes.call_args[1][\"blocksize\"] == expected_block_size", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_head_partial_line_fix_test_head_partial_line_fix.with_filetexts_files_.assert_df_dtypes_i8_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_head_partial_line_fix_test_head_partial_line_fix.with_filetexts_files_.assert_df_dtypes_i8_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 783, "end_line": 798, "span_ids": ["test_head_partial_line_fix"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_head_partial_line_fix():\n files = {\n \".overflow1.csv\": (\n \"a,b\\n0,'abcdefghijklmnopqrstuvwxyz'\\n1,'abcdefghijklmnopqrstuvwxyz'\"\n ),\n \".overflow2.csv\": \"a,b\\n111111,-11111\\n222222,-22222\\n333333,-33333\\n\",\n }\n with filetexts(files):\n # 64 byte file, 52 characters is mid-quote; this should not cause exception in head-handling code.\n dd.read_csv(\".overflow1.csv\", sample=52)\n\n # 35 characters is cuts off before the second number on the last line\n # Should sample to end of line, otherwise pandas will infer `b` to be\n # a float dtype\n df = dd.read_csv(\".overflow2.csv\", sample=35)\n assert (df.dtypes == \"i8\").all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_raises_on_no_files_test_read_csv_of_modified_file_has_different_name.with_filetext_csv_text_a.assert_sorted_a_dask_key": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_raises_on_no_files_test_read_csv_of_modified_file_has_different_name.with_filetext_csv_text_a.assert_sorted_a_dask_key", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 801, "end_line": 847, "span_ids": ["test_multiple_read_csv_has_deterministic_name", "test_read_csv_of_modified_file_has_different_name", "test_csv_with_integer_names", "test_read_csv_raises_on_no_files", "test_read_csv_has_deterministic_name"], "tokens": 378}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_csv_raises_on_no_files():\n fn = \".not.a.real.file.csv\"\n try:\n dd.read_csv(fn)\n assert False\n except (OSError, IOError) as e:\n assert fn in str(e)\n\n\ndef 
test_read_csv_has_deterministic_name():\n with filetext(csv_text) as fn:\n a = dd.read_csv(fn)\n b = dd.read_csv(fn)\n assert a._name == b._name\n assert sorted(a.dask.keys(), key=str) == sorted(b.dask.keys(), key=str)\n assert isinstance(a._name, str)\n\n c = dd.read_csv(fn, skiprows=1, na_values=[0])\n assert a._name != c._name\n\n\ndef test_multiple_read_csv_has_deterministic_name():\n with filetexts({\"_foo.1.csv\": csv_text, \"_foo.2.csv\": csv_text}):\n a = dd.read_csv(\"_foo.*.csv\")\n b = dd.read_csv(\"_foo.*.csv\")\n\n assert sorted(a.dask.keys(), key=str) == sorted(b.dask.keys(), key=str)\n\n\ndef test_csv_with_integer_names():\n with filetext(\"alice,1\\nbob,2\") as fn:\n df = dd.read_csv(fn, header=None)\n assert list(df.columns) == [0, 1]\n\n\n@pytest.mark.slow\ndef test_read_csv_of_modified_file_has_different_name():\n with filetext(csv_text) as fn:\n sleep(1)\n a = dd.read_csv(fn)\n sleep(1)\n with open(fn, \"a\") as f:\n f.write(\"\\nGeorge,700\")\n os.fsync(f)\n b = dd.read_csv(fn)\n\n assert sorted(a.dask, key=str) != sorted(b.dask, key=str)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_late_dtypes_test_late_dtypes.date_msg._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_late_dtypes_test_late_dtypes.date_msg._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 850, "end_line": 869, "span_ids": ["test_late_dtypes"], "tokens": 205}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_late_dtypes():\n text = \"numbers,names,more_numbers,integers,dates\\n\"\n for i in range(1000):\n text += \"1,,2,3,2017-10-31 00:00:00\\n\"\n text += \"1.5,bar,2.5,3,4998-01-01 00:00:00\\n\"\n\n date_msg = (\n \"\\n\"\n \"\\n\"\n \"-------------------------------------------------------------\\n\"\n \"\\n\"\n \"The following columns also failed to properly parse as dates:\\n\"\n \"\\n\"\n \"- dates\\n\"\n \"\\n\"\n \"This is usually due to an invalid value in that column. To\\n\"\n \"diagnose and fix it's recommended to drop these columns from the\\n\"\n \"`parse_dates` keyword, and manually convert them to dates later\\n\"\n \"using `dd.to_datetime`.\"\n )\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_late_dtypes.with_filetext_text_as_fn_test_late_dtypes.with_filetext_text_as_fn.assert_eq_res_sol_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_late_dtypes.with_filetext_text_as_fn_test_late_dtypes.with_filetext_text_as_fn.assert_eq_res_sol_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 871, "end_line": 965, "span_ids": ["test_late_dtypes"], "tokens": 880}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_late_dtypes():\n # ... other code\n\n with filetext(text) as fn:\n sol = pd.read_csv(fn)\n msg = (\n \"Mismatched dtypes found in `pd.read_csv`/`pd.read_table`.\\n\"\n \"\\n\"\n \"+--------------+---------+----------+\\n\"\n \"| Column | Found | Expected |\\n\"\n \"+--------------+---------+----------+\\n\"\n \"| more_numbers | float64 | int64 |\\n\"\n \"| names | object | float64 |\\n\"\n \"| numbers | float64 | int64 |\\n\"\n \"+--------------+---------+----------+\\n\"\n \"\\n\"\n \"- names\\n\"\n \" ValueError(.*)\\n\"\n \"\\n\"\n \"Usually this is due to dask's dtype inference failing, and\\n\"\n \"*may* be fixed by specifying dtypes manually by adding:\\n\"\n \"\\n\"\n \"dtype={'more_numbers': 'float64',\\n\"\n \" 'names': 'object',\\n\"\n \" 'numbers': 'float64'}\\n\"\n \"\\n\"\n \"to the call to `read_csv`/`read_table`.\"\n )\n\n with pytest.raises(ValueError) as e:\n dd.read_csv(fn, sample=50, parse_dates=[\"dates\"]).compute(scheduler=\"sync\")\n assert e.match(msg + date_msg)\n\n with pytest.raises(ValueError) as e:\n dd.read_csv(fn, sample=50).compute(scheduler=\"sync\")\n assert e.match(msg)\n\n msg = (\n \"Mismatched dtypes found in `pd.read_csv`/`pd.read_table`.\\n\"\n \"\\n\"\n \"+--------------+---------+----------+\\n\"\n \"| Column | Found | Expected |\\n\"\n \"+--------------+---------+----------+\\n\"\n \"| more_numbers | float64 | int64 |\\n\"\n \"| numbers | float64 | int64 |\\n\"\n \"+--------------+---------+----------+\\n\"\n \"\\n\"\n \"Usually this is due to dask's dtype inference failing, and\\n\"\n \"*may* be fixed by specifying dtypes manually by adding:\\n\"\n \"\\n\"\n \"dtype={'more_numbers': 'float64',\\n\"\n \" 'numbers': 'float64'}\\n\"\n \"\\n\"\n \"to the call to `read_csv`/`read_table`.\\n\"\n \"\\n\"\n \"Alternatively, provide `assume_missing=True` to interpret\\n\"\n \"all unspecified integer columns as floats.\"\n )\n\n with pytest.raises(ValueError) as e:\n dd.read_csv(fn, sample=50, dtype={\"names\": \"O\"}).compute(scheduler=\"sync\")\n assert str(e.value) == msg\n\n with pytest.raises(ValueError) as e:\n dd.read_csv(\n fn, sample=50, parse_dates=[\"dates\"], dtype={\"names\": \"O\"}\n ).compute(scheduler=\"sync\")\n assert str(e.value) == msg + date_msg\n\n msg = (\n \"Mismatched dtypes found in 
`pd.read_csv`/`pd.read_table`.\n"\n \"\\n\"\n \"The following columns failed to properly parse as dates:\\n\"\n \"\\n\"\n \"- dates\\n\"\n \"\\n\"\n \"This is usually due to an invalid value in that column. To\\n\"\n \"diagnose and fix it's recommended to drop these columns from the\\n\"\n \"`parse_dates` keyword, and manually convert them to dates later\\n\"\n \"using `dd.to_datetime`.\"\n )\n\n with pytest.raises(ValueError) as e:\n dd.read_csv(\n fn,\n sample=50,\n parse_dates=[\"dates\"],\n dtype={\"more_numbers\": float, \"names\": object, \"numbers\": float},\n ).compute(scheduler=\"sync\")\n assert str(e.value) == msg\n\n # Specifying dtypes works\n res = dd.read_csv(\n fn,\n sample=50,\n dtype={\"more_numbers\": float, \"names\": object, \"numbers\": float},\n )\n assert_eq(res, sol)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_assume_missing_test_assume_missing.None_3.assert_df_numbers_dtype_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_assume_missing_test_assume_missing.None_3.assert_df_numbers_dtype_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 968, "end_line": 1000, "span_ids": ["test_assume_missing"], "tokens": 301}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_assume_missing():\n text = \"numbers,names,more_numbers,integers\\n\"\n for i in range(1000):\n text += \"1,foo,2,3\\n\"\n text += \"1.5,bar,2.5,3\\n\"\n with filetext(text) as fn:\n sol = pd.read_csv(fn)\n\n # assume_missing affects all columns\n res = dd.read_csv(fn, sample=50, assume_missing=True)\n assert_eq(res, sol.astype({\"integers\": float}))\n\n # assume_missing doesn't override specified dtypes\n res = dd.read_csv(\n fn, sample=50, assume_missing=True, dtype={\"integers\": \"int64\"}\n )\n assert_eq(res, sol)\n\n # assume_missing works with dtype=None\n res = dd.read_csv(fn, sample=50, assume_missing=True, dtype=None)\n assert_eq(res, sol.astype({\"integers\": float}))\n\n text = \"numbers,integers\\n\"\n for i in range(1000):\n text += \"1,2\\n\"\n text += \"1.5,2\\n\"\n\n with filetext(text) as fn:\n sol = pd.read_csv(fn)\n\n # assume_missing ignored when all dtypes specified\n df = dd.read_csv(fn, sample=30, dtype=\"int64\", assume_missing=True)\n assert df.numbers.dtype == \"int64\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_index_col_test_read_csv_with_datetime_index_partitions_one.with_filetext_timeseries_.None_1": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_index_col_test_read_csv_with_datetime_index_partitions_one.with_filetext_timeseries_.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1003, "end_line": 1027, "span_ids": ["test_index_col", "test_read_csv_with_datetime_index_partitions_one"], "tokens": 231}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_index_col():\n with filetext(csv_text) as fn:\n try:\n dd.read_csv(fn, blocksize=30, index_col=\"name\")\n assert False\n except ValueError as e:\n assert \"set_index\" in str(e)\n\n\ndef test_read_csv_with_datetime_index_partitions_one():\n with filetext(timeseries) as fn:\n df = pd.read_csv(\n fn, index_col=0, header=0, usecols=[0, 4], parse_dates=[\"Date\"]\n )\n # blocksize set to explicitly set to single chunk\n ddf = dd.read_csv(\n fn, header=0, usecols=[0, 4], parse_dates=[\"Date\"], blocksize=10000000\n ).set_index(\"Date\")\n assert_eq(df, ddf)\n\n # because fn is so small, by default, this will only be one chunk\n ddf = dd.read_csv(fn, header=0, usecols=[0, 4], parse_dates=[\"Date\"]).set_index(\n \"Date\"\n )\n assert_eq(df, ddf)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_with_datetime_index_partitions_n_xfail_pandas_100.pytest_mark_xfail_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_with_datetime_index_partitions_n_xfail_pandas_100.pytest_mark_xfail_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1030, "end_line": 1044, "span_ids": ["test_read_csv_with_datetime_index_partitions_n", "impl:37"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_csv_with_datetime_index_partitions_n():\n with filetext(timeseries) as fn:\n df = pd.read_csv(\n fn, index_col=0, header=0, usecols=[0, 4], parse_dates=[\"Date\"]\n )\n # because fn is so small, by default, set chunksize small\n ddf = dd.read_csv(\n fn, header=0, usecols=[0, 4], parse_dates=[\"Date\"], blocksize=400\n ).set_index(\"Date\")\n assert_eq(df, ddf)\n\n\nxfail_pandas_100 = pytest.mark.xfail(\n dd._compat.PANDAS_GT_100, reason=\"https://github.com/dask/dask/issues/5787\"\n)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": 
"TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_encoding_gh601_test_encoding_gh601.with_tmpfile_csv_as_f.assert_eq_d_a_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_encoding_gh601_test_encoding_gh601.with_tmpfile_csv_as_f.assert_eq_d_a_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1047, "end_line": 1069, "span_ids": ["test_encoding_gh601"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"encoding\",\n [\n pytest.param(\"utf-16\", marks=xfail_pandas_100),\n pytest.param(\"utf-16-le\", marks=xfail_pandas_100),\n \"utf-16-be\",\n ],\n)\ndef test_encoding_gh601(encoding):\n ar = pd.Series(range(0, 100))\n br = ar % 7\n cr = br * 3.3\n dr = br / 1.9836\n test_df = pd.DataFrame({\"a\": ar, \"b\": br, \"c\": cr, \"d\": dr})\n\n with tmpfile(\".csv\") as fn:\n test_df.to_csv(fn, encoding=encoding, index=False)\n\n a = pd.read_csv(fn, encoding=encoding)\n d = dd.read_csv(fn, encoding=encoding, blocksize=1000)\n d = d.compute()\n d.index = range(len(d.index))\n assert_eq(d, a)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_header_issue_823_test_none_usecols.with_filetext_csv_text_a.assert_eq_df_pd_read_csv": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_header_issue_823_test_none_usecols.with_filetext_csv_text_a.assert_eq_df_pd_read_csv", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1072, "end_line": 1085, "span_ids": ["test_read_csv_header_issue_823", "test_none_usecols"], "tokens": 140}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_csv_header_issue_823():\n text = \"\"\"a b c-d\\n1 2 3\\n4 5 6\"\"\".replace(\" \", \"\\t\")\n with filetext(text) as fn:\n df = dd.read_csv(fn, sep=\"\\t\")\n assert_eq(df, pd.read_csv(fn, sep=\"\\t\"))\n\n df = dd.read_csv(fn, delimiter=\"\\t\")\n assert_eq(df, pd.read_csv(fn, delimiter=\"\\t\"))\n\n\ndef test_none_usecols():\n with filetext(csv_text) as fn:\n df = dd.read_csv(fn, usecols=None)\n assert_eq(df, pd.read_csv(fn, usecols=None))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": 
"\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_parse_dates_multi_column_test_parse_dates_multi_column.with_filetext_pdmc_text_.assert_len_df_len_ddf": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_parse_dates_multi_column_test_parse_dates_multi_column.with_filetext_pdmc_text_.assert_len_df_len_ddf", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1088, "end_line": 1113, "span_ids": ["test_parse_dates_multi_column"], "tokens": 278}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_parse_dates_multi_column():\n pdmc_text = normalize_text(\n \"\"\"\n ID,date,time\n 10,2003-11-04,180036\n 11,2003-11-05,125640\n 12,2003-11-01,2519\n 13,2003-10-22,142559\n 14,2003-10-24,163113\n 15,2003-10-20,170133\n 16,2003-11-11,160448\n 17,2003-11-03,171759\n 18,2003-11-07,190928\n 19,2003-10-21,84623\n 20,2003-10-25,192207\n 21,2003-11-13,180156\n 22,2003-11-15,131037\n \"\"\"\n )\n\n with filetext(pdmc_text) as fn:\n ddf = dd.read_csv(fn, parse_dates=[[\"date\", \"time\"]])\n df = pd.read_csv(fn, parse_dates=[[\"date\", \"time\"]])\n\n assert (df.columns == ddf.columns).all()\n assert len(df) == len(ddf)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_sep_test_robust_column_mismatch.with_filetexts_files_mod.assert_eq_ddf_ddf_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_sep_test_robust_column_mismatch.with_filetexts_files_mod.assert_eq_ddf_ddf_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1116, "end_line": 1160, "span_ids": ["test_robust_column_mismatch", "test_read_csv_slash_r", "test_read_csv_singleton_dtype", "test_read_csv_sep"], "tokens": 356}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_csv_sep():\n sep_text = normalize_text(\n \"\"\"\n name###amount\n alice###100\n bob###200\n charlie###300\"\"\"\n )\n\n with filetext(sep_text) as fn:\n ddf = dd.read_csv(fn, sep=\"###\", engine=\"python\")\n df = pd.read_csv(fn, sep=\"###\", engine=\"python\")\n\n assert (df.columns == ddf.columns).all()\n assert len(df) == len(ddf)\n\n\ndef test_read_csv_slash_r():\n data = b\"0,my\\n1,data\\n\" * 1000 + b\"2,foo\\rbar\"\n with filetext(data, mode=\"wb\") as fn:\n 
dd.read_csv(\n fn,\n header=None,\n sep=\",\",\n lineterminator=\"\\n\",\n names=[\"a\", \"b\"],\n blocksize=200,\n ).compute(scheduler=\"sync\")\n\n\ndef test_read_csv_singleton_dtype():\n data = b\"a,b\\n1,2\\n3,4\\n5,6\"\n with filetext(data, mode=\"wb\") as fn:\n assert_eq(pd.read_csv(fn, dtype=float), dd.read_csv(fn, dtype=float))\n\n\ndef test_robust_column_mismatch():\n files = csv_files.copy()\n k = sorted(files)[-1]\n files[k] = files[k].replace(b\"name\", b\"Name\")\n with filetexts(files, mode=\"b\"):\n ddf = dd.read_csv(\"2014-01-*.csv\")\n df = pd.read_csv(\"2014-01-01.csv\")\n assert (df.columns == ddf.columns).all()\n assert_eq(ddf, ddf)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_error_if_sample_is_too_small_test_error_if_sample_is_too_small.None_1.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_error_if_sample_is_too_small_test_error_if_sample_is_too_small.None_1.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1163, "end_line": 1189, "span_ids": ["test_error_if_sample_is_too_small"], "tokens": 258}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_error_if_sample_is_too_small():\n text = \"AAAAA,BBBBB,CCCCC,DDDDD,EEEEE\\n1,2,3,4,5\\n6,7,8,9,10\\n11,12,13,14,15\"\n with filetext(text) as fn:\n # Sample size stops mid header row\n sample = 20\n with pytest.raises(ValueError):\n dd.read_csv(fn, sample=sample)\n\n # Saying no header means this is fine\n assert_eq(\n dd.read_csv(fn, sample=sample, header=None), pd.read_csv(fn, header=None)\n )\n\n skiptext = \"# skip\\n# these\\n# lines\\n\"\n\n text = skiptext + text\n with filetext(text) as fn:\n # Sample size stops mid header row\n sample = 20 + len(skiptext)\n with pytest.raises(ValueError):\n dd.read_csv(fn, sample=sample, skiprows=3)\n\n # Saying no header means this is fine\n assert_eq(\n dd.read_csv(fn, sample=sample, header=None, skiprows=3),\n pd.read_csv(fn, header=None, skiprows=3),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_names_not_none_test_read_csv_names_not_none.with_filetext_text_as_fn.assert_eq_df_ddf_check_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_names_not_none_test_read_csv_names_not_none.with_filetext_text_as_fn.assert_eq_df_ddf_check_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": 
"test", "start_line": 1192, "end_line": 1205, "span_ids": ["test_read_csv_names_not_none"], "tokens": 118}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_csv_names_not_none():\n text = (\n \"Alice,100\\n\"\n \"Bob,-200\\n\"\n \"Charlie,300\\n\"\n \"Dennis,400\\n\"\n \"Edith,-500\\n\"\n \"Frank,600\\n\"\n )\n names = [\"name\", \"amount\"]\n with filetext(text) as fn:\n ddf = dd.read_csv(fn, names=names, blocksize=16)\n df = pd.read_csv(fn, names=names)\n assert_eq(df, ddf, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_None_6_test_to_csv.for_npartitions_in_1_2_.None_2.assert_eq_result_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_None_6_test_to_csv.for_npartitions_in_1_2_.None_2.assert_eq_result_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1208, "end_line": 1233, "span_ids": ["test_to_csv", "test_read_csv_names_not_none"], "tokens": 238}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "############\n# to_csv #\n############\n\n\ndef test_to_csv():\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]})\n\n for npartitions in [1, 2]:\n a = dd.from_pandas(df, npartitions)\n with tmpdir() as dn:\n a.to_csv(dn, index=False)\n result = dd.read_csv(os.path.join(dn, \"*\")).compute().reset_index(drop=True)\n assert_eq(result, df)\n\n with tmpdir() as dn:\n r = a.to_csv(dn, index=False, compute=False)\n dask.compute(*r, scheduler=\"sync\")\n result = dd.read_csv(os.path.join(dn, \"*\")).compute().reset_index(drop=True)\n assert_eq(result, df)\n\n with tmpdir() as dn:\n fn = os.path.join(dn, \"data_*.csv\")\n a.to_csv(fn, index=False)\n result = dd.read_csv(fn).compute().reset_index(drop=True)\n assert_eq(result, df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_multiple_files_cornercases_test_to_csv_multiple_files_cornercases.None_3.assert_eq_result_df16_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_multiple_files_cornercases_test_to_csv_multiple_files_cornercases.None_3.assert_eq_result_df16_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", 
"file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1236, "end_line": 1290, "span_ids": ["test_to_csv_multiple_files_cornercases"], "tokens": 482}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_csv_multiple_files_cornercases():\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]})\n a = dd.from_pandas(df, 2)\n with tmpdir() as dn:\n with pytest.raises(ValueError):\n fn = os.path.join(dn, \"data_*_*.csv\")\n a.to_csv(fn)\n\n df16 = pd.DataFrame(\n {\n \"x\": [\n \"a\",\n \"b\",\n \"c\",\n \"d\",\n \"e\",\n \"f\",\n \"g\",\n \"h\",\n \"i\",\n \"j\",\n \"k\",\n \"l\",\n \"m\",\n \"n\",\n \"o\",\n \"p\",\n ],\n \"y\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],\n }\n )\n a = dd.from_pandas(df16, 16)\n with tmpdir() as dn:\n fn = os.path.join(dn, \"data_*.csv\")\n a.to_csv(fn, index=False)\n result = dd.read_csv(fn).compute().reset_index(drop=True)\n assert_eq(result, df16)\n\n # test handling existing files when links are optimized out\n a = dd.from_pandas(df, 2)\n with tmpdir() as dn:\n a.to_csv(dn, index=False)\n fn = os.path.join(dn, \"data_*.csv\")\n a.to_csv(fn, mode=\"w\", index=False)\n result = dd.read_csv(fn).compute().reset_index(drop=True)\n assert_eq(result, df)\n\n # test handling existing files when links are optimized out\n a = dd.from_pandas(df16, 16)\n with tmpdir() as dn:\n a.to_csv(dn, index=False)\n fn = os.path.join(dn, \"data_*.csv\")\n a.to_csv(fn, mode=\"w\", index=False)\n result = dd.read_csv(fn).compute().reset_index(drop=True)\n assert_eq(result, df16)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_single_csv_test_to_single_csv.for_npartitions_in_1_2_.None_1.assert_eq_result_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_single_csv_test_to_single_csv.for_npartitions_in_1_2_.None_1.assert_eq_result_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1293, "end_line": 1309, "span_ids": ["test_to_single_csv"], "tokens": 194}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_single_csv():\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]})\n\n for npartitions in [1, 2]:\n a = dd.from_pandas(df, npartitions)\n with tmpdir() as dn:\n fn = os.path.join(dn, \"test.csv\")\n a.to_csv(fn, index=False, single_file=True)\n result = dd.read_csv(fn).compute().reset_index(drop=True)\n assert_eq(result, df)\n\n with tmpdir() as dn:\n fn = os.path.join(dn, \"test.csv\")\n r = 
a.to_csv(fn, index=False, compute=False, single_file=True)\n dask.compute(r, scheduler=\"sync\")\n result = dd.read_csv(fn).compute().reset_index(drop=True)\n assert_eq(result, df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_single_csv_with_name_function_test_to_single_csv_with_name_function.with_tmpdir_as_dn_.with_pytest_raises_.a_to_csv_fn_name_functio": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_single_csv_with_name_function_test_to_single_csv_with_name_function.with_tmpdir_as_dn_.with_pytest_raises_.a_to_csv_fn_name_functio", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1312, "end_line": 1321, "span_ids": ["test_to_single_csv_with_name_function"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_single_csv_with_name_function():\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]})\n a = dd.from_pandas(df, 1)\n with tmpdir() as dn:\n fn = os.path.join(dn, \"test.csv\")\n with pytest.raises(\n ValueError,\n match=\"name_function is not supported under the single file mode\",\n ):\n a.to_csv(fn, name_function=lambda x: x, index=False, single_file=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_single_csv_with_header_first_partition_only_test_to_single_csv_with_header_first_partition_only.with_tmpdir_as_dn_.with_pytest_raises_.a_to_csv_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_single_csv_with_header_first_partition_only_test_to_single_csv_with_header_first_partition_only.with_tmpdir_as_dn_.with_pytest_raises_.a_to_csv_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1324, "end_line": 1335, "span_ids": ["test_to_single_csv_with_header_first_partition_only"], "tokens": 128}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_single_csv_with_header_first_partition_only():\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]})\n a = dd.from_pandas(df, 1)\n with tmpdir() as dn:\n fn = os.path.join(dn, \"test.csv\")\n with 
pytest.raises(\n ValueError,\n match=\"header_first_partition_only cannot be False in the single file mode.\",\n ):\n a.to_csv(\n fn, index=False, header_first_partition_only=False, single_file=True\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_single_csv_gzip_test_to_single_csv_gzip.for_npartitions_in_1_2_.with_tmpdir_as_dn_.assert_eq_result_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_single_csv_gzip_test_to_single_csv_gzip.for_npartitions_in_1_2_.with_tmpdir_as_dn_.assert_eq_result_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1338, "end_line": 1347, "span_ids": ["test_to_single_csv_gzip"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_single_csv_gzip():\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]})\n\n for npartitions in [1, 2]:\n a = dd.from_pandas(df, npartitions)\n with tmpdir() as dn:\n fn = os.path.join(dn, \"test.csv.gz\")\n a.to_csv(fn, index=False, compression=\"gzip\", single_file=True)\n result = pd.read_csv(fn, compression=\"gzip\").reset_index(drop=True)\n assert_eq(result, df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_gzip_test_to_csv_gzip.for_npartitions_in_1_2_.with_tmpfile_csv_as_fn.tm_assert_frame_equal_res": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_gzip_test_to_csv_gzip.for_npartitions_in_1_2_.with_tmpfile_csv_as_fn.tm_assert_frame_equal_res", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1350, "end_line": 1361, "span_ids": ["test_to_csv_gzip"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(reason=\"to_csv does not support compression\")\ndef test_to_csv_gzip():\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]\n )\n\n for npartitions in [1, 2]:\n a = dd.from_pandas(df, npartitions)\n with tmpfile(\"csv\") as fn:\n a.to_csv(fn, compression=\"gzip\")\n result = pd.read_csv(fn, index_col=0, 
compression=\"gzip\")\n tm.assert_frame_equal(result, df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_nodir_test_to_csv_nodir.assert_result_x_values_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_nodir_test_to_csv_nodir.assert_result_x_values_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1364, "end_line": 1376, "span_ids": ["test_to_csv_nodir"], "tokens": 183}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_csv_nodir():\n # See #6062 https://github.com/intake/filesystem_spec/pull/271 and\n df0 = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]\n )\n df = dd.from_pandas(df0, npartitions=2)\n with tmpdir() as dir:\n dir0 = os.path.join(str(dir), \"createme\")\n df.to_csv(dir0)\n assert \"createme\" in os.listdir(dir)\n assert os.listdir(dir0)\n result = dd.read_csv(os.path.join(dir0, \"*\")).compute()\n assert (result.x.values == df0.x.values).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_simple_test_to_csv_simple.assert_result_x_values_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_simple_test_to_csv_simple.assert_result_x_values_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1379, "end_line": 1389, "span_ids": ["test_to_csv_simple"], "tokens": 138}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_csv_simple():\n df0 = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]\n )\n df = dd.from_pandas(df0, npartitions=2)\n with tmpdir() as dir:\n dir = str(dir)\n df.to_csv(dir)\n assert os.listdir(dir)\n result = dd.read_csv(os.path.join(dir, \"*\")).compute()\n assert (result.x.values == df0.x.values).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_series_test_to_csv_series.assert_result_x_df0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_series_test_to_csv_series.assert_result_x_df0_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1392, "end_line": 1400, "span_ids": ["test_to_csv_series"], "tokens": 123}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_csv_series():\n df0 = pd.Series([\"a\", \"b\", \"c\", \"d\"], index=[1.0, 2.0, 3.0, 4.0])\n df = dd.from_pandas(df0, npartitions=2)\n with tmpdir() as dir:\n dir = str(dir)\n df.to_csv(dir, header=False)\n assert os.listdir(dir)\n result = dd.read_csv(os.path.join(dir, \"*\"), header=None, names=[\"x\"]).compute()\n assert (result.x == df0).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_with_get_test_to_csv_with_get.with_tmpdir_as_dn_.assert_eq_result_df_che": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_with_get_test_to_csv_with_get.with_tmpdir_as_dn_.assert_eq_result_df_che", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1403, "end_line": 1419, "span_ids": ["test_to_csv_with_get"], "tokens": 163}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_csv_with_get():\n from dask.multiprocessing import get as mp_get\n\n flag = [False]\n\n def my_get(*args, **kwargs):\n flag[0] = True\n return mp_get(*args, **kwargs)\n\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n with tmpdir() as dn:\n ddf.to_csv(dn, index=False, compute_kwargs={\"scheduler\": my_get})\n assert flag[0]\n result = dd.read_csv(os.path.join(dn, \"*\"))\n assert_eq(result, df, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_warns_using_scheduler_argument_test_to_csv_warns_using_scheduler_argument.with_tmpdir_as_dn_.with_pytest_warns_FutureW.ddf_to_csv_dn_index_Fals": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_warns_using_scheduler_argument_test_to_csv_warns_using_scheduler_argument.with_tmpdir_as_dn_.with_pytest_warns_FutureW.ddf_to_csv_dn_index_Fals", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1422, "end_line": 1433, "span_ids": ["test_to_csv_warns_using_scheduler_argument"], "tokens": 126}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_csv_warns_using_scheduler_argument():\n from dask.multiprocessing import get as mp_get\n\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n def my_get(*args, **kwargs):\n return mp_get(*args, **kwargs)\n\n with tmpdir() as dn:\n with pytest.warns(FutureWarning):\n ddf.to_csv(dn, index=False, scheduler=my_get)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_errors_using_multiple_scheduler_args_test_to_csv_errors_using_multiple_scheduler_args.with_tmpdir_as_dn_.with_pytest_raises_ValueE.ddf_to_csv_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_errors_using_multiple_scheduler_args_test_to_csv_errors_using_multiple_scheduler_args.with_tmpdir_as_dn_.with_pytest_raises_ValueE.ddf_to_csv_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1436, "end_line": 1449, "span_ids": ["test_to_csv_errors_using_multiple_scheduler_args"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_csv_errors_using_multiple_scheduler_args():\n from dask.multiprocessing import get as mp_get\n\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n def my_get(*args, **kwargs):\n return mp_get(*args, **kwargs)\n\n with tmpdir() as dn:\n with pytest.raises(ValueError) and pytest.warns(FutureWarning):\n ddf.to_csv(\n dn, index=False, scheduler=my_get, compute_kwargs={\"scheduler\": my_get}\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_keeps_all_non_scheduler_compute_kwargs_test_to_csv_paths.os_remove_foo1_csv_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_keeps_all_non_scheduler_compute_kwargs_test_to_csv_paths.os_remove_foo1_csv_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1452, "end_line": 1478, "span_ids": ["test_to_csv_paths", "test_to_csv_keeps_all_non_scheduler_compute_kwargs"], "tokens": 236}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_csv_keeps_all_non_scheduler_compute_kwargs():\n from dask.multiprocessing import get as mp_get\n\n def my_get(*args, **kwargs):\n assert kwargs[\"test_kwargs_passed\"] == \"foobar\"\n return mp_get(*args, **kwargs)\n\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n with tmpdir() as dn:\n ddf.to_csv(\n dn,\n index=False,\n compute_kwargs={\"scheduler\": my_get, \"test_kwargs_passed\": \"foobar\"},\n )\n\n\ndef test_to_csv_paths():\n df = pd.DataFrame({\"A\": range(10)})\n ddf = dd.from_pandas(df, npartitions=2)\n paths = ddf.to_csv(\"foo*.csv\")\n assert paths[0].endswith(\"foo0.csv\")\n assert paths[1].endswith(\"foo1.csv\")\n\n os.remove(\"foo0.csv\")\n os.remove(\"foo1.csv\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_header_empty_dataframe_test_to_csv_header_empty_dataframe.with_tmpdir_as_dn_.os_remove_filename_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_header_empty_dataframe_test_to_csv_header_empty_dataframe.with_tmpdir_as_dn_.os_remove_filename_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1481, "end_line": 1493, "span_ids": ["test_to_csv_header_empty_dataframe"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"header, expected\", [(False, \"\"), (True, \"x,y\\n\")])\ndef test_to_csv_header_empty_dataframe(header, expected):\n dfe = pd.DataFrame({\"x\": [], \"y\": []})\n ddfe = dd.from_pandas(dfe, npartitions=1)\n\n with tmpdir() as dn:\n ddfe.to_csv(os.path.join(dn, \"fooe*.csv\"), index=False, header=header)\n assert not os.path.exists(os.path.join(dn, \"fooe1.csv\"))\n filename = 
os.path.join(dn, \"fooe0.csv\")\n with open(filename, \"r\") as fp:\n line = fp.readline()\n assert line == expected\n os.remove(filename)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_header_test_to_csv_header.with_tmpdir_as_dn_.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_header_test_to_csv_header.with_tmpdir_as_dn_.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1496, "end_line": 1533, "span_ids": ["test_to_csv_header"], "tokens": 386}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"header,header_first_partition_only,expected_first,expected_next\",\n [\n (False, False, \"a,1\\n\", \"d,4\\n\"),\n (True, False, \"x,y\\n\", \"x,y\\n\"),\n (False, True, \"a,1\\n\", \"d,4\\n\"),\n (True, True, \"x,y\\n\", \"d,4\\n\"),\n ([\"aa\", \"bb\"], False, \"aa,bb\\n\", \"aa,bb\\n\"),\n ([\"aa\", \"bb\"], True, \"aa,bb\\n\", \"d,4\\n\"),\n ],\n)\ndef test_to_csv_header(\n header, header_first_partition_only, expected_first, expected_next\n):\n partition_count = 2\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"], \"y\": [1, 2, 3, 4, 5, 6]})\n ddf = dd.from_pandas(df, npartitions=partition_count)\n\n with tmpdir() as dn:\n # Test NO header case\n # (header=False, header_first_chunk_only not passed)\n ddf.to_csv(\n os.path.join(dn, \"fooa*.csv\"),\n index=False,\n header=header,\n header_first_partition_only=header_first_partition_only,\n )\n filename = os.path.join(dn, \"fooa0.csv\")\n with open(filename, \"r\") as fp:\n line = fp.readline()\n assert line == expected_first\n os.remove(filename)\n\n filename = os.path.join(dn, \"fooa1.csv\")\n with open(filename, \"r\") as fp:\n line = fp.readline()\n assert line == expected_next\n os.remove(filename)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_line_ending_test_to_csv_line_ending.assert_raw_in_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_line_ending_test_to_csv_line_ending.assert_raw_in_expected", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1536, "end_line": 1552, "span_ids": ["test_to_csv_line_ending"], "tokens": 225}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", 
"start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_csv_line_ending():\n df = pd.DataFrame({\"x\": [0]})\n ddf = dd.from_pandas(df, npartitions=1)\n expected = {b\"0\\r\\n\", b\"0\\n\"} # either/or\n # For comparison...\n # unexpected = {b'0\\r\\r\\n'}\n # This test addresses GH4809, and checks that only (at most) one\n # '\\r' character is written per line when writing to csv.\n # In case it's correct (on UNIX) to have no '\\r' at all, this test\n # considers either '\\r\\n' or '\\n' as appropriate line endings,\n # but not '\\r\\r\\n'.\n with tmpdir() as dn:\n ddf.to_csv(os.path.join(dn, \"foo*.csv\"), header=False, index=False)\n filename = os.path.join(dn, \"foo0.csv\")\n with open(filename, \"rb\") as f:\n raw = f.read()\n assert raw in expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_block_mask_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_block_mask_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1555, "end_line": 1590, "span_ids": ["test_block_mask", "test_reading_empty_csv_files_with_path"], "tokens": 285}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"block_lists\",\n [\n [[1, 2], [3], [4, 5, 6]],\n [],\n [[], [], [1], [], [1]],\n [list(range(i)) for i in range(10)],\n ],\n)\ndef test_block_mask(block_lists):\n mask = list(block_mask(block_lists))\n assert len(mask) == len(list(flatten(block_lists)))\n\n\ndef test_reading_empty_csv_files_with_path():\n with tmpdir() as tdir:\n for k, content in enumerate([\"0, 1, 2\", \"\", \"6, 7, 8\"]):\n with open(os.path.join(tdir, str(k) + \".csv\"), \"w\") as file:\n file.write(content)\n result = dd.read_csv(\n os.path.join(tdir, \"*.csv\"),\n include_path_column=True,\n converters={\"path\": parse_filename},\n names=[\"A\", \"B\", \"C\"],\n ).compute()\n df = pd.DataFrame(\n {\n \"A\": [0, 6],\n \"B\": [1, 7],\n \"C\": [2, 8],\n \"path\": [\"0.csv\", \"2.csv\"],\n }\n )\n df[\"path\"] = df[\"path\"].astype(\"category\")\n assert_eq(result, df, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_demo.py_pd_test_make_timeseries.assert_a__name_e__name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_demo.py_pd_test_make_timeseries.assert_a__name_e__name", "embedding": null, "metadata": {"file_path": 
"dask/dataframe/io/tests/test_demo.py", "file_name": "test_demo.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 72, "span_ids": ["imports", "test_make_timeseries"], "tokens": 596}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pandas as pd\nimport pytest\n\nimport dask\nimport dask.dataframe as dd\nfrom dask.dataframe._compat import tm\nfrom dask.dataframe.utils import assert_eq\n\n\ndef test_make_timeseries():\n df = dd.demo.make_timeseries(\n \"2000\", \"2015\", {\"A\": float, \"B\": int, \"C\": str}, freq=\"2D\", partition_freq=\"6M\"\n )\n\n assert df.divisions[0] == pd.Timestamp(\"2000-01-31\", freq=\"6M\")\n assert df.divisions[-1] == pd.Timestamp(\"2014-07-31\", freq=\"6M\")\n tm.assert_index_equal(df.columns, pd.Index([\"A\", \"B\", \"C\"]))\n assert df[\"A\"].head().dtype == float\n assert df[\"B\"].head().dtype == int\n assert df[\"C\"].head().dtype == object\n assert df.index.name == \"timestamp\"\n assert df.head().index.name == df.index.name\n assert df.divisions == tuple(pd.date_range(start=\"2000\", end=\"2015\", freq=\"6M\"))\n\n tm.assert_frame_equal(df.head(), df.head())\n\n a = dd.demo.make_timeseries(\n \"2000\",\n \"2015\",\n {\"A\": float, \"B\": int, \"C\": str},\n freq=\"2D\",\n partition_freq=\"6M\",\n seed=123,\n )\n b = dd.demo.make_timeseries(\n \"2000\",\n \"2015\",\n {\"A\": float, \"B\": int, \"C\": str},\n freq=\"2D\",\n partition_freq=\"6M\",\n seed=123,\n )\n c = dd.demo.make_timeseries(\n \"2000\",\n \"2015\",\n {\"A\": float, \"B\": int, \"C\": str},\n freq=\"2D\",\n partition_freq=\"6M\",\n seed=456,\n )\n d = dd.demo.make_timeseries(\n \"2000\",\n \"2015\",\n {\"A\": float, \"B\": int, \"C\": str},\n freq=\"2D\",\n partition_freq=\"3M\",\n seed=123,\n )\n e = dd.demo.make_timeseries(\n \"2000\",\n \"2015\",\n {\"A\": float, \"B\": int, \"C\": str},\n freq=\"1D\",\n partition_freq=\"6M\",\n seed=123,\n )\n tm.assert_frame_equal(a.head(), b.head())\n assert not (a.head(10) == c.head(10)).all().all()\n assert a._name == b._name\n assert a._name != c._name\n assert a._name != d._name\n assert a._name != e._name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_demo.py_test_make_timeseries_no_args_test_daily_stock.assert_eq_df_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_demo.py_test_make_timeseries_no_args_test_daily_stock.assert_eq_df_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_demo.py", "file_name": "test_demo.py", "file_type": "text/x-python", "category": "test", "start_line": 75, "end_line": 100, "span_ids": ["test_daily_stock", "test_make_timeseries_no_args", "test_no_overlaps"], "tokens": 231}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_make_timeseries_no_args():\n df = dd.demo.make_timeseries()\n assert 1 < df.npartitions < 1000\n assert len(df.columns) > 1\n assert len(set(df.dtypes)) > 1\n\n\ndef test_no_overlaps():\n df = dd.demo.make_timeseries(\n \"2000\", \"2001\", {\"A\": float}, freq=\"3H\", partition_freq=\"3M\"\n )\n\n assert all(\n df.get_partition(i).index.max().compute()\n < df.get_partition(i + 1).index.min().compute()\n for i in range(df.npartitions - 2)\n )\n\n\n@pytest.mark.network\ndef test_daily_stock():\n pytest.importorskip(\"pandas_datareader\", minversion=\"0.8.0\")\n df = dd.demo.daily_stock(\"GOOG\", start=\"2010-01-01\", stop=\"2010-01-30\", freq=\"1h\")\n assert isinstance(df, dd.DataFrame)\n assert 10 < df.npartitions < 31\n assert_eq(df, df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_demo.py_test_make_timeseries_keywords_test_make_timeseries_keywords.assert_1_bb_100": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_demo.py_test_make_timeseries_keywords_test_make_timeseries_keywords.assert_1_bb_100", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_demo.py", "file_name": "test_demo.py", "file_type": "text/x-python", "category": "test", "start_line": 103, "end_line": 119, "span_ids": ["test_make_timeseries_keywords"], "tokens": 136}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_make_timeseries_keywords():\n df = dd.demo.make_timeseries(\n \"2000\",\n \"2001\",\n {\"A\": int, \"B\": int, \"C\": str},\n freq=\"1D\",\n partition_freq=\"6M\",\n A_lam=1000000,\n B_lam=2,\n )\n a_cardinality = df.A.nunique()\n b_cardinality = df.B.nunique()\n\n aa, bb = dask.compute(a_cardinality, b_cardinality, scheduler=\"single-threaded\")\n\n assert 100 < aa <= 10000000\n assert 1 < bb <= 100", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_demo.py_test_make_timeseries_fancy_keywords_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_demo.py_test_make_timeseries_fancy_keywords_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_demo.py", "file_name": "test_demo.py", "file_type": "text/x-python", "category": "test", "start_line": 122, "end_line": 139, "span_ids": ["test_make_timeseries_fancy_keywords"], "tokens": 143}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_make_timeseries_fancy_keywords():\n df = dd.demo.make_timeseries(\n \"2000\",\n \"2001\",\n {\"A_B\": int, \"B_\": int, \"C\": str},\n freq=\"1D\",\n partition_freq=\"6M\",\n A_B_lam=1000000,\n B__lam=2,\n )\n a_cardinality = df.A_B.nunique()\n b_cardinality = df.B_.nunique()\n\n aa, bb = dask.compute(a_cardinality, b_cardinality, scheduler=\"single-threaded\")\n\n assert 100 < aa <= 10000000\n assert 1 < bb <= 100", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_from_distutils_version_im_test_to_hdf.None_3.tm_assert_frame_equal_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_from_distutils_version_im_test_to_hdf.None_3.tm_assert_frame_equal_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 45, "span_ids": ["imports", "test_to_hdf"], "tokens": 366}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from distutils.version import LooseVersion\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nimport os\nfrom time import sleep\nimport pathlib\n\nimport dask\nimport dask.dataframe as dd\nfrom dask.dataframe._compat import tm\nfrom dask.utils import tmpfile, tmpdir, dependency_depth\nfrom dask.dataframe.utils import assert_eq\n\n\ndef test_to_hdf():\n pytest.importorskip(\"tables\")\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]\n )\n a = dd.from_pandas(df, 2)\n\n with tmpfile(\"h5\") as fn:\n a.to_hdf(fn, \"/data\")\n out = pd.read_hdf(fn, \"/data\")\n tm.assert_frame_equal(df, out[:])\n\n with tmpfile(\"h5\") as fn:\n a.x.to_hdf(fn, \"/data\")\n out = pd.read_hdf(fn, \"/data\")\n tm.assert_series_equal(df.x, out[:])\n\n a = dd.from_pandas(df, 1)\n with tmpfile(\"h5\") as fn:\n a.to_hdf(fn, \"/data\")\n out = pd.read_hdf(fn, \"/data\")\n tm.assert_frame_equal(df, out[:])\n\n # test compute = False\n with tmpfile(\"h5\") as fn:\n r = a.to_hdf(fn, \"/data\", compute=False)\n r.compute()\n out = pd.read_hdf(fn, \"/data\")\n tm.assert_frame_equal(df, out[:])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_multiple_nodes_test_to_hdf_multiple_nodes.None_3.with_pd_HDFStore_fn_as_h.assert_eq_df16_out_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_multiple_nodes_test_to_hdf_multiple_nodes.None_3.with_pd_HDFStore_fn_as_h.assert_eq_df16_out_", "embedding": null, "metadata": 
{"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 48, "end_line": 125, "span_ids": ["test_to_hdf_multiple_nodes"], "tokens": 625}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_hdf_multiple_nodes():\n pytest.importorskip(\"tables\")\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]\n )\n a = dd.from_pandas(df, 2)\n df16 = pd.DataFrame(\n {\n \"x\": [\n \"a\",\n \"b\",\n \"c\",\n \"d\",\n \"e\",\n \"f\",\n \"g\",\n \"h\",\n \"i\",\n \"j\",\n \"k\",\n \"l\",\n \"m\",\n \"n\",\n \"o\",\n \"p\",\n ],\n \"y\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],\n },\n index=[\n 1.0,\n 2.0,\n 3.0,\n 4.0,\n 5.0,\n 6.0,\n 7.0,\n 8.0,\n 9.0,\n 10.0,\n 11.0,\n 12.0,\n 13.0,\n 14.0,\n 15.0,\n 16.0,\n ],\n )\n b = dd.from_pandas(df16, 16)\n\n # saving to multiple nodes\n with tmpfile(\"h5\") as fn:\n a.to_hdf(fn, \"/data*\")\n out = dd.read_hdf(fn, \"/data*\")\n assert_eq(df, out)\n\n # saving to multiple nodes making sure order is kept\n with tmpfile(\"h5\") as fn:\n b.to_hdf(fn, \"/data*\")\n out = dd.read_hdf(fn, \"/data*\")\n assert_eq(df16, out)\n\n # saving to multiple datasets with custom name_function\n with tmpfile(\"h5\") as fn:\n a.to_hdf(fn, \"/data_*\", name_function=lambda i: \"a\" * (i + 1))\n out = dd.read_hdf(fn, \"/data_*\")\n assert_eq(df, out)\n\n out = pd.read_hdf(fn, \"/data_a\")\n tm.assert_frame_equal(out, df.iloc[:2])\n out = pd.read_hdf(fn, \"/data_aa\")\n tm.assert_frame_equal(out, df.iloc[2:])\n\n # test multiple nodes with hdf object\n with tmpfile(\"h5\") as fn:\n with pd.HDFStore(fn) as hdf:\n b.to_hdf(hdf, \"/data*\")\n out = dd.read_hdf(fn, \"/data*\")\n assert_eq(df16, out)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_multiple_files_test_to_hdf_multiple_files.with_tmpfile_h5_as_fn_.with_pd_HDFStore_fn_as_h.assert_eq_df_out_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_multiple_files_test_to_hdf_multiple_files.with_tmpfile_h5_as_fn_.with_pd_HDFStore_fn_as_h.assert_eq_df_out_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 128, "end_line": 208, "span_ids": ["test_to_hdf_multiple_files"], "tokens": 672}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_hdf_multiple_files():\n pytest.importorskip(\"tables\")\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 
4.0]\n )\n a = dd.from_pandas(df, 2)\n df16 = pd.DataFrame(\n {\n \"x\": [\n \"a\",\n \"b\",\n \"c\",\n \"d\",\n \"e\",\n \"f\",\n \"g\",\n \"h\",\n \"i\",\n \"j\",\n \"k\",\n \"l\",\n \"m\",\n \"n\",\n \"o\",\n \"p\",\n ],\n \"y\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],\n },\n index=[\n 1.0,\n 2.0,\n 3.0,\n 4.0,\n 5.0,\n 6.0,\n 7.0,\n 8.0,\n 9.0,\n 10.0,\n 11.0,\n 12.0,\n 13.0,\n 14.0,\n 15.0,\n 16.0,\n ],\n )\n b = dd.from_pandas(df16, 16)\n\n # saving to multiple files\n with tmpdir() as dn:\n fn = os.path.join(dn, \"data_*.h5\")\n a.to_hdf(fn, \"/data\")\n out = dd.read_hdf(fn, \"/data\")\n assert_eq(df, out)\n\n # saving to multiple files making sure order is kept\n with tmpdir() as dn:\n fn = os.path.join(dn, \"data_*.h5\")\n b.to_hdf(fn, \"/data\")\n out = dd.read_hdf(fn, \"/data\")\n assert_eq(df16, out)\n\n # saving to multiple files with custom name_function\n with tmpdir() as dn:\n fn = os.path.join(dn, \"data_*.h5\")\n a.to_hdf(fn, \"/data\", name_function=lambda i: \"a\" * (i + 1))\n out = dd.read_hdf(fn, \"/data\")\n assert_eq(df, out)\n\n out = pd.read_hdf(os.path.join(dn, \"data_a.h5\"), \"/data\")\n tm.assert_frame_equal(out, df.iloc[:2])\n out = pd.read_hdf(os.path.join(dn, \"data_aa.h5\"), \"/data\")\n tm.assert_frame_equal(out, df.iloc[2:])\n\n # test hdf object\n with tmpfile(\"h5\") as fn:\n with pd.HDFStore(fn) as hdf:\n a.to_hdf(hdf, \"/data*\")\n out = dd.read_hdf(fn, \"/data*\")\n assert_eq(df, out)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_modes_multiple_nodes_test_to_hdf_modes_multiple_nodes.None_4.assert_eq_df_append_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_modes_multiple_nodes_test_to_hdf_modes_multiple_nodes.None_4.assert_eq_df_append_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 211, "end_line": 256, "span_ids": ["test_to_hdf_modes_multiple_nodes"], "tokens": 489}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_hdf_modes_multiple_nodes():\n pytest.importorskip(\"tables\")\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]\n )\n\n # appending a single partition to existing data\n a = dd.from_pandas(df, 1)\n with tmpfile(\"h5\") as fn:\n a.to_hdf(fn, \"/data2\")\n a.to_hdf(fn, \"/data*\", mode=\"a\")\n out = dd.read_hdf(fn, \"/data*\")\n assert_eq(df.append(df), out)\n\n # overwriting a file with a single partition\n a = dd.from_pandas(df, 1)\n with tmpfile(\"h5\") as fn:\n a.to_hdf(fn, \"/data2\")\n a.to_hdf(fn, \"/data*\", mode=\"w\")\n out = dd.read_hdf(fn, \"/data*\")\n assert_eq(df, out)\n\n # appending two partitions to existing data\n a = dd.from_pandas(df, 2)\n with tmpfile(\"h5\") as fn:\n a.to_hdf(fn, \"/data2\")\n a.to_hdf(fn, \"/data*\", 
mode=\"a\")\n out = dd.read_hdf(fn, \"/data*\")\n assert_eq(df.append(df), out)\n\n # overwriting a file with two partitions\n a = dd.from_pandas(df, 2)\n with tmpfile(\"h5\") as fn:\n a.to_hdf(fn, \"/data2\")\n a.to_hdf(fn, \"/data*\", mode=\"w\")\n out = dd.read_hdf(fn, \"/data*\")\n assert_eq(df, out)\n\n # overwriting a single partition, keeping other partitions\n a = dd.from_pandas(df, 2)\n with tmpfile(\"h5\") as fn:\n a.to_hdf(fn, \"/data1\")\n a.to_hdf(fn, \"/data2\")\n a.to_hdf(fn, \"/data*\", mode=\"a\", append=False)\n out = dd.read_hdf(fn, \"/data*\")\n assert_eq(df.append(df), out)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_modes_multiple_files_test_to_hdf_modes_multiple_files.None_3.assert_eq_df_append_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_modes_multiple_files_test_to_hdf_modes_multiple_files.None_3.assert_eq_df_append_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 259, "end_line": 299, "span_ids": ["test_to_hdf_modes_multiple_files"], "tokens": 464}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_hdf_modes_multiple_files():\n pytest.importorskip(\"tables\")\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]\n )\n\n # appending a single partition to existing data\n a = dd.from_pandas(df, 1)\n with tmpdir() as dn:\n fn = os.path.join(dn, \"data*\")\n a.to_hdf(os.path.join(dn, \"data2\"), \"/data\")\n a.to_hdf(fn, \"/data\", mode=\"a\")\n out = dd.read_hdf(fn, \"/data*\")\n assert_eq(df.append(df), out)\n\n # appending two partitions to existing data\n a = dd.from_pandas(df, 2)\n with tmpdir() as dn:\n fn = os.path.join(dn, \"data*\")\n a.to_hdf(os.path.join(dn, \"data2\"), \"/data\")\n a.to_hdf(fn, \"/data\", mode=\"a\")\n out = dd.read_hdf(fn, \"/data\")\n assert_eq(df.append(df), out)\n\n # overwriting a file with two partitions\n a = dd.from_pandas(df, 2)\n with tmpdir() as dn:\n fn = os.path.join(dn, \"data*\")\n a.to_hdf(os.path.join(dn, \"data1\"), \"/data\")\n a.to_hdf(fn, \"/data\", mode=\"w\")\n out = dd.read_hdf(fn, \"/data\")\n assert_eq(df, out)\n\n # overwriting a single partition, keeping other partitions\n a = dd.from_pandas(df, 2)\n with tmpdir() as dn:\n fn = os.path.join(dn, \"data*\")\n a.to_hdf(os.path.join(dn, \"data1\"), \"/data\")\n a.to_hdf(fn, \"/data\", mode=\"a\", append=False)\n out = dd.read_hdf(fn, \"/data\")\n assert_eq(df.append(df), out)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_link_optimizations_test_to_hdf_link_optimizations.None_2.assert_dependency_depth_d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_link_optimizations_test_to_hdf_link_optimizations.None_2.assert_dependency_depth_d", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 302, "end_line": 367, "span_ids": ["test_to_hdf_link_optimizations"], "tokens": 547}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_hdf_link_optimizations():\n \"\"\"testing dask link levels is correct by calculating the depth of the dask graph\"\"\"\n pytest.importorskip(\"tables\")\n df16 = pd.DataFrame(\n {\n \"x\": [\n \"a\",\n \"b\",\n \"c\",\n \"d\",\n \"e\",\n \"f\",\n \"g\",\n \"h\",\n \"i\",\n \"j\",\n \"k\",\n \"l\",\n \"m\",\n \"n\",\n \"o\",\n \"p\",\n ],\n \"y\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],\n },\n index=[\n 1.0,\n 2.0,\n 3.0,\n 4.0,\n 5.0,\n 6.0,\n 7.0,\n 8.0,\n 9.0,\n 10.0,\n 11.0,\n 12.0,\n 13.0,\n 14.0,\n 15.0,\n 16.0,\n ],\n )\n a = dd.from_pandas(df16, 16)\n\n # saving to multiple hdf files, no links are needed\n # expected layers: from_pandas, to_hdf, list = depth of 3\n with tmpdir() as dn:\n fn = os.path.join(dn, \"data*\")\n d = a.to_hdf(fn, \"/data\", compute=False)\n assert dependency_depth(d.dask) == 3\n\n # saving to a single hdf file with multiple nodes\n # all subsequent nodes depend on the first\n # expected layers: from_pandas, first to_hdf(creates file+node), subsequent to_hdfs, list = 4\n with tmpfile() as fn:\n d = a.to_hdf(fn, \"/data*\", compute=False)\n assert dependency_depth(d.dask) == 4\n\n # saving to a single hdf file with a single node\n # every node depends on the previous node\n # expected layers: from_pandas, to_hdf times npartitions(15), list = 2 + npartitions = 17\n with tmpfile() as fn:\n d = a.to_hdf(fn, \"/data\", compute=False)\n assert dependency_depth(d.dask) == 2 + a.npartitions", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_lock_delays_test_to_hdf_lock_delays.with_tmpdir_as_dn_.assert_eq_df16_out_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_lock_delays_test_to_hdf_lock_delays.with_tmpdir_as_dn_.assert_eq_df16_out_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 370, "end_line": 437, "span_ids": ["test_to_hdf_lock_delays"], "tokens": 487}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", 
"end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.slow\ndef test_to_hdf_lock_delays():\n pytest.importorskip(\"tables\")\n df16 = pd.DataFrame(\n {\n \"x\": [\n \"a\",\n \"b\",\n \"c\",\n \"d\",\n \"e\",\n \"f\",\n \"g\",\n \"h\",\n \"i\",\n \"j\",\n \"k\",\n \"l\",\n \"m\",\n \"n\",\n \"o\",\n \"p\",\n ],\n \"y\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],\n },\n index=[\n 1.0,\n 2.0,\n 3.0,\n 4.0,\n 5.0,\n 6.0,\n 7.0,\n 8.0,\n 9.0,\n 10.0,\n 11.0,\n 12.0,\n 13.0,\n 14.0,\n 15.0,\n 16.0,\n ],\n )\n a = dd.from_pandas(df16, 16)\n\n # adding artificial delays to make sure last tasks finish first\n # that's a way to simulate last tasks finishing last\n def delayed_nop(i):\n if i[1] < 10:\n sleep(0.1 * (10 - i[1]))\n return i\n\n # saving to multiple hdf nodes\n with tmpfile() as fn:\n a = a.apply(delayed_nop, axis=1, meta=a)\n a.to_hdf(fn, \"/data*\")\n out = dd.read_hdf(fn, \"/data*\")\n assert_eq(df16, out)\n\n # saving to multiple hdf files\n # adding artificial delays to make sure last tasks finish first\n with tmpdir() as dn:\n fn = os.path.join(dn, \"data*\")\n a = a.apply(delayed_nop, axis=1, meta=a)\n a.to_hdf(fn, \"/data\")\n out = dd.read_hdf(fn, \"/data\")\n assert_eq(df16, out)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_exceptions_test_to_hdf_exceptions.with_tmpfile_as_fn_.with_pd_HDFStore_fn_as_h.with_pytest_raises_ValueE.a_to_hdf_hdf_data____": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_exceptions_test_to_hdf_exceptions.with_tmpfile_as_fn_.with_pd_HDFStore_fn_as_h.with_pytest_raises_ValueE.a_to_hdf_hdf_data____", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 440, "end_line": 457, "span_ids": ["test_to_hdf_exceptions"], "tokens": 184}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_hdf_exceptions():\n pytest.importorskip(\"tables\")\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]\n )\n a = dd.from_pandas(df, 1)\n\n # triggering too many asterisks error\n with tmpdir() as dn:\n with pytest.raises(ValueError):\n fn = os.path.join(dn, \"data_*.h5\")\n a.to_hdf(fn, \"/data_*\")\n\n # triggering too many asterisks error\n with tmpfile() as fn:\n with pd.HDFStore(fn) as hdf:\n with pytest.raises(ValueError):\n a.to_hdf(hdf, \"/data_*_*\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_schedulers_test_to_hdf_schedulers.None_2.assert_eq_df_out_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_schedulers_test_to_hdf_schedulers.None_2.assert_eq_df_out_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 460, "end_line": 524, "span_ids": ["test_to_hdf_schedulers"], "tokens": 477}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"scheduler\", [\"sync\", \"threads\", \"processes\"])\n@pytest.mark.parametrize(\"npartitions\", [1, 4, 10])\ndef test_to_hdf_schedulers(scheduler, npartitions):\n pytest.importorskip(\"tables\")\n df = pd.DataFrame(\n {\n \"x\": [\n \"a\",\n \"b\",\n \"c\",\n \"d\",\n \"e\",\n \"f\",\n \"g\",\n \"h\",\n \"i\",\n \"j\",\n \"k\",\n \"l\",\n \"m\",\n \"n\",\n \"o\",\n \"p\",\n ],\n \"y\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],\n },\n index=[\n 1.0,\n 2.0,\n 3.0,\n 4.0,\n 5.0,\n 6.0,\n 7.0,\n 8.0,\n 9.0,\n 10.0,\n 11.0,\n 12.0,\n 13.0,\n 14.0,\n 15.0,\n 16.0,\n ],\n )\n a = dd.from_pandas(df, npartitions=npartitions)\n\n # test single file single node\n with tmpfile(\"h5\") as fn:\n a.to_hdf(fn, \"/data\", scheduler=scheduler)\n out = pd.read_hdf(fn, \"/data\")\n assert_eq(df, out)\n\n # test multiple files single node\n with tmpdir() as dn:\n fn = os.path.join(dn, \"data_*.h5\")\n a.to_hdf(fn, \"/data\", scheduler=scheduler)\n out = dd.read_hdf(fn, \"/data\")\n assert_eq(df, out)\n\n # test single file multiple nodes\n with tmpfile(\"h5\") as fn:\n a.to_hdf(fn, \"/data*\", scheduler=scheduler)\n out = dd.read_hdf(fn, \"/data*\")\n assert_eq(df, out)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_kwargs_test_to_hdf_kwargs.None_1.tm_assert_frame_equal_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_kwargs_test_to_hdf_kwargs.None_1.tm_assert_frame_equal_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 527, "end_line": 540, "span_ids": ["test_to_hdf_kwargs"], "tokens": 170}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_hdf_kwargs():\n pytest.importorskip(\"tables\")\n df = pd.DataFrame({\"A\": [\"a\", \"aaaa\"]})\n ddf = dd.from_pandas(df, npartitions=2)\n with 
tmpfile(\"h5\") as fn:\n ddf.to_hdf(fn, \"foo4\", format=\"table\", min_itemsize=4)\n df2 = pd.read_hdf(fn, \"foo4\")\n tm.assert_frame_equal(df, df2)\n\n # test shorthand 't' for table\n with tmpfile(\"h5\") as fn:\n ddf.to_hdf(fn, \"foo4\", format=\"t\", min_itemsize=4)\n df2 = pd.read_hdf(fn, \"foo4\")\n tm.assert_frame_equal(df, df2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_fmt_warns_test_to_fmt_warns.with_tmpdir_as_dn_.with_pytest_warns_None_.a_to_csv_fn_name_functio": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_fmt_warns_test_to_fmt_warns.with_tmpdir_as_dn_.with_pytest_warns_None_.a_to_csv_fn_name_functio", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 543, "end_line": 597, "span_ids": ["test_to_fmt_warns"], "tokens": 353}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_fmt_warns():\n pytest.importorskip(\"tables\")\n df16 = pd.DataFrame(\n {\n \"x\": [\n \"a\",\n \"b\",\n \"c\",\n \"d\",\n \"e\",\n \"f\",\n \"g\",\n \"h\",\n \"i\",\n \"j\",\n \"k\",\n \"l\",\n \"m\",\n \"n\",\n \"o\",\n \"p\",\n ],\n \"y\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],\n },\n index=[\n 1.0,\n 2.0,\n 3.0,\n 4.0,\n 5.0,\n 6.0,\n 7.0,\n 8.0,\n 9.0,\n 10.0,\n 11.0,\n 12.0,\n 13.0,\n 14.0,\n 15.0,\n 16.0,\n ],\n )\n a = dd.from_pandas(df16, 16)\n\n # testing warning when breaking order\n with tmpfile(\"h5\") as fn:\n with pytest.warns(None):\n a.to_hdf(fn, \"/data*\", name_function=str)\n\n # testing warning when breaking order\n with tmpdir() as dn:\n with pytest.warns(None):\n fn = os.path.join(dn, \"data_*.csv\")\n a.to_csv(fn, name_function=str)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_read_hdf_test_read_hdf.None_2.compare_a_compute_sort": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_read_hdf_test_read_hdf.None_2.compare_a_compute_sort", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 600, "end_line": 645, "span_ids": ["test_read_hdf"], "tokens": 423}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": 
"@pytest.mark.parametrize(\n \"data, compare\",\n [\n (\n pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]},\n index=[1.0, 2.0, 3.0, 4.0],\n ),\n tm.assert_frame_equal,\n ),\n (pd.Series([1, 2, 3, 4], name=\"a\"), tm.assert_series_equal),\n ],\n)\ndef test_read_hdf(data, compare):\n pytest.importorskip(\"tables\")\n with tmpfile(\"h5\") as fn:\n data.to_hdf(fn, \"/data\")\n try:\n dd.read_hdf(fn, \"data\", chunksize=2, mode=\"r\")\n assert False\n except TypeError as e:\n assert \"format='table'\" in str(e)\n\n with tmpfile(\"h5\") as fn:\n data.to_hdf(fn, \"/data\", format=\"table\")\n a = dd.read_hdf(fn, \"/data\", chunksize=2, mode=\"r\")\n assert a.npartitions == 2\n\n compare(a.compute(), data)\n\n compare(\n dd.read_hdf(fn, \"/data\", chunksize=2, start=1, stop=3, mode=\"r\").compute(),\n pd.read_hdf(fn, \"/data\", start=1, stop=3),\n )\n\n assert sorted(dd.read_hdf(fn, \"/data\", mode=\"r\").dask) == sorted(\n dd.read_hdf(fn, \"/data\", mode=\"r\").dask\n )\n\n with tmpfile(\"h5\") as fn:\n sorted_data = data.sort_index()\n sorted_data.to_hdf(fn, \"/data\", format=\"table\")\n a = dd.read_hdf(fn, \"/data\", chunksize=2, sorted_index=True, mode=\"r\")\n assert a.npartitions == 2\n\n compare(a.compute(), sorted_data)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_read_hdf_multiply_open_test_read_hdf_multiply_open.with_tmpfile_h5_as_fn_.with_pd_HDFStore_fn_mode.dd_read_hdf_fn_data_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_read_hdf_multiply_open_test_read_hdf_multiply_open.with_tmpfile_h5_as_fn_.with_pd_HDFStore_fn_mode.dd_read_hdf_fn_data_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 648, "end_line": 658, "span_ids": ["test_read_hdf_multiply_open"], "tokens": 154}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_hdf_multiply_open():\n \"\"\"Test that we can read from a file that's already opened elsewhere in\n read-only mode.\"\"\"\n pytest.importorskip(\"tables\")\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]\n )\n with tmpfile(\"h5\") as fn:\n df.to_hdf(fn, \"/data\", format=\"table\")\n with pd.HDFStore(fn, mode=\"r\"):\n dd.read_hdf(fn, \"/data\", chunksize=2, mode=\"r\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_read_hdf_multiple_test_read_hdf_multiple.with_tmpfile_h5_as_fn_.assert_eq_a_r_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_read_hdf_multiple_test_read_hdf_multiple.with_tmpfile_h5_as_fn_.assert_eq_a_r_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 661, "end_line": 711, "span_ids": ["test_read_hdf_multiple"], "tokens": 330}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_hdf_multiple():\n pytest.importorskip(\"tables\")\n df = pd.DataFrame(\n {\n \"x\": [\n \"a\",\n \"b\",\n \"c\",\n \"d\",\n \"e\",\n \"f\",\n \"g\",\n \"h\",\n \"i\",\n \"j\",\n \"k\",\n \"l\",\n \"m\",\n \"n\",\n \"o\",\n \"p\",\n ],\n \"y\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],\n },\n index=[\n 1.0,\n 2.0,\n 3.0,\n 4.0,\n 5.0,\n 6.0,\n 7.0,\n 8.0,\n 9.0,\n 10.0,\n 11.0,\n 12.0,\n 13.0,\n 14.0,\n 15.0,\n 16.0,\n ],\n )\n a = dd.from_pandas(df, 16)\n\n with tmpfile(\"h5\") as fn:\n a.to_hdf(fn, \"/data*\")\n r = dd.read_hdf(fn, \"/data*\", sorted_index=True)\n assert a.npartitions == r.npartitions\n assert a.divisions == r.divisions\n assert_eq(a, r)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_read_hdf_start_stop_values_test_read_hdf_start_stop_values.with_tmpfile_h5_as_fn_.None_2.dd_read_hdf_fn_data_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_read_hdf_start_stop_values_test_read_hdf_start_stop_values.with_tmpfile_h5_as_fn_.None_2.dd_read_hdf_fn_data_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 714, "end_line": 729, "span_ids": ["test_read_hdf_start_stop_values"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_hdf_start_stop_values():\n pytest.importorskip(\"tables\")\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]\n )\n with tmpfile(\"h5\") as fn:\n df.to_hdf(fn, \"/data\", format=\"table\")\n\n with pytest.raises(ValueError, match=\"number of rows\"):\n dd.read_hdf(fn, \"/data\", stop=10)\n\n with pytest.raises(ValueError, match=\"is above or equal to\"):\n dd.read_hdf(fn, \"/data\", start=10)\n\n with pytest.raises(ValueError, match=\"positive integer\"):\n dd.read_hdf(fn, \"/data\", chunksize=-1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": 
"1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_hdf_globbing_test_hdf_globbing.with_tmpdir_as_tdir_.with_dask_config_set_sche.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_hdf_globbing_test_hdf_globbing.with_tmpdir_as_tdir_.with_dask_config_set_sche.None_4", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 732, "end_line": 766, "span_ids": ["test_hdf_globbing"], "tokens": 469}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_hdf_globbing():\n pytest.importorskip(\"tables\")\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]\n )\n\n with tmpdir() as tdir:\n df.to_hdf(os.path.join(tdir, \"one.h5\"), \"/foo/data\", format=\"table\")\n df.to_hdf(os.path.join(tdir, \"two.h5\"), \"/bar/data\", format=\"table\")\n df.to_hdf(os.path.join(tdir, \"two.h5\"), \"/foo/data\", format=\"table\")\n\n with dask.config.set(scheduler=\"sync\"):\n res = dd.read_hdf(os.path.join(tdir, \"one.h5\"), \"/*/data\", chunksize=2)\n assert res.npartitions == 2\n tm.assert_frame_equal(res.compute(), df)\n\n res = dd.read_hdf(\n os.path.join(tdir, \"one.h5\"), \"/*/data\", chunksize=2, start=1, stop=3\n )\n expected = pd.read_hdf(\n os.path.join(tdir, \"one.h5\"), \"/foo/data\", start=1, stop=3\n )\n tm.assert_frame_equal(res.compute(), expected)\n\n res = dd.read_hdf(os.path.join(tdir, \"two.h5\"), \"/*/data\", chunksize=2)\n assert res.npartitions == 2 + 2\n tm.assert_frame_equal(res.compute(), pd.concat([df] * 2))\n\n res = dd.read_hdf(os.path.join(tdir, \"*.h5\"), \"/foo/data\", chunksize=2)\n assert res.npartitions == 2 + 2\n tm.assert_frame_equal(res.compute(), pd.concat([df] * 2))\n\n res = dd.read_hdf(os.path.join(tdir, \"*.h5\"), \"/*/data\", chunksize=2)\n assert res.npartitions == 2 + 2 + 2\n tm.assert_frame_equal(res.compute(), pd.concat([df] * 3))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_hdf_file_list_test_hdf_file_list.with_tmpdir_as_tdir_.with_dask_config_set_sche.tm_assert_frame_equal_res": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_hdf_file_list_test_hdf_file_list.with_tmpdir_as_tdir_.with_dask_config_set_sche.tm_assert_frame_equal_res", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 769, "end_line": 782, "span_ids": ["test_hdf_file_list"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_hdf_file_list():\n pytest.importorskip(\"tables\")\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]\n )\n\n with tmpdir() as tdir:\n df.iloc[:2].to_hdf(os.path.join(tdir, \"one.h5\"), \"dataframe\", format=\"table\")\n df.iloc[2:].to_hdf(os.path.join(tdir, \"two.h5\"), \"dataframe\", format=\"table\")\n\n with dask.config.set(scheduler=\"sync\"):\n input_files = [os.path.join(tdir, \"one.h5\"), os.path.join(tdir, \"two.h5\")]\n res = dd.read_hdf(input_files, \"dataframe\")\n tm.assert_frame_equal(res.compute(), df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_read_hdf_pattern_pathlike_test_read_hdf_pattern_pathlike.with_tmpfile_h5_as_fn_.assert_eq_res_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_read_hdf_pattern_pathlike_test_read_hdf_pattern_pathlike.with_tmpfile_h5_as_fn_.assert_eq_res_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 785, "end_line": 795, "span_ids": ["test_read_hdf_pattern_pathlike"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_hdf_pattern_pathlike():\n pytest.importorskip(\"tables\")\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]\n )\n\n with tmpfile(\"h5\") as fn:\n path = pathlib.Path(fn)\n df.to_hdf(path, \"dataframe\", format=\"table\")\n res = dd.read_hdf(path, \"dataframe\")\n assert_eq(res, df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_path_pathlike_test_read_hdf_doesnt_segfault.with_tmpfile_h5_as_fn_.assert_len_ddf_N": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_path_pathlike_test_read_hdf_doesnt_segfault.with_tmpfile_h5_as_fn_.assert_len_ddf_N", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 798, "end_line": 821, "span_ids": ["test_to_hdf_path_pathlike", "test_read_hdf_doesnt_segfault"], "tokens": 239}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_hdf_path_pathlike():\n pytest.importorskip(\"tables\")\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]\n )\n ddf = dd.from_pandas(df, npartitions=3)\n\n with tmpfile(\"h5\") as fn:\n path = pathlib.Path(fn)\n ddf.to_hdf(path, \"/data\")\n res = pd.read_hdf(path, \"/data\")\n assert_eq(res, ddf)\n\n\ndef test_read_hdf_doesnt_segfault():\n pytest.importorskip(\"tables\")\n with tmpfile(\"h5\") as fn:\n N = 40\n df = pd.DataFrame(np.random.randn(N, 3))\n with pd.HDFStore(fn, mode=\"w\") as store:\n store.append(\"/x\", df)\n\n ddf = dd.read_hdf(fn, \"/x\", chunksize=2)\n assert len(ddf) == N", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_hdf_filenames_test_hdf_filenames.os_remove_foo1_hdf5_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_hdf_filenames_test_hdf_filenames.os_remove_foo1_hdf5_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 824, "end_line": 832, "span_ids": ["test_hdf_filenames"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_hdf_filenames():\n pytest.importorskip(\"tables\")\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]\n )\n ddf = dd.from_pandas(df, npartitions=2)\n assert ddf.to_hdf(\"foo*.hdf5\", \"key\") == [\"foo0.hdf5\", \"foo1.hdf5\"]\n os.remove(\"foo0.hdf5\")\n os.remove(\"foo1.hdf5\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_hdf_path_exceptions_test_hdf_path_exceptions.with_pytest_raises_ValueE.dd_read_hdf_tmp_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_hdf_path_exceptions_test_hdf_path_exceptions.with_pytest_raises_ValueE.dd_read_hdf_tmp_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 835, "end_line": 847, "span_ids": ["test_hdf_path_exceptions"], "tokens": 112}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def 
test_hdf_path_exceptions():\n\n # single file doesn't exist\n with pytest.raises(IOError):\n dd.read_hdf(\"nonexistant_store_X34HJK\", \"/tmp\")\n\n # a file from a list of files doesn't exist\n with pytest.raises(IOError):\n dd.read_hdf([\"nonexistant_store_X34HJK\", \"nonexistant_store_UY56YH\"], \"/tmp\")\n\n # list of files is empty\n with pytest.raises(ValueError):\n dd.read_hdf([], \"/tmp\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_hdf_nonpandas_keys_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_hdf_nonpandas_keys_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 850, "end_line": 893, "span_ids": ["test_hdf_nonpandas_keys"], "tokens": 389}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n pd.__version__ < LooseVersion(\"0.24.2\"), reason=\"HDF key behaviour changed\"\n)\ndef test_hdf_nonpandas_keys():\n # https://github.com/dask/dask/issues/5934\n # TODO: maybe remove this if/when pandas copes with all keys\n\n tables = pytest.importorskip(\"tables\")\n import tables\n\n class Table1(tables.IsDescription):\n value1 = tables.Float32Col()\n\n class Table2(tables.IsDescription):\n value2 = tables.Float32Col()\n\n class Table3(tables.IsDescription):\n value3 = tables.Float32Col()\n\n with tmpfile(\"h5\") as path:\n with tables.open_file(path, mode=\"a\") as h5file:\n group = h5file.create_group(\"/\", \"group\")\n t = h5file.create_table(group, \"table1\", Table1, \"Table 1\")\n row = t.row\n row[\"value1\"] = 1\n row.append()\n t = h5file.create_table(group, \"table2\", Table2, \"Table 2\")\n row = t.row\n row[\"value2\"] = 1\n row.append()\n t = h5file.create_table(group, \"table3\", Table3, \"Table 3\")\n row = t.row\n row[\"value3\"] = 1\n row.append()\n\n # pandas keys should still work\n bar = pd.DataFrame(np.random.randn(10, 4))\n bar.to_hdf(path, \"/bar\", format=\"table\", mode=\"a\")\n\n dd.read_hdf(path, \"/group/table1\")\n dd.read_hdf(path, \"/group/table2\")\n dd.read_hdf(path, \"/group/table3\")\n dd.read_hdf(path, \"/bar\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_np_test_meta_from_array._Should_be_5_partitions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_np_test_meta_from_array._Should_be_5_partitions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 
45, "span_ids": ["imports", "test_meta_from_array"], "tokens": 369}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\nimport pandas as pd\n\nimport pytest\nfrom threading import Lock\nfrom multiprocessing.pool import ThreadPool\n\nimport dask.array as da\nimport dask.dataframe as dd\nfrom dask.dataframe._compat import tm\nfrom dask.dataframe.io.io import _meta_from_array\nfrom dask.delayed import Delayed, delayed\n\nfrom dask.utils import tmpfile\n\nfrom dask.dataframe.utils import assert_eq, is_categorical_dtype\n\n\n####################\n# Arrays and BColz #\n####################\n\n\ndef test_meta_from_array():\n x = np.array([[1, 2], [3, 4]], dtype=np.int64)\n res = _meta_from_array(x)\n assert isinstance(res, pd.DataFrame)\n assert res[0].dtype == np.int64\n assert res[1].dtype == np.int64\n tm.assert_index_equal(res.columns, pd.Index([0, 1]))\n\n x = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float64)\n res = _meta_from_array(x, columns=[\"a\", \"b\"])\n assert isinstance(res, pd.DataFrame)\n assert res[\"a\"].dtype == np.float64\n assert res[\"b\"].dtype == np.float64\n tm.assert_index_equal(res.columns, pd.Index([\"a\", \"b\"]))\n\n with pytest.raises(ValueError):\n _meta_from_array(x, columns=[\"a\", \"b\", \"c\"])\n\n np.random.seed(42)\n x = np.random.rand(201, 2)\n x = dd.from_array(x, chunksize=50, columns=[\"a\", \"b\"])\n assert len(x.divisions) == 6 # Should be 5 partitions and the end", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_meta_from_1darray_test_meta_from_1darray.with_pytest_raises_ValueE._meta_from_array_x_colum": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_meta_from_1darray_test_meta_from_1darray.with_pytest_raises_ValueE._meta_from_array_x_colum", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 48, "end_line": 67, "span_ids": ["test_meta_from_1darray"], "tokens": 199}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_meta_from_1darray():\n x = np.array([1.0, 2.0, 3.0], dtype=np.float64)\n res = _meta_from_array(x)\n assert isinstance(res, pd.Series)\n assert res.dtype == np.float64\n\n x = np.array([1, 2, 3], dtype=np.object_)\n res = _meta_from_array(x, columns=\"x\")\n assert isinstance(res, pd.Series)\n assert res.name == \"x\"\n assert res.dtype == np.object_\n\n x = np.array([1, 2, 3], dtype=np.object_)\n res = _meta_from_array(x, columns=[\"x\"])\n assert isinstance(res, pd.DataFrame)\n assert res[\"x\"].dtype == np.object_\n tm.assert_index_equal(res.columns, 
pd.Index([\"x\"]))\n\n with pytest.raises(ValueError):\n _meta_from_array(x, columns=[\"a\", \"b\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_meta_from_recarray_test_meta_from_recarray.with_pytest_raises_ValueE._meta_from_array_x_colum": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_meta_from_recarray_test_meta_from_recarray.with_pytest_raises_ValueE._meta_from_array_x_colum", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 70, "end_line": 87, "span_ids": ["test_meta_from_recarray"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_meta_from_recarray():\n x = np.array(\n [(i, i * 10) for i in range(10)], dtype=[(\"a\", np.float64), (\"b\", np.int64)]\n )\n res = _meta_from_array(x)\n assert isinstance(res, pd.DataFrame)\n assert res[\"a\"].dtype == np.float64\n assert res[\"b\"].dtype == np.int64\n tm.assert_index_equal(res.columns, pd.Index([\"a\", \"b\"]))\n\n res = _meta_from_array(x, columns=[\"b\", \"a\"])\n assert isinstance(res, pd.DataFrame)\n assert res[\"a\"].dtype == np.float64\n assert res[\"b\"].dtype == np.int64\n tm.assert_index_equal(res.columns, pd.Index([\"b\", \"a\"]))\n\n with pytest.raises(ValueError):\n _meta_from_array(x, columns=[\"a\", \"b\", \"c\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_array_test_from_array.with_pytest_raises_ValueE.dd_from_array_np_ones_sha": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_array_test_from_array.with_pytest_raises_ValueE.dd_from_array_np_ones_sha", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 90, "end_line": 105, "span_ids": ["test_from_array"], "tokens": 190}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_array():\n x = np.arange(10 * 3).reshape(10, 3)\n d = dd.from_array(x, chunksize=4)\n assert isinstance(d, dd.DataFrame)\n tm.assert_index_equal(d.columns, pd.Index([0, 1, 2]))\n assert d.divisions == (0, 4, 8, 9)\n assert (d.compute().values == x).all()\n\n d = dd.from_array(x, chunksize=4, columns=list(\"abc\"))\n 
assert isinstance(d, dd.DataFrame)\n tm.assert_index_equal(d.columns, pd.Index([\"a\", \"b\", \"c\"]))\n assert d.divisions == (0, 4, 8, 9)\n assert (d.compute().values == x).all()\n\n with pytest.raises(ValueError):\n dd.from_array(np.ones(shape=(10, 10, 10)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_array_with_record_dtype_test_from_array_with_record_dtype.assert_d_compute_to_re": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_array_with_record_dtype_test_from_array_with_record_dtype.assert_d_compute_to_re", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 108, "end_line": 115, "span_ids": ["test_from_array_with_record_dtype"], "tokens": 113}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_array_with_record_dtype():\n x = np.array([(i, i * 10) for i in range(10)], dtype=[(\"a\", \"i4\"), (\"b\", \"i4\")])\n d = dd.from_array(x, chunksize=4)\n assert isinstance(d, dd.DataFrame)\n assert list(d.columns) == [\"a\", \"b\"]\n assert d.divisions == (0, 4, 8, 9)\n\n assert (d.compute().to_records(index=False) == x).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_bcolz_multiple_threads_test_from_bcolz_multiple_threads.pool_map_check_range_5_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_bcolz_multiple_threads_test_from_bcolz_multiple_threads.pool_map_check_range_5_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 118, "end_line": 144, "span_ids": ["test_from_bcolz_multiple_threads"], "tokens": 318}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_bcolz_multiple_threads():\n bcolz = pytest.importorskip(\"bcolz\")\n pool = ThreadPool(processes=5)\n\n def check(i):\n t = bcolz.ctable(\n [[1, 2, 3], [1.0, 2.0, 3.0], [\"a\", \"b\", \"a\"]], names=[\"x\", \"y\", \"a\"]\n )\n d = dd.from_bcolz(t, chunksize=2)\n assert d.npartitions == 2\n assert is_categorical_dtype(d.dtypes[\"a\"])\n assert list(d.x.compute(scheduler=\"sync\")) == [1, 2, 3]\n assert list(d.a.compute(scheduler=\"sync\")) == 
[\"a\", \"b\", \"a\"]\n\n d = dd.from_bcolz(t, chunksize=2, index=\"x\")\n L = list(d.index.compute(scheduler=\"sync\"))\n assert L == [1, 2, 3] or L == [1, 3, 2]\n\n # Names\n assert sorted(dd.from_bcolz(t, chunksize=2).dask) == sorted(\n dd.from_bcolz(t, chunksize=2).dask\n )\n assert sorted(dd.from_bcolz(t, chunksize=2).dask) != sorted(\n dd.from_bcolz(t, chunksize=3).dask\n )\n\n pool.map(check, range(5))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_bcolz_test_from_bcolz.None_8": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_bcolz_test_from_bcolz.None_8", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 147, "end_line": 178, "span_ids": ["test_from_bcolz"], "tokens": 378}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_bcolz():\n bcolz = pytest.importorskip(\"bcolz\")\n\n t = bcolz.ctable(\n [[1, 2, 3], [1.0, 2.0, 3.0], [\"a\", \"b\", \"a\"]], names=[\"x\", \"y\", \"a\"]\n )\n d = dd.from_bcolz(t, chunksize=2)\n assert d.npartitions == 2\n assert is_categorical_dtype(d.dtypes[\"a\"])\n assert list(d.x.compute(scheduler=\"sync\")) == [1, 2, 3]\n assert list(d.a.compute(scheduler=\"sync\")) == [\"a\", \"b\", \"a\"]\n L = list(d.index.compute(scheduler=\"sync\"))\n assert L == [0, 1, 2]\n\n d = dd.from_bcolz(t, chunksize=2, index=\"x\")\n L = list(d.index.compute(scheduler=\"sync\"))\n assert L == [1, 2, 3] or L == [1, 3, 2]\n\n # Names\n assert sorted(dd.from_bcolz(t, chunksize=2).dask) == sorted(\n dd.from_bcolz(t, chunksize=2).dask\n )\n assert sorted(dd.from_bcolz(t, chunksize=2).dask) != sorted(\n dd.from_bcolz(t, chunksize=3).dask\n )\n\n dsk = dd.from_bcolz(t, chunksize=3).dask\n\n t.append((4, 4.0, \"b\"))\n t.flush()\n\n assert sorted(dd.from_bcolz(t, chunksize=2).dask) != sorted(dsk)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_bcolz_no_lock_test_from_bcolz_no_lock.assert_not_any_isinstance": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_bcolz_no_lock_test_from_bcolz_no_lock.assert_not_any_isinstance", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 181, "end_line": 194, "span_ids": ["test_from_bcolz_no_lock"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", 
"end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_bcolz_no_lock():\n bcolz = pytest.importorskip(\"bcolz\")\n locktype = type(Lock())\n\n t = bcolz.ctable(\n [[1, 2, 3], [1.0, 2.0, 3.0], [\"a\", \"b\", \"a\"]], names=[\"x\", \"y\", \"a\"], chunklen=2\n )\n a = dd.from_bcolz(t, chunksize=2)\n b = dd.from_bcolz(t, chunksize=2, lock=True)\n c = dd.from_bcolz(t, chunksize=2, lock=False)\n assert_eq(a, b)\n assert_eq(a, c)\n\n assert not any(isinstance(item, locktype) for v in c.dask.values() for item in v)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_bcolz_filename_test_from_bcolz_filename.with_tmpfile_bcolz_as.assert_list_d_x_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_bcolz_filename_test_from_bcolz_filename.with_tmpfile_bcolz_as.assert_list_d_x_compute_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 197, "end_line": 209, "span_ids": ["test_from_bcolz_filename"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_bcolz_filename():\n bcolz = pytest.importorskip(\"bcolz\")\n\n with tmpfile(\".bcolz\") as fn:\n t = bcolz.ctable(\n [[1, 2, 3], [1.0, 2.0, 3.0], [\"a\", \"b\", \"a\"]],\n names=[\"x\", \"y\", \"a\"],\n rootdir=fn,\n )\n t.flush()\n\n d = dd.from_bcolz(fn, chunksize=2)\n assert list(d.x.compute()) == [1, 2, 3]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_bcolz_column_order_test_from_bcolz_column_order.assert_list_df_loc_0_com": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_bcolz_column_order_test_from_bcolz_column_order.assert_list_df_loc_0_com", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 212, "end_line": 219, "span_ids": ["test_from_bcolz_column_order"], "tokens": 116}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_bcolz_column_order():\n bcolz = pytest.importorskip(\"bcolz\")\n\n t = bcolz.ctable(\n [[1, 
2, 3], [1.0, 2.0, 3.0], [\"a\", \"b\", \"a\"]], names=[\"x\", \"y\", \"a\"]\n )\n df = dd.from_bcolz(t, chunksize=2)\n assert list(df.loc[0].compute().columns) == [\"x\", \"y\", \"a\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_pandas_dataframe_test_from_pandas_dataframe.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_pandas_dataframe_test_from_pandas_dataframe.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 222, "end_line": 244, "span_ids": ["test_from_pandas_dataframe"], "tokens": 279}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_pandas_dataframe():\n a = list(\"aaaaaaabbbbbbbbccccccc\")\n df = pd.DataFrame(\n dict(a=a, b=np.random.randn(len(a))),\n index=pd.date_range(start=\"20120101\", periods=len(a)),\n )\n ddf = dd.from_pandas(df, 3)\n assert len(ddf.dask) == 3\n assert len(ddf.divisions) == len(ddf.dask) + 1\n assert isinstance(ddf.divisions[0], type(df.index[0]))\n tm.assert_frame_equal(df, ddf.compute())\n ddf = dd.from_pandas(df, chunksize=8)\n msg = \"Exactly one of npartitions and chunksize must be specified.\"\n with pytest.raises(ValueError) as err:\n dd.from_pandas(df, npartitions=2, chunksize=2)\n assert msg in str(err.value)\n with pytest.raises((ValueError, AssertionError)) as err:\n dd.from_pandas(df)\n assert msg in str(err.value)\n assert len(ddf.dask) == 3\n assert len(ddf.divisions) == len(ddf.dask) + 1\n assert isinstance(ddf.divisions[0], type(df.index[0]))\n tm.assert_frame_equal(df, ddf.compute())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_pandas_small_test_from_pandas_small.for_sort_in_True_False_.for_i_in_0_2_.assert_eq_s_ds_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_pandas_small_test_from_pandas_small.for_sort_in_True_False_.for_i_in_0_2_.assert_eq_s_ds_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 247, "end_line": 268, "span_ids": ["test_from_pandas_small"], "tokens": 234}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def 
test_from_pandas_small():\n df = pd.DataFrame({\"x\": [1, 2, 3]})\n for i in [1, 2, 30]:\n a = dd.from_pandas(df, i)\n assert len(a.compute()) == 3\n assert a.divisions[0] == 0\n assert a.divisions[-1] == 2\n\n a = dd.from_pandas(df, chunksize=i)\n assert len(a.compute()) == 3\n assert a.divisions[0] == 0\n assert a.divisions[-1] == 2\n\n for sort in [True, False]:\n for i in [0, 2]:\n df = pd.DataFrame({\"x\": [0] * i})\n ddf = dd.from_pandas(df, npartitions=5, sort=sort)\n assert_eq(df, ddf)\n\n s = pd.Series([0] * i, name=\"x\", dtype=int)\n ds = dd.from_pandas(s, npartitions=5, sort=sort)\n assert_eq(s, ds)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_pandas_npartitions_is_accurate_test_from_pandas_npartitions_is_accurate.assert_dd_from_pandas_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_pandas_npartitions_is_accurate_test_from_pandas_npartitions_is_accurate.assert_dd_from_pandas_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 271, "end_line": 276, "span_ids": ["test_from_pandas_npartitions_is_accurate"], "tokens": 108}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"n\", [1, 2, 4, 5])\ndef test_from_pandas_npartitions_is_accurate(n):\n df = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 5, 6], \"y\": list(\"abdabd\")}, index=[10, 20, 30, 40, 50, 60]\n )\n assert dd.from_pandas(df, npartitions=n).npartitions <= n", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_pandas_series_test_from_pandas_series.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_pandas_series_test_from_pandas_series.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 279, "end_line": 292, "span_ids": ["test_from_pandas_series"], "tokens": 168}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_pandas_series():\n n = 20\n s = pd.Series(np.random.randn(n), index=pd.date_range(start=\"20120101\", periods=n))\n ds = dd.from_pandas(s, 3)\n assert len(ds.dask) == 3\n assert len(ds.divisions) == 
len(ds.dask) + 1\n assert isinstance(ds.divisions[0], type(s.index[0]))\n tm.assert_series_equal(s, ds.compute())\n\n ds = dd.from_pandas(s, chunksize=8)\n assert len(ds.dask) == 3\n assert len(ds.divisions) == len(ds.dask) + 1\n assert isinstance(ds.divisions[0], type(s.index[0]))\n tm.assert_series_equal(s, ds.compute())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_pandas_non_sorted_test_from_pandas_single_row.assert_eq_ddf_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_pandas_non_sorted_test_from_pandas_single_row.assert_eq_ddf_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 295, "end_line": 310, "span_ids": ["test_from_pandas_non_sorted", "test_from_pandas_single_row"], "tokens": 168}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_pandas_non_sorted():\n df = pd.DataFrame({\"x\": [1, 2, 3]}, index=[3, 1, 2])\n ddf = dd.from_pandas(df, npartitions=2, sort=False)\n assert not ddf.known_divisions\n assert_eq(df, ddf)\n\n ddf = dd.from_pandas(df, chunksize=2, sort=False)\n assert not ddf.known_divisions\n assert_eq(df, ddf)\n\n\ndef test_from_pandas_single_row():\n df = pd.DataFrame({\"x\": [1]}, index=[1])\n ddf = dd.from_pandas(df, npartitions=1)\n assert ddf.divisions == (1, 1)\n assert_eq(ddf, df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_pandas_with_datetime_index_test_from_pandas_with_datetime_index.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_pandas_with_datetime_index_test_from_pandas_with_datetime_index.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 313, "end_line": 334, "span_ids": ["test_from_pandas_with_datetime_index"], "tokens": 175}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_pandas_with_datetime_index():\n df = pd.DataFrame(\n {\n \"Date\": [\n \"2015-08-28\",\n \"2015-08-27\",\n \"2015-08-26\",\n \"2015-08-25\",\n \"2015-08-24\",\n \"2015-08-21\",\n \"2015-08-20\",\n \"2015-08-19\",\n \"2015-08-18\",\n ],\n \"Val\": list(range(9)),\n }\n 
)\n df.Date = df.Date.astype(\"datetime64[ns]\")\n ddf = dd.from_pandas(df, 2)\n assert_eq(df, ddf)\n ddf = dd.from_pandas(df, chunksize=2)\n assert_eq(df, ddf)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_DataFrame_from_dask_array_test_DataFrame_from_dask_array.assert_df2_divisions_d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_DataFrame_from_dask_array_test_DataFrame_from_dask_array.assert_df2_divisions_d", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 337, "end_line": 350, "span_ids": ["test_DataFrame_from_dask_array"], "tokens": 177}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_DataFrame_from_dask_array():\n x = da.ones((10, 3), chunks=(4, 2))\n\n df = dd.from_dask_array(x, [\"a\", \"b\", \"c\"])\n assert isinstance(df, dd.DataFrame)\n tm.assert_index_equal(df.columns, pd.Index([\"a\", \"b\", \"c\"]))\n assert list(df.divisions) == [0, 4, 8, 9]\n assert (df.compute(scheduler=\"sync\").values == x.compute(scheduler=\"sync\")).all()\n\n # dd.from_array should re-route to from_dask_array\n df2 = dd.from_array(x, columns=[\"a\", \"b\", \"c\"])\n assert isinstance(df2, dd.DataFrame)\n tm.assert_index_equal(df2.columns, df.columns)\n assert df2.divisions == df.divisions", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_Series_from_dask_array_test_Series_from_dask_array.assert_eq_ser_ser2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_Series_from_dask_array_test_Series_from_dask_array.assert_eq_ser_ser2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 353, "end_line": 369, "span_ids": ["test_Series_from_dask_array"], "tokens": 161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_Series_from_dask_array():\n x = da.ones(10, chunks=4)\n\n ser = dd.from_dask_array(x, \"a\")\n assert isinstance(ser, dd.Series)\n assert ser.name == \"a\"\n assert list(ser.divisions) == [0, 4, 8, 9]\n assert (ser.compute(scheduler=\"sync\").values == x.compute(scheduler=\"sync\")).all()\n\n ser = dd.from_dask_array(x)\n assert 
isinstance(ser, dd.Series)\n assert ser.name is None\n\n # dd.from_array should re-route to from_dask_array\n ser2 = dd.from_array(x)\n assert isinstance(ser2, dd.Series)\n assert_eq(ser, ser2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_dask_array_index_test_from_dask_array_index_raises.assert_m_match_4_2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_dask_array_index_test_from_dask_array_index_raises.assert_m_match_4_2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 372, "end_line": 395, "span_ids": ["test_from_dask_array_index", "test_from_dask_array_index_raises"], "tokens": 230}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"as_frame\", [True, False])\ndef test_from_dask_array_index(as_frame):\n s = dd.from_pandas(pd.Series(range(10), index=list(\"abcdefghij\")), npartitions=3)\n if as_frame:\n s = s.to_frame()\n result = dd.from_dask_array(s.values, index=s.index)\n assert_eq(s, result)\n\n\ndef test_from_dask_array_index_raises():\n x = da.random.uniform(size=(10,), chunks=(5,))\n with pytest.raises(ValueError) as m:\n dd.from_dask_array(x, index=pd.Index(np.arange(10)))\n assert m.match(\"must be an instance\")\n\n a = dd.from_pandas(pd.Series(range(12)), npartitions=2)\n b = dd.from_pandas(pd.Series(range(12)), npartitions=4)\n with pytest.raises(ValueError) as m:\n dd.from_dask_array(a.values, index=b.index)\n\n assert m.match(\"index\")\n assert m.match(\"number\")\n assert m.match(\"blocks\")\n assert m.match(\"4 != 2\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_dask_array_compat_numpy_array_test_from_dask_array_compat_numpy_array.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_dask_array_compat_numpy_array_test_from_dask_array_compat_numpy_array.None_3", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 398, "end_line": 432, "span_ids": ["test_from_dask_array_compat_numpy_array"], "tokens": 379}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def 
test_from_dask_array_compat_numpy_array():\n x = da.ones((3, 3, 3), chunks=2)\n\n with pytest.raises(ValueError):\n dd.from_dask_array(x) # dask\n\n with pytest.raises(ValueError):\n dd.from_array(x.compute()) # numpy\n\n x = da.ones((10, 3), chunks=(3, 3))\n d1 = dd.from_dask_array(x) # dask\n assert isinstance(d1, dd.DataFrame)\n assert (d1.compute().values == x.compute()).all()\n tm.assert_index_equal(d1.columns, pd.Index([0, 1, 2]))\n\n d2 = dd.from_array(x.compute()) # numpy\n assert isinstance(d2, dd.DataFrame)\n assert (d2.compute().values == x.compute()).all()\n tm.assert_index_equal(d2.columns, pd.Index([0, 1, 2]))\n\n with pytest.raises(ValueError):\n dd.from_dask_array(x, columns=[\"a\"]) # dask\n\n with pytest.raises(ValueError):\n dd.from_array(x.compute(), columns=[\"a\"]) # numpy\n\n d1 = dd.from_dask_array(x, columns=[\"a\", \"b\", \"c\"]) # dask\n assert isinstance(d1, dd.DataFrame)\n assert (d1.compute().values == x.compute()).all()\n tm.assert_index_equal(d1.columns, pd.Index([\"a\", \"b\", \"c\"]))\n\n d2 = dd.from_array(x.compute(), columns=[\"a\", \"b\", \"c\"]) # numpy\n assert isinstance(d2, dd.DataFrame)\n assert (d2.compute().values == x.compute()).all()\n tm.assert_index_equal(d2.columns, pd.Index([\"a\", \"b\", \"c\"]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_dask_array_compat_numpy_array_1d_test_from_dask_array_compat_numpy_array_1d.tm_assert_index_equal_d2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_dask_array_compat_numpy_array_1d_test_from_dask_array_compat_numpy_array_1d.tm_assert_index_equal_d2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 435, "end_line": 467, "span_ids": ["test_from_dask_array_compat_numpy_array_1d"], "tokens": 342}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_dask_array_compat_numpy_array_1d():\n\n x = da.ones(10, chunks=3)\n d1 = dd.from_dask_array(x) # dask\n assert isinstance(d1, dd.Series)\n assert (d1.compute().values == x.compute()).all()\n assert d1.name is None\n\n d2 = dd.from_array(x.compute()) # numpy\n assert isinstance(d2, dd.Series)\n assert (d2.compute().values == x.compute()).all()\n assert d2.name is None\n\n d1 = dd.from_dask_array(x, columns=\"name\") # dask\n assert isinstance(d1, dd.Series)\n assert (d1.compute().values == x.compute()).all()\n assert d1.name == \"name\"\n\n d2 = dd.from_array(x.compute(), columns=\"name\") # numpy\n assert isinstance(d2, dd.Series)\n assert (d2.compute().values == x.compute()).all()\n assert d2.name == \"name\"\n\n # passing list via columns results in DataFrame\n d1 = dd.from_dask_array(x, columns=[\"name\"]) # dask\n assert isinstance(d1, dd.DataFrame)\n assert (d1.compute().values == x.compute()).all()\n tm.assert_index_equal(d1.columns, pd.Index([\"name\"]))\n\n 
d2 = dd.from_array(x.compute(), columns=[\"name\"]) # numpy\n assert isinstance(d2, dd.DataFrame)\n assert (d2.compute().values == x.compute()).all()\n tm.assert_index_equal(d2.columns, pd.Index([\"name\"]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_dask_array_struct_dtype_test_from_dask_array_struct_dtype.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_dask_array_struct_dtype_test_from_dask_array_struct_dtype.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 470, "end_line": 479, "span_ids": ["test_from_dask_array_struct_dtype"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_dask_array_struct_dtype():\n x = np.array([(1, \"a\"), (2, \"b\")], dtype=[(\"a\", \"i4\"), (\"b\", \"object\")])\n y = da.from_array(x, chunks=(1,))\n df = dd.from_dask_array(y)\n tm.assert_index_equal(df.columns, pd.Index([\"a\", \"b\"]))\n assert_eq(df, pd.DataFrame(x))\n\n assert_eq(\n dd.from_dask_array(y, columns=[\"b\", \"a\"]), pd.DataFrame(x, columns=[\"b\", \"a\"])\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_dask_array_unknown_chunks_test_from_dask_array_unknown_chunks.with_pytest_raises_ValueE.df.dd_from_dask_array_dx_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_dask_array_unknown_chunks_test_from_dask_array_unknown_chunks.with_pytest_raises_ValueE.df.dd_from_dask_array_dx_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 482, "end_line": 506, "span_ids": ["test_from_dask_array_unknown_chunks"], "tokens": 272}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_dask_array_unknown_chunks():\n # Series\n dx = da.Array(\n {(\"x\", 0): np.arange(5), (\"x\", 1): np.arange(5, 11)},\n \"x\",\n ((np.nan, np.nan),),\n np.arange(1).dtype,\n )\n df = dd.from_dask_array(dx)\n assert isinstance(df, dd.Series)\n assert not df.known_divisions\n assert_eq(df, pd.Series(np.arange(11)), check_index=False)\n\n # DataFrame\n dsk = {(\"x\", 0, 0): np.random.random((2, 3)), (\"x\", 
1, 0): np.random.random((5, 3))}\n dx = da.Array(dsk, \"x\", ((np.nan, np.nan), (3,)), np.float64)\n df = dd.from_dask_array(dx)\n assert isinstance(df, dd.DataFrame)\n assert not df.known_divisions\n assert_eq(df, pd.DataFrame(dx.compute()), check_index=False)\n\n # Unknown width\n dx = da.Array(dsk, \"x\", ((np.nan, np.nan), (np.nan,)), np.float64)\n with pytest.raises(ValueError):\n df = dd.from_dask_array(dx)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_to_bag_test_to_bag.assert_ddf_x_to_bag_com": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_to_bag_test_to_bag.assert_ddf_x_to_bag_com", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 509, "end_line": 520, "span_ids": ["test_to_bag"], "tokens": 163}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_bag():\n pytest.importorskip(\"dask.bag\")\n a = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [2, 3, 4, 5]},\n index=pd.Index([1.0, 2.0, 3.0, 4.0], name=\"ind\"),\n )\n ddf = dd.from_pandas(a, 2)\n\n assert ddf.to_bag().compute() == list(a.itertuples(False))\n assert ddf.to_bag(True).compute() == list(a.itertuples(True))\n assert ddf.x.to_bag(True).compute() == list(a.x.iteritems())\n assert ddf.x.to_bag().compute() == list(a.x)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_to_records_test_to_records.assert_eq_df_to_records_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_to_records_test_to_records.assert_eq_df_to_records_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 523, "end_line": 533, "span_ids": ["test_to_records"], "tokens": 117}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_records():\n pytest.importorskip(\"dask.array\")\n from dask.array.utils import assert_eq\n\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [2, 3, 4, 5]},\n index=pd.Index([1.0, 2.0, 3.0, 4.0], name=\"ind\"),\n )\n ddf = dd.from_pandas(df, 2)\n\n assert_eq(df.to_records(), ddf.to_records())", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_to_records_with_lengths_test_to_records_with_lengths.assert_result_chunks_e": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_to_records_with_lengths_test_to_records_with_lengths.assert_result_chunks_e", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 536, "end_line": 554, "span_ids": ["test_to_records_with_lengths"], "tokens": 172}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"lengths\", [[2, 2], True])\ndef test_to_records_with_lengths(lengths):\n pytest.importorskip(\"dask.array\")\n from dask.array.utils import assert_eq\n\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [2, 3, 4, 5]},\n index=pd.Index([1.0, 2.0, 3.0, 4.0], name=\"ind\"),\n )\n ddf = dd.from_pandas(df, 2)\n\n result = ddf.to_records(lengths=lengths)\n assert_eq(df.to_records(), result)\n\n assert isinstance(result, da.Array)\n\n expected_chunks = ((2, 2),)\n\n assert result.chunks == expected_chunks", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_to_records_raises_test_to_records_raises.None_1.pytest_fail_Unexpected_v": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_to_records_raises_test_to_records_raises.None_1.pytest_fail_Unexpected_v", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 557, "end_line": 571, "span_ids": ["test_to_records_raises"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_records_raises():\n pytest.importorskip(\"dask.array\")\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [2, 3, 4, 5]},\n index=pd.Index([1.0, 2.0, 3.0, 4.0], name=\"ind\"),\n )\n ddf = dd.from_pandas(df, 2)\n\n with pytest.raises(ValueError):\n ddf.to_records(lengths=[2, 2, 2])\n pytest.fail(\"3 != 2\")\n\n with pytest.raises(ValueError):\n ddf.to_records(lengths=5)\n pytest.fail(\"Unexpected value\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, 
"__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_delayed_test_from_delayed.assert_str_e_value_start": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_delayed_test_from_delayed.assert_str_e_value_start", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 574, "end_line": 602, "span_ids": ["test_from_delayed"], "tokens": 396}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_delayed():\n df = pd.DataFrame(data=np.random.normal(size=(10, 4)), columns=list(\"abcd\"))\n parts = [df.iloc[:1], df.iloc[1:3], df.iloc[3:6], df.iloc[6:10]]\n dfs = [delayed(parts.__getitem__)(i) for i in range(4)]\n meta = dfs[0].compute()\n\n my_len = lambda x: pd.Series([len(x)])\n\n for divisions in [None, [0, 1, 3, 6, 10]]:\n ddf = dd.from_delayed(dfs, meta=meta, divisions=divisions)\n assert_eq(ddf, df)\n assert list(ddf.map_partitions(my_len).compute()) == [1, 2, 3, 4]\n assert ddf.known_divisions == (divisions is not None)\n\n s = dd.from_delayed([d.a for d in dfs], meta=meta.a, divisions=divisions)\n assert_eq(s, df.a)\n assert list(s.map_partitions(my_len).compute()) == [1, 2, 3, 4]\n assert ddf.known_divisions == (divisions is not None)\n\n meta2 = [(c, \"f8\") for c in df.columns]\n assert_eq(dd.from_delayed(dfs, meta=meta2), df)\n assert_eq(dd.from_delayed([d.a for d in dfs], meta=(\"a\", \"f8\")), df.a)\n\n with pytest.raises(ValueError):\n dd.from_delayed(dfs, meta=meta, divisions=[0, 1, 3, 6])\n\n with pytest.raises(ValueError) as e:\n dd.from_delayed(dfs, meta=meta.a).compute()\n assert str(e.value).startswith(\"Metadata mismatch found in `from_delayed`\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_delayed_misordered_meta_test_from_delayed_misordered_meta.assert_msg_in_str_info_va": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_delayed_misordered_meta_test_from_delayed_misordered_meta.assert_msg_in_str_info_va", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 605, "end_line": 626, "span_ids": ["test_from_delayed_misordered_meta"], "tokens": 198}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_delayed_misordered_meta():\n df = pd.DataFrame(\n columns=[\"(1)\", \"(2)\", \"date\", \"ent\", \"val\"],\n 
data=[range(i * 5, i * 5 + 5) for i in range(3)],\n index=range(3),\n )\n\n # meta with different order for columns\n misordered_meta = pd.DataFrame(\n columns=[\"date\", \"ent\", \"val\", \"(1)\", \"(2)\"], data=[range(5)]\n )\n\n ddf = dd.from_delayed([delayed(lambda: df)()], meta=misordered_meta)\n\n with pytest.raises(ValueError) as info:\n # produces dataframe which does not match meta\n ddf.reset_index().compute(scheduler=\"sync\")\n msg = (\n \"The columns in the computed data do not match the columns in the\"\n \" provided metadata\"\n )\n assert msg in str(info.value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_delayed_sorted_test_to_delayed.assert_eq_dx_compute_x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_delayed_sorted_test_to_delayed.assert_eq_dx_compute_x", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 629, "end_line": 653, "span_ids": ["test_from_delayed_sorted", "test_to_delayed"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_delayed_sorted():\n a = pd.DataFrame({\"x\": [1, 2]}, index=[1, 10])\n b = pd.DataFrame({\"x\": [4, 1]}, index=[100, 200])\n\n A = dd.from_delayed([delayed(a), delayed(b)], divisions=\"sorted\")\n assert A.known_divisions\n\n assert A.divisions == (1, 100, 200)\n\n\ndef test_to_delayed():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [10, 20, 30, 40]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n # Frame\n a, b = ddf.to_delayed()\n assert isinstance(a, Delayed)\n assert isinstance(b, Delayed)\n assert_eq(a.compute(), df.iloc[:2])\n\n # Scalar\n x = ddf.x.sum()\n dx = x.to_delayed()\n assert isinstance(dx, Delayed)\n assert_eq(dx.compute(), x)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_to_delayed_optimize_graph_test_to_delayed_optimize_graph.assert_eq_dx_compute_d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_to_delayed_optimize_graph_test_to_delayed_optimize_graph.assert_eq_dx_compute_d", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 656, "end_line": 674, "span_ids": ["test_to_delayed_optimize_graph"], "tokens": 201}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_delayed_optimize_graph():\n df = pd.DataFrame({\"x\": list(range(20))})\n ddf = dd.from_pandas(df, npartitions=20)\n ddf2 = (ddf + 1).loc[:2]\n\n # Frame\n d = ddf2.to_delayed()[0]\n assert len(d.dask) < 20\n d2 = ddf2.to_delayed(optimize_graph=False)[0]\n assert sorted(d2.dask) == sorted(ddf2.dask)\n assert_eq(ddf2.get_partition(0), d.compute())\n assert_eq(ddf2.get_partition(0), d2.compute())\n\n # Scalar\n x = ddf2.x.sum()\n dx = x.to_delayed()\n dx2 = x.to_delayed(optimize_graph=False)\n assert len(dx.dask) < len(dx2.dask)\n assert_eq(dx.compute(), dx2.compute())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_dask_array_index_dtype_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_dask_array_index_dtype_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 677, "end_line": 700, "span_ids": ["test_from_dask_array_index_dtype"], "tokens": 236}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_dask_array_index_dtype():\n x = da.ones((10,), chunks=(5,))\n\n df = pd.DataFrame(\n {\n \"date\": pd.date_range(\"2019-01-01\", periods=10, freq=\"1T\"),\n \"val1\": list(range(10)),\n }\n )\n ddf = dd.from_pandas(df, npartitions=2).set_index(\"date\")\n\n ddf2 = dd.from_dask_array(x, index=ddf.index, columns=\"val2\")\n\n assert ddf.index.dtype == ddf2.index.dtype\n assert ddf.index.name == ddf2.index.name\n\n df = pd.DataFrame({\"idx\": np.arange(0, 1, 0.1), \"val1\": list(range(10))})\n ddf = dd.from_pandas(df, npartitions=2).set_index(\"idx\")\n\n ddf2 = dd.from_dask_array(x, index=ddf.index, columns=\"val2\")\n\n assert ddf.index.dtype == ddf2.index.dtype\n assert ddf.index.name == ddf2.index.name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_json_test_read_json_basic.with_tmpfile_json_as_f.assert_eq_out_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_json_test_read_json_basic.with_tmpfile_json_as_f.assert_eq_out_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_json.py", "file_name": "test_json.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 27, "span_ids": ["imports", "test_read_json_basic"], "tokens": 210}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import json\nimport os\n\nimport pandas as pd\nimport pytest\n\nimport dask.dataframe as dd\nfrom dask.dataframe.utils import assert_eq\nfrom dask.utils import tmpfile, tmpdir\n\n\ndf = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]})\nddf = dd.from_pandas(df, npartitions=2)\n\n\n@pytest.mark.parametrize(\"orient\", [\"split\", \"records\", \"index\", \"columns\", \"values\"])\ndef test_read_json_basic(orient):\n with tmpfile(\"json\") as f:\n df.to_json(f, orient=orient, lines=False)\n actual = dd.read_json(f, orient=orient, lines=False)\n actual_pd = pd.read_json(f, orient=orient, lines=False)\n\n out = actual.compute()\n assert_eq(out, actual_pd)\n if orient == \"values\":\n out.columns = list(df.columns)\n assert_eq(out, df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_read_json_fkeyword_test_read_json_fkeyword.with_tmpfile_json_as_f.assert_eq_actual_actual_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_read_json_fkeyword_test_read_json_fkeyword.with_tmpfile_json_as_f.assert_eq_actual_actual_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_json.py", "file_name": "test_json.py", "file_type": "text/x-python", "category": "test", "start_line": 30, "end_line": 41, "span_ids": ["test_read_json_fkeyword"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"fkeyword\", [\"pandas\", \"json\"])\ndef test_read_json_fkeyword(fkeyword):\n def _my_json_reader(*args, **kwargs):\n if fkeyword == \"json\":\n return pd.DataFrame.from_dict(json.load(*args))\n return pd.read_json(*args)\n\n with tmpfile(\"json\") as f:\n df.to_json(f, orient=\"records\", lines=False)\n actual = dd.read_json(f, orient=\"records\", lines=False, engine=_my_json_reader)\n actual_pd = pd.read_json(f, orient=\"records\", lines=False)\n assert_eq(actual, actual_pd)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_read_json_meta_test_read_json_meta.if_orient_records_.assert_eq_res_sol_check": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_read_json_meta_test_read_json_meta.if_orient_records_.assert_eq_res_sol_check", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_json.py", "file_name": "test_json.py", "file_type": "text/x-python", "category": "test", "start_line": 
44, "end_line": 72, "span_ids": ["test_read_json_meta"], "tokens": 281}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"orient\", [\"split\", \"records\", \"index\", \"columns\", \"values\"])\ndef test_read_json_meta(orient, tmpdir):\n df = pd.DataFrame({\"x\": range(5), \"y\": [\"a\", \"b\", \"c\", \"d\", \"e\"]})\n df2 = df.assign(x=df.x + 0.5)\n lines = orient == \"records\"\n df.to_json(str(tmpdir.join(\"fil1.json\")), orient=orient, lines=lines)\n df2.to_json(str(tmpdir.join(\"fil2.json\")), orient=orient, lines=lines)\n sol = pd.concat([df, df2])\n meta = df2.iloc[:0]\n\n if orient == \"values\":\n # orient=values loses column names\n sol.columns = meta.columns = [0, 1]\n\n res = dd.read_json(\n str(tmpdir.join(\"fil*.json\")), orient=orient, meta=meta, lines=lines\n )\n assert_eq(res, sol)\n\n if orient == \"records\":\n # Also check chunked version\n res = dd.read_json(\n str(tmpdir.join(\"fil*.json\")),\n orient=orient,\n meta=meta,\n lines=True,\n blocksize=50,\n )\n assert_eq(res, sol, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_write_json_basic_test_to_json_with_get.with_tmpdir_as_dn_.assert_eq_result_df_che": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_write_json_basic_test_to_json_with_get.with_tmpdir_as_dn_.assert_eq_result_df_che", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_json.py", "file_name": "test_json.py", "file_type": "text/x-python", "category": "test", "start_line": 75, "end_line": 103, "span_ids": ["test_to_json_with_get", "test_write_json_basic"], "tokens": 265}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"orient\", [\"split\", \"records\", \"index\", \"columns\", \"values\"])\ndef test_write_json_basic(orient):\n with tmpdir() as path:\n fn = os.path.join(path, \"1.json\")\n df.to_json(fn, orient=orient, lines=False)\n actual = dd.read_json(fn, orient=orient, lines=False)\n out = actual.compute()\n if orient == \"values\":\n out.columns = list(df.columns)\n assert_eq(out, df)\n\n\ndef test_to_json_with_get():\n from dask.multiprocessing import get as mp_get\n\n flag = [False]\n\n def my_get(*args, **kwargs):\n flag[0] = True\n return mp_get(*args, **kwargs)\n\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n with tmpdir() as dn:\n ddf.to_json(dn, compute_kwargs={\"scheduler\": my_get})\n assert flag[0]\n result = dd.read_json(os.path.join(dn, \"*\"))\n assert_eq(result, df, check_index=False)", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_read_json_error_test_read_chunked.with_tmpdir_as_path_.assert_eq_d_df_check_in": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_read_json_error_test_read_chunked.with_tmpdir_as_path_.assert_eq_d_df_check_in", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_json.py", "file_name": "test_json.py", "file_type": "text/x-python", "category": "test", "start_line": 106, "end_line": 122, "span_ids": ["test_read_json_error", "test_read_chunked"], "tokens": 177}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_json_error():\n with tmpfile(\"json\") as f:\n with pytest.raises(ValueError):\n df.to_json(f, orient=\"split\", lines=True)\n df.to_json(f, orient=\"split\", lines=False)\n with pytest.raises(ValueError):\n dd.read_json(f, orient=\"split\", blocksize=1)\n\n\n@pytest.mark.parametrize(\"block\", [5, 15, 33, 200, 90000])\ndef test_read_chunked(block):\n with tmpdir() as path:\n fn = os.path.join(path, \"1.json\")\n df.to_json(fn, orient=\"records\", lines=True)\n d = dd.read_json(fn, blocksize=block, sample=10)\n assert (d.npartitions > 1) or (block > 50)\n assert_eq(d, df, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_json_compressed_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_json_compressed_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_json.py", "file_name": "test_json.py", "file_type": "text/x-python", "category": "test", "start_line": 125, "end_line": 139, "span_ids": ["test_read_json_inferred_compression", "test_json_compressed"], "tokens": 135}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"compression\", [None, \"gzip\", \"xz\"])\ndef test_json_compressed(compression):\n with tmpdir() as path:\n dd.to_json(ddf, path, compression=compression)\n actual = dd.read_json(os.path.join(path, \"*\"), compression=compression)\n assert_eq(df, actual.compute(), check_index=False)\n\n\ndef test_read_json_inferred_compression():\n with tmpdir() as path:\n fn = os.path.join(path, \"*.json.gz\")\n dd.to_json(ddf, fn, compression=\"gzip\")\n actual = dd.read_json(fn)\n assert_eq(df, actual.compute(), check_index=False)", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_orc.py_os_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_orc.py_os_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_orc.py", "file_name": "test_orc.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 74, "span_ids": ["orc_files", "imports", "test_orc_multiple", "test_orc_with_backend", "test_orc_single"], "tokens": 556}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import os\nimport shutil\nimport tempfile\nfrom distutils.version import LooseVersion\n\nimport pytest\n\nfrom dask.dataframe import read_orc\nfrom dask.dataframe.utils import assert_eq\nimport dask.dataframe as dd\n\npytest.importorskip(\"pyarrow.orc\")\n\n# Skip for broken ORC reader\nimport pyarrow as pa\n\npytestmark = pytest.mark.skipif(\n LooseVersion(pa.__version__) == \"0.10.0\",\n reason=(\n \"PyArrow 0.10.0 release broke the ORC reader, see \"\n \"https://issues.apache.org/jira/browse/ARROW-3009\"\n ),\n)\n\n\nurl = (\n \"https://www.googleapis.com/download/storage/v1/b/anaconda-public-data/o\"\n \"/orc%2FTestOrcFile.testDate1900.orc?generation=1522611448751555&alt=\"\n \"media\"\n)\ncolumns = [\"time\", \"date\"]\n\n\n@pytest.mark.network\ndef test_orc_with_backend():\n pytest.importorskip(\"requests\")\n d = read_orc(url)\n assert set(d.columns) == {\"time\", \"date\"} # order is not guaranteed\n assert len(d) == 70000\n\n\n@pytest.fixture(scope=\"module\")\ndef orc_files():\n requests = pytest.importorskip(\"requests\")\n data = requests.get(url).content\n d = tempfile.mkdtemp()\n files = [os.path.join(d, fn) for fn in [\"test1.orc\", \"test2.orc\"]]\n for fn in files:\n with open(fn, \"wb\") as f:\n f.write(data)\n try:\n yield files\n finally:\n shutil.rmtree(d, ignore_errors=True)\n\n\ndef test_orc_single(orc_files):\n fn = orc_files[0]\n d = read_orc(fn)\n assert len(d) == 70000\n assert d.npartitions == 8\n d2 = read_orc(fn, columns=[\"time\", \"date\"])\n assert_eq(d[columns], d2[columns])\n with pytest.raises(ValueError, match=\"nonexist\"):\n read_orc(fn, columns=[\"time\", \"nonexist\"])\n\n\ndef test_orc_multiple(orc_files):\n d = read_orc(orc_files[0])\n d2 = read_orc(orc_files)\n assert_eq(d2[columns], dd.concat([d, d])[columns], check_index=False)\n d2 = read_orc(os.path.dirname(orc_files[0]) + \"/*.orc\")\n assert_eq(d2[columns], dd.concat([d, d])[columns], check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_math_engine.return.request_param": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_math_engine.return.request_param", 
"embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 91, "span_ids": ["impl:38", "check_fastparquet", "imports", "check_engine", "engine", "check_pyarrow"], "tokens": 637}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import math\nimport os\nimport sys\nimport warnings\nfrom distutils.version import LooseVersion\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nimport dask\nimport dask.multiprocessing\nimport dask.dataframe as dd\nfrom dask.blockwise import Blockwise, optimize_blockwise\nfrom dask.dataframe.utils import assert_eq, PANDAS_VERSION\nfrom dask.dataframe.io.parquet.utils import _parse_pandas_metadata\nfrom dask.dataframe.optimize import optimize_read_parquet_getitem\nfrom dask.dataframe.io.parquet.core import BlockwiseParquet, ParquetSubgraph\nfrom dask.utils import natural_sort_key, parse_bytes\n\n\ntry:\n import fastparquet\nexcept ImportError:\n fastparquet = False\n\n\ntry:\n import pyarrow as pa\nexcept ImportError:\n check_pa_divs = pa = False\n\ntry:\n import pyarrow.parquet as pq\nexcept ImportError:\n pq = False\n\n\nSKIP_FASTPARQUET = not fastparquet\nSKIP_FASTPARQUET_REASON = \"fastparquet not found\"\nFASTPARQUET_MARK = pytest.mark.skipif(SKIP_FASTPARQUET, reason=SKIP_FASTPARQUET_REASON)\n\nif pq and pa.__version__ < LooseVersion(\"0.13.1\"):\n SKIP_PYARROW = True\n SKIP_PYARROW_REASON = \"pyarrow >= 0.13.1 required for parquet\"\nelse:\n if sys.platform == \"win32\" and pa and pa.__version__ == LooseVersion(\"0.16.0\"):\n SKIP_PYARROW = True\n SKIP_PYARROW_REASON = \"https://github.com/dask/dask/issues/6093\"\n else:\n SKIP_PYARROW = not pq\n SKIP_PYARROW_REASON = \"pyarrow not found\"\nPYARROW_MARK = pytest.mark.skipif(SKIP_PYARROW, reason=SKIP_PYARROW_REASON)\n\n\ndef check_fastparquet():\n if SKIP_FASTPARQUET:\n pytest.skip(SKIP_FASTPARQUET_REASON)\n\n\ndef check_pyarrow():\n if SKIP_PYARROW:\n pytest.skip(SKIP_PYARROW_REASON)\n\n\ndef check_engine():\n if SKIP_FASTPARQUET and SKIP_PYARROW:\n pytest.skip(\"No parquet engine (fastparquet or pyarrow) found\")\n\n\nnrows = 40\nnpartitions = 15\ndf = pd.DataFrame(\n {\n \"x\": [i * 7 % 5 for i in range(nrows)], # Not sorted\n \"y\": [i * 2.5 for i in range(nrows)], # Sorted\n },\n index=pd.Index([10 * i for i in range(nrows)], name=\"myindex\"),\n)\n\nddf = dd.from_pandas(df, npartitions=npartitions)\n\n\n@pytest.fixture(\n params=[\n pytest.param(\"fastparquet\", marks=FASTPARQUET_MARK),\n pytest.param(\"pyarrow\", marks=PYARROW_MARK),\n ]\n)\ndef engine(request):\n return request.param", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_write_read_engines_write_read_engines.return.pytest_mark_parametrize_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_write_read_engines_write_read_engines.return.pytest_mark_parametrize_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 93, "end_line": 133, "span_ids": ["write_read_engines"], "tokens": 347}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def write_read_engines(**kwargs):\n \"\"\"Product of both engines for write/read:\n\n To add custom marks, pass keyword of the form: `mark_writer_reader=reason`,\n or `mark_engine=reason` to apply to all parameters with that engine.\"\"\"\n backends = {\"pyarrow\", \"fastparquet\"}\n marks = {(w, r): [] for w in backends for r in backends}\n\n # Skip if uninstalled\n for name, skip, reason in [\n (\"fastparquet\", SKIP_FASTPARQUET, SKIP_FASTPARQUET_REASON),\n (\"pyarrow\", SKIP_PYARROW, SKIP_PYARROW_REASON),\n ]:\n if skip:\n val = pytest.mark.skip(reason=reason)\n for k in marks:\n if name in k:\n marks[k].append(val)\n\n # Custom marks\n for kw, val in kwargs.items():\n kind, rest = kw.split(\"_\", 1)\n key = tuple(rest.split(\"_\"))\n if (\n kind not in (\"xfail\", \"skip\")\n or len(key) > 2\n or set(key).difference(backends)\n ):\n raise ValueError(\"unknown keyword %r\" % kw)\n val = getattr(pytest.mark, kind)(reason=val)\n if len(key) == 2:\n marks[key].append(val)\n else:\n for k in marks:\n if key in k:\n marks[k].append(val)\n\n return pytest.mark.parametrize(\n (\"write_engine\", \"read_engine\"),\n [pytest.param(*k, marks=tuple(v)) for (k, v) in sorted(marks.items())],\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_pyarrow_fastparquet_msg_test_local.for_column_in_df_columns_.assert_data_column_o": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_pyarrow_fastparquet_msg_test_local.for_column_in_df_columns_.assert_data_column_o", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 136, "end_line": 174, "span_ids": ["test_local", "impl:46"], "tokens": 330}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "pyarrow_fastparquet_msg = \"fastparquet fails reading pyarrow written directories\"\nwrite_read_engines_xfail = write_read_engines(\n xfail_pyarrow_fastparquet=pyarrow_fastparquet_msg\n)\n\nfp_pandas_msg = \"pandas with fastparquet engine does not preserve index\"\nfp_pandas_xfail = 
write_read_engines(xfail_fastparquet_pyarrow=fp_pandas_msg)\n\n\n@write_read_engines()\ndef test_local(tmpdir, write_engine, read_engine):\n tmp = str(tmpdir)\n data = pd.DataFrame(\n {\n \"i32\": np.arange(1000, dtype=np.int32),\n \"i64\": np.arange(1000, dtype=np.int64),\n \"f\": np.arange(1000, dtype=np.float64),\n \"bhello\": np.random.choice([\"hello\", \"yo\", \"people\"], size=1000).astype(\n \"O\"\n ),\n }\n )\n df = dd.from_pandas(data, chunksize=500)\n\n df.to_parquet(tmp, write_index=False, engine=write_engine)\n\n files = os.listdir(tmp)\n assert \"_common_metadata\" in files\n assert \"_metadata\" in files\n assert \"part.0.parquet\" in files\n\n df2 = dd.read_parquet(tmp, index=False, engine=read_engine)\n\n assert len(df2.divisions) > 1\n\n out = df2.compute(scheduler=\"sync\").reset_index()\n\n for column in df.columns:\n assert (data[column] == out[column]).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_empty_test_empty.assert_eq_ddf_read_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_empty_test_empty.assert_eq_ddf_read_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 177, "end_line": 188, "span_ids": ["test_empty"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"index\", [False, True])\n@write_read_engines_xfail\ndef test_empty(tmpdir, write_engine, read_engine, index):\n fn = str(tmpdir)\n df = pd.DataFrame({\"a\": [\"a\", \"b\", \"b\"], \"b\": [4, 5, 6]})[:0]\n if index:\n df.set_index(\"a\", inplace=True, drop=True)\n ddf = dd.from_pandas(df, npartitions=2)\n\n ddf.to_parquet(fn, write_index=index, engine=write_engine)\n read_df = dd.read_parquet(fn, engine=read_engine)\n assert_eq(ddf, read_df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_simple_test_simple.assert_eq_ddf_read_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_simple_test_simple.assert_eq_ddf_read_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 191, "end_line": 199, "span_ids": ["test_simple"], "tokens": 123}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@write_read_engines()\ndef test_simple(tmpdir, write_engine, read_engine):\n fn = str(tmpdir)\n df = pd.DataFrame({\"a\": [\"a\", \"b\", \"b\"], \"b\": [4, 5, 6]})\n df.set_index(\"a\", inplace=True, drop=True)\n ddf = dd.from_pandas(df, npartitions=2)\n ddf.to_parquet(fn, engine=write_engine)\n read_df = dd.read_parquet(fn, index=[\"a\"], engine=read_engine)\n assert_eq(ddf, read_df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_delayed_no_metadata_test_delayed_no_metadata.assert_eq_ddf_read_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_delayed_no_metadata_test_delayed_no_metadata.assert_eq_ddf_read_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 202, "end_line": 220, "span_ids": ["test_delayed_no_metadata"], "tokens": 189}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@write_read_engines()\ndef test_delayed_no_metadata(tmpdir, write_engine, read_engine):\n fn = str(tmpdir)\n df = pd.DataFrame({\"a\": [\"a\", \"b\", \"b\"], \"b\": [4, 5, 6]})\n df.set_index(\"a\", inplace=True, drop=True)\n ddf = dd.from_pandas(df, npartitions=2)\n ddf.to_parquet(\n fn, engine=write_engine, compute=False, write_metadata_file=False\n ).compute()\n files = os.listdir(fn)\n assert \"_metadata\" not in files\n # Fastparquet doesn't currently handle a directory without \"_metadata\"\n read_df = dd.read_parquet(\n os.path.join(fn, \"*.parquet\"),\n index=[\"a\"],\n engine=read_engine,\n gather_statistics=True,\n )\n assert_eq(ddf, read_df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_glob_test_read_glob.assert_eq_ddf_ddf2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_glob_test_read_glob.assert_eq_ddf_ddf2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 223, "end_line": 238, "span_ids": ["test_read_glob"], "tokens": 143}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": 
"@write_read_engines()\ndef test_read_glob(tmpdir, write_engine, read_engine):\n tmp_path = str(tmpdir)\n ddf.to_parquet(tmp_path, engine=write_engine)\n if os.path.exists(os.path.join(tmp_path, \"_metadata\")):\n os.unlink(os.path.join(tmp_path, \"_metadata\"))\n files = os.listdir(tmp_path)\n assert \"_metadata\" not in files\n\n ddf2 = dd.read_parquet(\n os.path.join(tmp_path, \"*.parquet\"),\n engine=read_engine,\n index=\"myindex\", # Must specify index without _metadata\n gather_statistics=True,\n )\n assert_eq(ddf, ddf2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_list_test_read_list.assert_eq_ddf_ddf2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_list_test_read_list.assert_eq_ddf_ddf2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 241, "end_line": 262, "span_ids": ["test_read_list"], "tokens": 171}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@write_read_engines()\ndef test_read_list(tmpdir, write_engine, read_engine):\n if write_engine == read_engine == \"fastparquet\" and os.name == \"nt\":\n # fastparquet or dask is not normalizing filepaths correctly on\n # windows.\n pytest.skip(\"filepath bug.\")\n\n tmpdir = str(tmpdir)\n ddf.to_parquet(tmpdir, engine=write_engine)\n files = sorted(\n [\n os.path.join(tmpdir, f)\n for f in os.listdir(tmpdir)\n if not f.endswith(\"_metadata\")\n ],\n key=natural_sort_key,\n )\n\n ddf2 = dd.read_parquet(\n files, engine=read_engine, index=\"myindex\", gather_statistics=True\n )\n assert_eq(ddf, ddf2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_columns_auto_index_test_columns_auto_index.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_columns_auto_index_test_columns_auto_index.None_4", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 265, "end_line": 291, "span_ids": ["test_columns_auto_index"], "tokens": 223}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@write_read_engines()\ndef test_columns_auto_index(tmpdir, 
write_engine, read_engine):\n fn = str(tmpdir)\n ddf.to_parquet(fn, engine=write_engine)\n\n # XFAIL, auto index selection no longer supported (for simplicity)\n # ### Empty columns ###\n # With divisions if supported\n assert_eq(dd.read_parquet(fn, columns=[], engine=read_engine), ddf[[]])\n\n # No divisions\n assert_eq(\n dd.read_parquet(fn, columns=[], engine=read_engine, gather_statistics=False),\n ddf[[]].clear_divisions(),\n check_divisions=True,\n )\n\n # ### Single column, auto select index ###\n # With divisions if supported\n assert_eq(dd.read_parquet(fn, columns=[\"x\"], engine=read_engine), ddf[[\"x\"]])\n\n # No divisions\n assert_eq(\n dd.read_parquet(fn, columns=[\"x\"], engine=read_engine, gather_statistics=False),\n ddf[[\"x\"]].clear_divisions(),\n check_divisions=True,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_columns_index_test_columns_index.None_6": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_columns_index_test_columns_index.None_6", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 294, "end_line": 354, "span_ids": ["test_columns_index"], "tokens": 365}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@write_read_engines()\ndef test_columns_index(tmpdir, write_engine, read_engine):\n fn = str(tmpdir)\n ddf.to_parquet(fn, engine=write_engine)\n\n # With Index\n # ----------\n # ### Empty columns, specify index ###\n # With divisions if supported\n assert_eq(\n dd.read_parquet(fn, columns=[], engine=read_engine, index=\"myindex\"), ddf[[]]\n )\n\n # No divisions\n assert_eq(\n dd.read_parquet(\n fn, columns=[], engine=read_engine, index=\"myindex\", gather_statistics=False\n ),\n ddf[[]].clear_divisions(),\n check_divisions=True,\n )\n\n # ### Single column, specify index ###\n # With divisions if supported\n assert_eq(\n dd.read_parquet(fn, index=\"myindex\", columns=[\"x\"], engine=read_engine),\n ddf[[\"x\"]],\n )\n\n # No divisions\n assert_eq(\n dd.read_parquet(\n fn,\n index=\"myindex\",\n columns=[\"x\"],\n engine=read_engine,\n gather_statistics=False,\n ),\n ddf[[\"x\"]].clear_divisions(),\n check_divisions=True,\n )\n\n # ### Two columns, specify index ###\n # With divisions if supported\n assert_eq(\n dd.read_parquet(fn, index=\"myindex\", columns=[\"x\", \"y\"], engine=read_engine),\n ddf,\n )\n\n # No divisions\n assert_eq(\n dd.read_parquet(\n fn,\n index=\"myindex\",\n columns=[\"x\", \"y\"],\n engine=read_engine,\n gather_statistics=False,\n ),\n ddf.clear_divisions(),\n check_divisions=True,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"},
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_nonsense_column_test_gather_statistics_no_index.assert_not_df_known_divis": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_nonsense_column_test_gather_statistics_no_index.assert_not_df_known_divis", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 357, "end_line": 418, "span_ids": ["test_columns_no_index", "test_gather_statistics_no_index", "test_nonsense_column"], "tokens": 418}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_nonsense_column(tmpdir, engine):\n fn = str(tmpdir)\n ddf.to_parquet(fn, engine=engine)\n with pytest.raises((ValueError, KeyError)):\n dd.read_parquet(fn, columns=[\"nonesense\"], engine=engine)\n with pytest.raises((Exception, KeyError)):\n dd.read_parquet(fn, columns=[\"nonesense\"] + list(ddf.columns), engine=engine)\n\n\n@write_read_engines()\ndef test_columns_no_index(tmpdir, write_engine, read_engine):\n fn = str(tmpdir)\n ddf.to_parquet(fn, engine=write_engine)\n ddf2 = ddf.reset_index()\n\n # No Index\n # --------\n # All columns, none as index\n assert_eq(\n dd.read_parquet(fn, index=False, engine=read_engine, gather_statistics=True),\n ddf2,\n check_index=False,\n check_divisions=True,\n )\n\n # Two columns, none as index\n assert_eq(\n dd.read_parquet(\n fn,\n index=False,\n columns=[\"x\", \"y\"],\n engine=read_engine,\n gather_statistics=True,\n ),\n ddf2[[\"x\", \"y\"]],\n check_index=False,\n check_divisions=True,\n )\n\n # One column and one index, all as columns\n assert_eq(\n dd.read_parquet(\n fn,\n index=False,\n columns=[\"myindex\", \"x\"],\n engine=read_engine,\n gather_statistics=True,\n ),\n ddf2[[\"myindex\", \"x\"]],\n check_index=False,\n check_divisions=True,\n )\n\n\n@write_read_engines()\ndef test_gather_statistics_no_index(tmpdir, write_engine, read_engine):\n fn = str(tmpdir)\n ddf.to_parquet(fn, engine=write_engine, write_index=False)\n\n df = dd.read_parquet(fn, engine=read_engine, index=False)\n assert df.index.name is None\n assert not df.known_divisions", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_columns_index_with_multi_index_test_columns_index_with_multi_index.for_ind_col_sol_df_in_.assert_eq_d_sol_df_col_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_columns_index_with_multi_index_test_columns_index_with_multi_index.for_ind_col_sol_df_in_.assert_eq_d_sol_df_col_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 421, "end_line": 485, "span_ids": 
["test_columns_index_with_multi_index"], "tokens": 734}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_columns_index_with_multi_index(tmpdir, engine):\n fn = os.path.join(str(tmpdir), \"test.parquet\")\n index = pd.MultiIndex.from_arrays(\n [np.arange(10), np.arange(10) + 1], names=[\"x0\", \"x1\"]\n )\n df = pd.DataFrame(np.random.randn(10, 2), columns=[\"a\", \"b\"], index=index)\n df2 = df.reset_index(drop=False)\n\n if engine == \"fastparquet\":\n fastparquet.write(fn, df.reset_index(), write_index=False)\n\n # fastparquet doesn't support multi-index\n with pytest.raises(ValueError):\n ddf = dd.read_parquet(fn, engine=engine, index=index.names)\n\n else:\n pq.write_table(pa.Table.from_pandas(df.reset_index(), preserve_index=False), fn)\n\n # Pyarrow supports multi-index reads\n ddf = dd.read_parquet(fn, engine=engine, index=index.names)\n assert_eq(ddf, df)\n\n d = dd.read_parquet(fn, columns=\"a\", engine=engine, index=index.names)\n assert_eq(d, df[\"a\"])\n\n d = dd.read_parquet(fn, index=[\"a\", \"b\"], columns=[\"x0\", \"x1\"], engine=engine)\n assert_eq(d, df2.set_index([\"a\", \"b\"])[[\"x0\", \"x1\"]])\n\n # Just index\n d = dd.read_parquet(fn, index=False, engine=engine)\n assert_eq(d, df2)\n\n d = dd.read_parquet(fn, columns=[\"b\"], index=[\"a\"], engine=engine)\n assert_eq(d, df2.set_index(\"a\")[[\"b\"]])\n\n d = dd.read_parquet(fn, columns=[\"a\", \"b\"], index=[\"x0\"], engine=engine)\n assert_eq(d, df2.set_index(\"x0\")[[\"a\", \"b\"]])\n\n # Just columns\n d = dd.read_parquet(fn, columns=[\"x0\", \"a\"], index=[\"x1\"], engine=engine)\n assert_eq(d, df2.set_index(\"x1\")[[\"x0\", \"a\"]])\n\n # Both index and columns\n d = dd.read_parquet(fn, index=False, columns=[\"x0\", \"b\"], engine=engine)\n assert_eq(d, df2[[\"x0\", \"b\"]])\n\n for index in [\"x1\", \"b\"]:\n d = dd.read_parquet(fn, index=index, columns=[\"x0\", \"a\"], engine=engine)\n assert_eq(d, df2.set_index(index)[[\"x0\", \"a\"]])\n\n # Columns and index intersect\n for index in [\"a\", \"x0\"]:\n with pytest.raises(ValueError):\n d = dd.read_parquet(fn, index=index, columns=[\"x0\", \"a\"], engine=engine)\n\n # Series output\n for ind, col, sol_df in [\n (\"x1\", \"x0\", df2.set_index(\"x1\")),\n (False, \"b\", df2),\n (False, \"x0\", df2[[\"x0\"]]),\n (\"a\", \"x0\", df2.set_index(\"a\")[[\"x0\"]]),\n (\"a\", \"b\", df2.set_index(\"a\")),\n ]:\n d = dd.read_parquet(fn, index=ind, columns=col, engine=engine)\n assert_eq(d, sol_df[col])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_no_index_test_read_series.assert_eq_ddf_x_ddf2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_no_index_test_read_series.assert_eq_ddf_x_ddf2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 488, "end_line": 505, 
"span_ids": ["test_read_series", "test_no_index"], "tokens": 213}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@write_read_engines()\ndef test_no_index(tmpdir, write_engine, read_engine):\n fn = str(tmpdir)\n df = pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]})\n ddf = dd.from_pandas(df, npartitions=2)\n ddf.to_parquet(fn, engine=write_engine)\n ddf2 = dd.read_parquet(fn, engine=read_engine)\n assert_eq(df, ddf2, check_index=False)\n\n\ndef test_read_series(tmpdir, engine):\n fn = str(tmpdir)\n ddf.to_parquet(fn, engine=engine)\n ddf2 = dd.read_parquet(fn, columns=[\"x\"], index=\"myindex\", engine=engine)\n assert_eq(ddf[[\"x\"]], ddf2)\n\n ddf2 = dd.read_parquet(fn, columns=\"x\", index=\"myindex\", engine=engine)\n assert_eq(ddf.x, ddf2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_names_test_names.assert_set_read_fn_colum": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_names_test_names.assert_set_read_fn_colum", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 508, "end_line": 519, "span_ids": ["test_names"], "tokens": 114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_names(tmpdir, engine):\n fn = str(tmpdir)\n ddf.to_parquet(fn, engine=engine)\n\n def read(fn, **kwargs):\n return dd.read_parquet(fn, engine=engine, **kwargs)\n\n assert set(read(fn).dask) == set(read(fn).dask)\n\n assert set(read(fn).dask) != set(read(fn, columns=[\"x\"]).dask)\n\n assert set(read(fn, columns=(\"x\",)).dask) == set(read(fn, columns=[\"x\"]).dask)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip_from_pandas_test_roundtrip_from_pandas.assert_eq_dfp_ddf_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip_from_pandas_test_roundtrip_from_pandas.assert_eq_dfp_ddf_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 522, "end_line": 532, "span_ids": ["test_roundtrip_from_pandas"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n PANDAS_VERSION < \"0.22.0\", reason=\"new pyarrow assumes new-ish pandas versions\"\n)\n@write_read_engines()\ndef test_roundtrip_from_pandas(tmpdir, write_engine, read_engine):\n fn = str(tmpdir.join(\"test.parquet\"))\n dfp = df.copy()\n dfp.index.name = \"index\"\n dfp.to_parquet(fn, engine=write_engine)\n ddf = dd.read_parquet(fn, index=\"index\", engine=read_engine)\n assert_eq(dfp, ddf)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_categorical_test_categorical.assert_df_x_ddf2_x_a": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_categorical_test_categorical.assert_df_x_ddf2_x_a", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 535, "end_line": 560, "span_ids": ["test_categorical"], "tokens": 282}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@write_read_engines()\ndef test_categorical(tmpdir, write_engine, read_engine):\n tmp = str(tmpdir)\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\"] * 100}, dtype=\"category\")\n ddf = dd.from_pandas(df, npartitions=3)\n dd.to_parquet(ddf, tmp, engine=write_engine)\n\n ddf2 = dd.read_parquet(tmp, categories=\"x\", engine=read_engine)\n assert ddf2.compute().x.cat.categories.tolist() == [\"a\", \"b\", \"c\"]\n\n ddf2 = dd.read_parquet(tmp, categories=[\"x\"], engine=read_engine)\n assert ddf2.compute().x.cat.categories.tolist() == [\"a\", \"b\", \"c\"]\n\n # autocat\n if read_engine != \"pyarrow\":\n ddf2 = dd.read_parquet(tmp, engine=read_engine)\n assert ddf2.compute().x.cat.categories.tolist() == [\"a\", \"b\", \"c\"]\n\n ddf2.loc[:1000].compute()\n assert assert_eq(df, ddf2)\n\n # dereference cats\n ddf2 = dd.read_parquet(tmp, categories=[], engine=read_engine)\n\n ddf2.loc[:1000].compute()\n assert (df.x == ddf2.x).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_test_append.assert_eq_df_ddf3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_test_append.assert_eq_df_ddf3_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 563, "end_line": 586, "span_ids": 
["test_append"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_append(tmpdir, engine):\n \"\"\"Test that appended parquet equal to the original one.\"\"\"\n check_fastparquet()\n tmp = str(tmpdir)\n df = pd.DataFrame(\n {\n \"i32\": np.arange(1000, dtype=np.int32),\n \"i64\": np.arange(1000, dtype=np.int64),\n \"f\": np.arange(1000, dtype=np.float64),\n \"bhello\": np.random.choice([\"hello\", \"yo\", \"people\"], size=1000).astype(\n \"O\"\n ),\n }\n )\n df.index.name = \"index\"\n\n half = len(df) // 2\n ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)\n ddf2 = dd.from_pandas(df.iloc[half:], chunksize=100)\n ddf1.to_parquet(tmp, engine=engine)\n ddf2.to_parquet(tmp, append=True, engine=engine)\n\n ddf3 = dd.read_parquet(tmp, engine=engine)\n assert_eq(df, ddf3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_create_test_append_create.assert_eq_df_ddf3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_create_test_append_create.assert_eq_df_ddf3_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 589, "end_line": 611, "span_ids": ["test_append_create"], "tokens": 234}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_append_create(tmpdir, engine):\n \"\"\"Test that appended parquet equal to the original one.\"\"\"\n tmp_path = str(tmpdir)\n df = pd.DataFrame(\n {\n \"i32\": np.arange(1000, dtype=np.int32),\n \"i64\": np.arange(1000, dtype=np.int64),\n \"f\": np.arange(1000, dtype=np.float64),\n \"bhello\": np.random.choice([\"hello\", \"yo\", \"people\"], size=1000).astype(\n \"O\"\n ),\n }\n )\n df.index.name = \"index\"\n\n half = len(df) // 2\n ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)\n ddf2 = dd.from_pandas(df.iloc[half:], chunksize=100)\n ddf1.to_parquet(tmp_path, append=True, engine=engine)\n ddf2.to_parquet(tmp_path, append=True, engine=engine)\n\n ddf3 = dd.read_parquet(tmp_path, engine=engine)\n assert_eq(df, ddf3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_with_partition_test_append_with_partition.assert_eq_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_with_partition_test_append_with_partition.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 614, "end_line": 652, "span_ids": ["test_append_with_partition"], "tokens": 319}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_append_with_partition(tmpdir, engine):\n # check_fastparquet()\n tmp = str(tmpdir)\n df0 = pd.DataFrame(\n {\n \"lat\": np.arange(0, 10),\n \"lon\": np.arange(10, 20),\n \"value\": np.arange(100, 110),\n }\n )\n df0.index.name = \"index\"\n df1 = pd.DataFrame(\n {\n \"lat\": np.arange(10, 20),\n \"lon\": np.arange(10, 20),\n \"value\": np.arange(120, 130),\n }\n )\n df1.index.name = \"index\"\n dd_df0 = dd.from_pandas(df0, npartitions=1)\n dd_df1 = dd.from_pandas(df1, npartitions=1)\n dd.to_parquet(dd_df0, tmp, partition_on=[\"lon\"], engine=engine)\n dd.to_parquet(\n dd_df1,\n tmp,\n partition_on=[\"lon\"],\n append=True,\n ignore_divisions=True,\n engine=engine,\n )\n\n out = dd.read_parquet(\n tmp, engine=engine, index=\"index\", gather_statistics=True\n ).compute()\n out[\"lon\"] = out.lon.astype(\"int\") # just to pass assert\n # sort required since partitioning breaks index order\n assert_eq(\n out.sort_values(\"value\"), pd.concat([df0, df1])[out.columns], check_index=False\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partition_on_cats_test_partition_on_cats.assert_set_df_b_cat_categ": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partition_on_cats_test_partition_on_cats.assert_set_df_b_cat_categ", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 655, "end_line": 667, "span_ids": ["test_partition_on_cats"], "tokens": 138}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_partition_on_cats(tmpdir, engine):\n tmp = str(tmpdir)\n d = pd.DataFrame(\n {\n \"a\": np.random.rand(50),\n \"b\": np.random.choice([\"x\", \"y\", \"z\"], size=50),\n \"c\": np.random.choice([\"x\", \"y\", \"z\"], size=50),\n }\n )\n d = dd.from_pandas(d, 2)\n d.to_parquet(tmp, partition_on=[\"b\"], engine=engine)\n df = dd.read_parquet(tmp, engine=engine)\n assert set(df.b.cat.categories) == {\"x\", \"y\", \"z\"}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": 
"{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partition_on_cats_pyarrow_test_partition_on_cats_pyarrow.assert_set_df_b_cat_categ": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partition_on_cats_pyarrow_test_partition_on_cats_pyarrow.assert_set_df_b_cat_categ", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 670, "end_line": 686, "span_ids": ["test_partition_on_cats_pyarrow"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"meta\", [False, True])\n@pytest.mark.parametrize(\"stats\", [False, True])\ndef test_partition_on_cats_pyarrow(tmpdir, stats, meta):\n check_pyarrow()\n\n tmp = str(tmpdir)\n d = pd.DataFrame(\n {\n \"a\": np.random.rand(50),\n \"b\": np.random.choice([\"x\", \"y\", \"z\"], size=50),\n \"c\": np.random.choice([\"x\", \"y\", \"z\"], size=50),\n }\n )\n d = dd.from_pandas(d, 2)\n d.to_parquet(tmp, partition_on=[\"b\"], engine=\"pyarrow\", write_metadata_file=meta)\n df = dd.read_parquet(tmp, engine=\"pyarrow\", gather_statistics=stats)\n assert set(df.b.cat.categories) == {\"x\", \"y\", \"z\"}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partition_on_cats_2_test_partition_on_cats_2.assert_set_df_cat_categor": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partition_on_cats_2_test_partition_on_cats_2.assert_set_df_cat_categor", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 689, "end_line": 713, "span_ids": ["test_partition_on_cats_2"], "tokens": 299}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_partition_on_cats_2(tmpdir, engine):\n tmp = str(tmpdir)\n d = pd.DataFrame(\n {\n \"a\": np.random.rand(50),\n \"b\": np.random.choice([\"x\", \"y\", \"z\"], size=50),\n \"c\": np.random.choice([\"x\", \"y\", \"z\"], size=50),\n }\n )\n d = dd.from_pandas(d, 2)\n d.to_parquet(tmp, partition_on=[\"b\", \"c\"], engine=engine)\n df = dd.read_parquet(tmp, engine=engine)\n assert set(df.b.cat.categories) == {\"x\", \"y\", \"z\"}\n assert set(df.c.cat.categories) == {\"x\", \"y\", \"z\"}\n\n df = dd.read_parquet(tmp, columns=[\"a\", \"c\"], engine=engine)\n 
assert set(df.c.cat.categories) == {\"x\", \"y\", \"z\"}\n assert \"b\" not in df.columns\n assert_eq(df, df.compute())\n df = dd.read_parquet(tmp, index=\"c\", engine=engine)\n assert set(df.index.categories) == {\"x\", \"y\", \"z\"}\n assert \"c\" not in df.columns\n # series\n df = dd.read_parquet(tmp, columns=\"b\", engine=engine)\n assert set(df.cat.categories) == {\"x\", \"y\", \"z\"}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_wo_index_test_append_wo_index.assert_eq_df_set_index_f": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_wo_index_test_append_wo_index.assert_eq_df_set_index_f", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 716, "end_line": 743, "span_ids": ["test_append_wo_index"], "tokens": 307}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_append_wo_index(tmpdir, engine):\n \"\"\"Test append with write_index=False.\"\"\"\n tmp = str(tmpdir.join(\"tmp1.parquet\"))\n df = pd.DataFrame(\n {\n \"i32\": np.arange(1000, dtype=np.int32),\n \"i64\": np.arange(1000, dtype=np.int64),\n \"f\": np.arange(1000, dtype=np.float64),\n \"bhello\": np.random.choice([\"hello\", \"yo\", \"people\"], size=1000).astype(\n \"O\"\n ),\n }\n )\n half = len(df) // 2\n ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)\n ddf2 = dd.from_pandas(df.iloc[half:], chunksize=100)\n ddf1.to_parquet(tmp, engine=engine)\n\n with pytest.raises(ValueError) as excinfo:\n ddf2.to_parquet(tmp, write_index=False, append=True, engine=engine)\n assert \"Appended columns\" in str(excinfo.value)\n\n tmp = str(tmpdir.join(\"tmp2.parquet\"))\n ddf1.to_parquet(tmp, write_index=False, engine=engine)\n ddf2.to_parquet(tmp, write_index=False, append=True, engine=engine)\n\n ddf3 = dd.read_parquet(tmp, index=\"f\", engine=engine)\n assert_eq(df.set_index(\"f\"), ddf3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_overlapping_divisions_test_append_overlapping_divisions.ddf2_to_parquet_tmp_engi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_overlapping_divisions_test_append_overlapping_divisions.ddf2_to_parquet_tmp_engi", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 746, "end_line": 768, "span_ids": ["test_append_overlapping_divisions"], "tokens": 242}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_append_overlapping_divisions(tmpdir, engine):\n \"\"\"Test raising of error when divisions overlapping.\"\"\"\n tmp = str(tmpdir)\n df = pd.DataFrame(\n {\n \"i32\": np.arange(1000, dtype=np.int32),\n \"i64\": np.arange(1000, dtype=np.int64),\n \"f\": np.arange(1000, dtype=np.float64),\n \"bhello\": np.random.choice([\"hello\", \"yo\", \"people\"], size=1000).astype(\n \"O\"\n ),\n }\n )\n half = len(df) // 2\n ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)\n ddf2 = dd.from_pandas(df.iloc[half - 10 :], chunksize=100)\n ddf1.to_parquet(tmp, engine=engine)\n\n with pytest.raises(ValueError) as excinfo:\n ddf2.to_parquet(tmp, engine=engine, append=True)\n assert \"Appended divisions\" in str(excinfo.value)\n\n ddf2.to_parquet(tmp, engine=engine, append=True, ignore_divisions=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_different_columns_test_append_different_columns.assert_Appended_dtypes_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_different_columns_test_append_different_columns.assert_Appended_dtypes_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 771, "end_line": 790, "span_ids": ["test_append_different_columns"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_append_different_columns(tmpdir, engine):\n \"\"\"Test raising of error when non equal columns.\"\"\"\n tmp = str(tmpdir)\n df1 = pd.DataFrame({\"i32\": np.arange(100, dtype=np.int32)})\n df2 = pd.DataFrame({\"i64\": np.arange(100, dtype=np.int64)})\n df3 = pd.DataFrame({\"i32\": np.arange(100, dtype=np.int64)})\n\n ddf1 = dd.from_pandas(df1, chunksize=2)\n ddf2 = dd.from_pandas(df2, chunksize=2)\n ddf3 = dd.from_pandas(df3, chunksize=2)\n\n ddf1.to_parquet(tmp, engine=engine)\n\n with pytest.raises(ValueError) as excinfo:\n ddf2.to_parquet(tmp, engine=engine, append=True)\n assert \"Appended columns\" in str(excinfo.value)\n\n with pytest.raises(ValueError) as excinfo:\n ddf3.to_parquet(tmp, engine=engine, append=True)\n assert \"Appended dtypes\" in str(excinfo.value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_ordering_test_ordering.assert_eq_ddf_ddf2_chec": 
{"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_ordering_test_ordering.assert_eq_ddf_ddf2_chec", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 793, "end_line": 809, "span_ids": ["test_ordering"], "tokens": 207}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@write_read_engines_xfail\ndef test_ordering(tmpdir, write_engine, read_engine):\n tmp = str(tmpdir)\n df = pd.DataFrame(\n {\"a\": [1, 2, 3], \"b\": [10, 20, 30], \"c\": [100, 200, 300]},\n index=pd.Index([-1, -2, -3], name=\"myindex\"),\n columns=[\"c\", \"a\", \"b\"],\n )\n ddf = dd.from_pandas(df, npartitions=2)\n dd.to_parquet(ddf, tmp, engine=write_engine)\n\n if read_engine == \"fastparquet\":\n pf = fastparquet.ParquetFile(tmp)\n assert pf.columns == [\"myindex\", \"c\", \"a\", \"b\"]\n\n ddf2 = dd.read_parquet(tmp, index=\"myindex\", engine=read_engine)\n assert_eq(ddf, ddf2, check_divisions=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_parquet_custom_columns_test_read_parquet_custom_columns.assert_eq_df_f_i32_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_parquet_custom_columns_test_read_parquet_custom_columns.assert_eq_df_f_i32_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 812, "end_line": 833, "span_ids": ["test_read_parquet_custom_columns"], "tokens": 245}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_parquet_custom_columns(tmpdir, engine):\n import glob\n\n tmp = str(tmpdir)\n data = pd.DataFrame(\n {\"i32\": np.arange(1000, dtype=np.int32), \"f\": np.arange(1000, dtype=np.float64)}\n )\n df = dd.from_pandas(data, chunksize=50)\n df.to_parquet(tmp, engine=engine)\n\n df2 = dd.read_parquet(tmp, columns=[\"i32\", \"f\"], engine=engine)\n assert_eq(df[[\"i32\", \"f\"]], df2, check_index=False)\n\n import glob\n\n fns = glob.glob(os.path.join(tmp, \"*.parquet\"))\n df2 = dd.read_parquet(fns, columns=[\"i32\"], engine=engine).compute()\n df2.sort_values(\"i32\", inplace=True)\n assert_eq(df[[\"i32\"]], df2, check_index=False, check_divisions=False)\n\n df3 = dd.read_parquet(tmp, columns=[\"f\", \"i32\"], engine=engine)\n assert_eq(df[[\"f\", \"i32\"]], df3, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip_test_roundtrip.assert_eq_ddf_ddf2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip_test_roundtrip.assert_eq_ddf_ddf2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 836, "end_line": 886, "span_ids": ["test_roundtrip"], "tokens": 770}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"df,write_kwargs,read_kwargs\",\n [\n (pd.DataFrame({\"x\": [3, 2, 1]}), {}, {}),\n (pd.DataFrame({\"x\": [\"c\", \"a\", \"b\"]}), {}, {}),\n (pd.DataFrame({\"x\": [\"cc\", \"a\", \"bbb\"]}), {}, {}),\n (pd.DataFrame({\"x\": [b\"a\", b\"b\", b\"c\"]}), {\"object_encoding\": \"bytes\"}, {}),\n (\n pd.DataFrame({\"x\": pd.Categorical([\"a\", \"b\", \"a\"])}),\n {},\n {\"categories\": [\"x\"]},\n ),\n (pd.DataFrame({\"x\": pd.Categorical([1, 2, 1])}), {}, {\"categories\": [\"x\"]}),\n (pd.DataFrame({\"x\": list(map(pd.Timestamp, [3000, 2000, 1000]))}), {}, {}),\n (pd.DataFrame({\"x\": [3000, 2000, 1000]}).astype(\"M8[ns]\"), {}, {}),\n pytest.param(\n pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"M8[ns]\"),\n {},\n {},\n marks=pytest.mark.xfail(\n reason=\"Parquet doesn't support nanosecond precision\"\n ),\n ),\n (pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"M8[us]\"), {}, {}),\n (pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"M8[ms]\"), {}, {}),\n (pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"uint16\"), {}, {}),\n (pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"float32\"), {}, {}),\n (pd.DataFrame({\"x\": [3, 1, 2]}, index=[3, 2, 1]), {}, {}),\n (pd.DataFrame({\"x\": [3, 1, 5]}, index=pd.Index([1, 2, 3], name=\"foo\")), {}, {}),\n (pd.DataFrame({\"x\": [1, 2, 3], \"y\": [3, 2, 1]}), {}, {}),\n (pd.DataFrame({\"x\": [1, 2, 3], \"y\": [3, 2, 1]}, columns=[\"y\", \"x\"]), {}, {}),\n (pd.DataFrame({\"0\": [3, 2, 1]}), {}, {}),\n (pd.DataFrame({\"x\": [3, 2, None]}), {}, {}),\n (pd.DataFrame({\"-\": [3.0, 2.0, None]}), {}, {}),\n (pd.DataFrame({\".\": [3.0, 2.0, None]}), {}, {}),\n (pd.DataFrame({\" \": [3.0, 2.0, None]}), {}, {}),\n ],\n)\ndef test_roundtrip(tmpdir, df, write_kwargs, read_kwargs, engine):\n tmp = str(tmpdir)\n if df.index.name is None:\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, npartitions=2)\n\n oe = write_kwargs.pop(\"object_encoding\", None)\n if oe and engine == \"fastparquet\":\n dd.to_parquet(ddf, tmp, engine=engine, object_encoding=oe, **write_kwargs)\n else:\n dd.to_parquet(ddf, tmp, engine=engine, **write_kwargs)\n ddf2 = dd.read_parquet(tmp, index=df.index.name, engine=engine, **read_kwargs)\n assert_eq(ddf, ddf2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_categories_test_categories.with_pytest_raises_Value.ddf2.dd_read_parquet_fn_categ": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_categories_test_categories.with_pytest_raises_Value.ddf2.dd_read_parquet_fn_categ", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 889, "end_line": 915, "span_ids": ["test_categories"], "tokens": 330}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_categories(tmpdir, engine):\n fn = str(tmpdir)\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5], \"y\": list(\"caaab\")})\n ddf = dd.from_pandas(df, npartitions=2)\n ddf[\"y\"] = ddf.y.astype(\"category\")\n ddf.to_parquet(fn, engine=engine)\n ddf2 = dd.read_parquet(fn, categories=[\"y\"], engine=engine)\n\n # Shouldn't need to specify categories explicitly\n ddf3 = dd.read_parquet(fn, engine=engine)\n assert_eq(ddf3, ddf2)\n\n with pytest.raises(NotImplementedError):\n ddf2.y.cat.categories\n assert set(ddf2.y.compute().cat.categories) == {\"a\", \"b\", \"c\"}\n cats_set = ddf2.map_partitions(lambda x: x.y.cat.categories.sort_values()).compute()\n assert cats_set.tolist() == [\"a\", \"c\", \"a\", \"b\"]\n\n if engine == \"fastparquet\":\n assert_eq(ddf.y, ddf2.y, check_names=False)\n with pytest.raises(TypeError):\n # attempt to load as category that which is not so encoded\n ddf2 = dd.read_parquet(fn, categories=[\"x\"], engine=engine).compute()\n\n with pytest.raises((ValueError, FutureWarning)):\n # attempt to load as category unknown column\n ddf2 = dd.read_parquet(fn, categories=[\"foo\"], engine=engine)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_empty_partition_test_empty_partition.assert_eq_sol_ddf3_chec": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_empty_partition_test_empty_partition.assert_eq_sol_ddf3_chec", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 918, "end_line": 929, "span_ids": ["test_empty_partition"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_empty_partition(tmpdir, engine):\n fn = str(tmpdir)\n df = pd.DataFrame({\"a\": range(10), \"b\": range(10)})\n ddf = dd.from_pandas(df, npartitions=5)\n\n ddf2 = ddf[ddf.a <= 5]\n 
ddf2.to_parquet(fn, engine=engine)\n\n ddf3 = dd.read_parquet(fn, engine=engine)\n assert ddf3.npartitions < 5\n sol = ddf2.compute()\n assert_eq(sol, ddf3, check_names=False, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_timestamp_index_test_to_parquet_default_writes_nulls.assert_table_1_null_coun": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_timestamp_index_test_to_parquet_default_writes_nulls.assert_table_1_null_coun", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 932, "end_line": 952, "span_ids": ["test_to_parquet_default_writes_nulls", "test_timestamp_index"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_timestamp_index(tmpdir, engine):\n fn = str(tmpdir)\n df = dd._compat.makeTimeDataFrame()\n df.index.name = \"foo\"\n ddf = dd.from_pandas(df, npartitions=5)\n ddf.to_parquet(fn, engine=engine)\n ddf2 = dd.read_parquet(fn, engine=engine)\n assert_eq(ddf, ddf2)\n\n\ndef test_to_parquet_default_writes_nulls(tmpdir):\n check_fastparquet()\n check_pyarrow()\n fn = str(tmpdir.join(\"test.parquet\"))\n\n df = pd.DataFrame({\"c1\": [1.0, np.nan, 2, np.nan, 3]})\n ddf = dd.from_pandas(df, npartitions=1)\n\n ddf.to_parquet(fn)\n table = pq.read_table(fn)\n assert table[1].null_count == 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_fails_by_default_test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_fails_by_default.with_pytest_raises_ValueE.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_fails_by_default_test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_fails_by_default.with_pytest_raises_ValueE.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 955, "end_line": 987, "span_ids": ["test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_fails_by_default"], "tokens": 287}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": 
"def test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_fails_by_default(tmpdir):\n check_pyarrow()\n\n df = pd.DataFrame(\n {\"partition_column\": [0, 0, 1, 1], \"strings\": [\"a\", \"b\", None, None]}\n )\n\n ddf = dd.from_pandas(df, npartitions=2)\n # In order to allow pyarrow to write an inconsistent schema,\n # we need to avoid writing the _metadata file (will fail >0.17.1)\n # and need to avoid schema inference (i.e. use `schema=None`)\n ddf.to_parquet(\n str(tmpdir),\n engine=\"pyarrow\",\n partition_on=[\"partition_column\"],\n write_metadata_file=False,\n schema=None,\n )\n\n # Test that schema is not validated by default\n # (shouldn't raise error)\n dd.read_parquet(str(tmpdir), engine=\"pyarrow\", gather_statistics=False).compute()\n\n # Test that read fails when validate_schema=True\n with pytest.raises(ValueError) as e_info:\n dd.read_parquet(\n str(tmpdir),\n engine=\"pyarrow\",\n gather_statistics=False,\n dataset={\"validate_schema\": True},\n ).compute()\n assert e_info.message.contains(\"ValueError: Schema in partition\")\n assert e_info.message.contains(\"was different\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_succeeds_w_manual_schema_test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_succeeds_w_manual_schema.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_succeeds_w_manual_schema_test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_succeeds_w_manual_schema.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 990, "end_line": 1076, "span_ids": ["test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_succeeds_w_manual_schema"], "tokens": 816}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_succeeds_w_manual_schema(\n tmpdir,\n):\n check_pyarrow()\n\n # Data types to test: strings, arrays, ints, timezone aware timestamps\n in_arrays = [[0, 1, 2], [3, 4], np.nan, np.nan]\n out_arrays = [[0, 1, 2], [3, 4], None, None]\n in_strings = [\"a\", \"b\", np.nan, np.nan]\n out_strings = [\"a\", \"b\", None, None]\n tstamp = pd.Timestamp(1513393355, unit=\"s\")\n in_tstamps = [tstamp, tstamp, pd.NaT, pd.NaT]\n out_tstamps = [\n # Timestamps come out in numpy.datetime64 format\n tstamp.to_datetime64(),\n tstamp.to_datetime64(),\n np.datetime64(\"NaT\"),\n np.datetime64(\"NaT\"),\n ]\n timezone = \"US/Eastern\"\n tz_tstamp = pd.Timestamp(1513393355, unit=\"s\", tz=timezone)\n in_tz_tstamps = [tz_tstamp, tz_tstamp, pd.NaT, pd.NaT]\n out_tz_tstamps = [\n # Timezones do not make it through a write-read cycle.\n tz_tstamp.tz_convert(None).to_datetime64(),\n 
tz_tstamp.tz_convert(None).to_datetime64(),\n np.datetime64(\"NaT\"),\n np.datetime64(\"NaT\"),\n ]\n\n df = pd.DataFrame(\n {\n \"partition_column\": [0, 0, 1, 1],\n \"arrays\": in_arrays,\n \"strings\": in_strings,\n \"tstamps\": in_tstamps,\n \"tz_tstamps\": in_tz_tstamps,\n }\n )\n\n ddf = dd.from_pandas(df, npartitions=2)\n schema = pa.schema(\n [\n (\"arrays\", pa.list_(pa.int64())),\n (\"strings\", pa.string()),\n (\"tstamps\", pa.timestamp(\"ns\")),\n (\"tz_tstamps\", pa.timestamp(\"ns\", timezone)),\n (\"partition_column\", pa.int64()),\n ]\n )\n ddf.to_parquet(\n str(tmpdir), engine=\"pyarrow\", partition_on=\"partition_column\", schema=schema\n )\n ddf_after_write = (\n dd.read_parquet(str(tmpdir), engine=\"pyarrow\", gather_statistics=False)\n .compute()\n .reset_index(drop=True)\n )\n\n # Check array support\n arrays_after_write = ddf_after_write.arrays.values\n for i in range(len(df)):\n assert np.array_equal(arrays_after_write[i], out_arrays[i]), type(out_arrays[i])\n\n # Check datetime support\n tstamps_after_write = ddf_after_write.tstamps.values\n for i in range(len(df)):\n # Need to test NaT separately\n if np.isnat(tstamps_after_write[i]):\n assert np.isnat(out_tstamps[i])\n else:\n assert tstamps_after_write[i] == out_tstamps[i]\n\n # Check timezone aware datetime support\n tz_tstamps_after_write = ddf_after_write.tz_tstamps.values\n for i in range(len(df)):\n # Need to test NaT separately\n if np.isnat(tz_tstamps_after_write[i]):\n assert np.isnat(out_tz_tstamps[i])\n else:\n assert tz_tstamps_after_write[i] == out_tz_tstamps[i]\n\n # Check string support\n assert np.array_equal(ddf_after_write.strings.values, out_strings)\n\n # Check partition column\n assert np.array_equal(ddf_after_write.partition_column, df.partition_column)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pyarrow_schema_inference_test_pyarrow_schema_inference.if_index_and_engine_f.else_.assert_eq_df_df_out_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pyarrow_schema_inference_test_pyarrow_schema_inference.if_index_and_engine_f.else_.assert_eq_df_df_out_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1079, "end_line": 1123, "span_ids": ["test_pyarrow_schema_inference"], "tokens": 395}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"index\", [False, True])\n@pytest.mark.parametrize(\"schema\", [\"infer\", \"complex\"])\ndef test_pyarrow_schema_inference(tmpdir, index, engine, schema):\n\n check_pyarrow()\n if pa.__version__ < LooseVersion(\"0.15.0\"):\n pytest.skip(\"PyArrow>=0.15 Required.\")\n if schema == \"complex\":\n schema = {\"index\": pa.string(), \"amount\": pa.int64()}\n\n tmpdir = str(tmpdir)\n df = pd.DataFrame(\n {\n \"index\": [\"1\", \"2\", \"3\", \"2\", 
\"3\", \"1\", \"4\"],\n \"date\": pd.to_datetime(\n [\n \"2017-01-01\",\n \"2017-01-01\",\n \"2017-01-01\",\n \"2017-01-02\",\n \"2017-01-02\",\n \"2017-01-06\",\n \"2017-01-09\",\n ]\n ),\n \"amount\": [100, 200, 300, 400, 500, 600, 700],\n },\n index=range(7, 14),\n )\n if index:\n df = dd.from_pandas(df, npartitions=2).set_index(\"index\")\n else:\n df = dd.from_pandas(df, npartitions=2)\n\n df.to_parquet(tmpdir, engine=\"pyarrow\", schema=schema, compute=False).compute(\n scheduler=\"synchronous\"\n )\n df_out = dd.read_parquet(tmpdir, engine=engine)\n\n if index and engine == \"fastparquet\":\n # Fastparquet not handling divisions for\n # pyarrow-written dataset with string index\n assert_eq(df, df_out, check_divisions=False)\n else:\n assert_eq(df, df_out)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partition_on_test_partition_on.for_val_in_df_a2_unique_.assert_set_df_b_df_a2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partition_on_test_partition_on.for_val_in_df_a2_unique_.assert_set_df_b_df_a2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1126, "end_line": 1150, "span_ids": ["test_partition_on"], "tokens": 303}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_partition_on(tmpdir, engine):\n tmpdir = str(tmpdir)\n df = pd.DataFrame(\n {\n \"a1\": np.random.choice([\"A\", \"B\", \"C\"], size=100),\n \"a2\": np.random.choice([\"X\", \"Y\", \"Z\"], size=100),\n \"b\": np.random.random(size=100),\n \"c\": np.random.randint(1, 5, size=100),\n \"d\": np.arange(0, 100),\n }\n )\n d = dd.from_pandas(df, npartitions=2)\n d.to_parquet(tmpdir, partition_on=[\"a1\", \"a2\"], engine=engine)\n # Note #1: Cross-engine functionality is missing\n # Note #2: The index is not preserved in pyarrow when partition_on is used\n out = dd.read_parquet(\n tmpdir, engine=engine, index=False, gather_statistics=False\n ).compute()\n for val in df.a1.unique():\n assert set(df.b[df.a1 == val]) == set(out.b[out.a1 == val])\n\n # Now specify the columns and allow auto-index detection\n out = dd.read_parquet(tmpdir, engine=engine, columns=[\"b\", \"a2\"]).compute()\n for val in df.a2.unique():\n assert set(df.b[df.a2 == val]) == set(out.b[out.a2 == val])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partition_on_duplicates_test_partition_on_duplicates.for_root_dirs_files_in_.for_file_in_files_.assert_file_in_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partition_on_duplicates_test_partition_on_duplicates.for_root_dirs_files_in_.for_file_in_files_.assert_file_in_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1153, "end_line": 1178, "span_ids": ["test_partition_on_duplicates"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_partition_on_duplicates(tmpdir, engine):\n # https://github.com/dask/dask/issues/6445\n tmpdir = str(tmpdir)\n df = pd.DataFrame(\n {\n \"a1\": np.random.choice([\"A\", \"B\", \"C\"], size=100),\n \"a2\": np.random.choice([\"X\", \"Y\", \"Z\"], size=100),\n \"data\": np.random.random(size=100),\n }\n )\n d = dd.from_pandas(df, npartitions=2)\n\n for _ in range(2):\n d.to_parquet(tmpdir, partition_on=[\"a1\", \"a2\"], engine=engine)\n\n out = dd.read_parquet(tmpdir, engine=engine).compute()\n\n assert len(df) == len(out)\n for root, dirs, files in os.walk(tmpdir):\n for file in files:\n assert file in (\n \"part.0.parquet\",\n \"part.1.parquet\",\n \"_common_metadata\",\n \"_metadata\",\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partition_on_string_test_partition_on_string.for_val_in_df_aa_unique_.assert_set_df_bb_df_aa_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partition_on_string_test_partition_on_string.for_val_in_df_aa_unique_.assert_set_df_bb_df_aa_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1181, "end_line": 1203, "span_ids": ["test_partition_on_string"], "tokens": 223}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"partition_on\", [\"aa\", [\"aa\"]])\ndef test_partition_on_string(tmpdir, partition_on):\n tmpdir = str(tmpdir)\n check_pyarrow()\n with dask.config.set(scheduler=\"single-threaded\"):\n tmpdir = str(tmpdir)\n df = pd.DataFrame(\n {\n \"aa\": np.random.choice([\"A\", \"B\", \"C\"], size=100),\n \"bb\": np.random.random(size=100),\n \"cc\": np.random.randint(1, 5, size=100),\n }\n )\n d = dd.from_pandas(df, npartitions=2)\n d.to_parquet(\n tmpdir, partition_on=partition_on, write_index=False, engine=\"pyarrow\"\n )\n out = dd.read_parquet(\n tmpdir, index=False, gather_statistics=False, engine=\"pyarrow\"\n )\n out = out.compute()\n for val in df.aa.unique():\n assert set(df.bb[df.aa == val]) == 
set(out.bb[out.aa == val])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_filters_categorical_test_filters_categorical.assert_len_ddftest_read_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_filters_categorical_test_filters_categorical.assert_len_ddftest_read_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1206, "end_line": 1224, "span_ids": ["test_filters_categorical"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@write_read_engines()\ndef test_filters_categorical(tmpdir, write_engine, read_engine):\n tmpdir = str(tmpdir)\n cats = [\"2018-01-01\", \"2018-01-02\", \"2018-01-03\", \"2018-01-04\"]\n dftest = pd.DataFrame(\n {\n \"dummy\": [1, 1, 1, 1],\n \"DatePart\": pd.Categorical(cats, categories=cats, ordered=True),\n }\n )\n ddftest = dd.from_pandas(dftest, npartitions=4).set_index(\"dummy\")\n ddftest.to_parquet(tmpdir, partition_on=\"DatePart\", engine=write_engine)\n ddftest_read = dd.read_parquet(\n tmpdir,\n index=\"dummy\",\n engine=read_engine,\n filters=[((\"DatePart\", \"<=\", \"2018-01-02\"))],\n )\n assert len(ddftest_read) == 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_filters_test_filters.assert_e_x_2_e_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_filters_test_filters.assert_e_x_2_e_x_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1227, "end_line": 1265, "span_ids": ["test_filters"], "tokens": 400}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@write_read_engines()\ndef test_filters(tmpdir, write_engine, read_engine):\n tmp_path = str(tmpdir)\n df = pd.DataFrame({\"x\": range(10), \"y\": list(\"aabbccddee\")})\n ddf = dd.from_pandas(df, npartitions=5)\n assert ddf.npartitions == 5\n\n ddf.to_parquet(tmp_path, engine=write_engine)\n\n a = dd.read_parquet(tmp_path, engine=read_engine, filters=[(\"x\", \">\", 4)])\n assert a.npartitions == 3\n assert (a.x > 3).all().compute()\n\n b = dd.read_parquet(tmp_path, engine=read_engine, filters=[(\"y\", 
\"==\", \"c\")])\n assert b.npartitions == 1\n assert (b.y == \"c\").all().compute()\n\n c = dd.read_parquet(\n tmp_path, engine=read_engine, filters=[(\"y\", \"==\", \"c\"), (\"x\", \">\", 6)]\n )\n assert c.npartitions <= 1\n assert not len(c)\n assert_eq(c, c)\n\n d = dd.read_parquet(\n tmp_path,\n engine=read_engine,\n filters=[\n # Select two overlapping ranges\n [(\"x\", \">\", 1), (\"x\", \"<\", 6)],\n [(\"x\", \">\", 3), (\"x\", \"<\", 8)],\n ],\n )\n assert d.npartitions == 3\n assert ((d.x > 1) & (d.x < 8)).all().compute()\n\n e = dd.read_parquet(tmp_path, engine=read_engine, filters=[(\"x\", \"in\", (0, 9))])\n assert e.npartitions == 2\n assert ((e.x < 2) | (e.x > 7)).all().compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_filters_v0_test_filters_v0.assert_len_ddf2_0": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_filters_v0_test_filters_v0.assert_len_ddf2_0", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1268, "end_line": 1300, "span_ids": ["test_filters_v0"], "tokens": 399}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@write_read_engines()\ndef test_filters_v0(tmpdir, write_engine, read_engine):\n if write_engine == \"fastparquet\" or read_engine == \"fastparquet\":\n pytest.importorskip(\"fastparquet\", minversion=\"0.3.1\")\n fn = str(tmpdir)\n\n df = pd.DataFrame({\"at\": [\"ab\", \"aa\", \"ba\", \"da\", \"bb\"]})\n ddf = dd.from_pandas(df, npartitions=1)\n\n # Ok with 1 partition and filters\n ddf.repartition(npartitions=1, force=True).to_parquet(\n fn, write_index=False, engine=write_engine\n )\n ddf2 = dd.read_parquet(\n fn, index=False, engine=read_engine, filters=[(\"at\", \"==\", \"aa\")]\n ).compute()\n assert_eq(ddf2, ddf)\n\n # with >1 partition and no filters\n ddf.repartition(npartitions=2, force=True).to_parquet(fn, engine=write_engine)\n dd.read_parquet(fn, engine=read_engine).compute()\n assert_eq(ddf2, ddf)\n\n # with >1 partition and filters using base fastparquet\n if read_engine == \"fastparquet\":\n ddf.repartition(npartitions=2, force=True).to_parquet(fn, engine=write_engine)\n df2 = fastparquet.ParquetFile(fn).to_pandas(filters=[(\"at\", \"==\", \"aa\")])\n assert len(df2) > 0\n\n # with >1 partition and filters\n ddf.repartition(npartitions=2, force=True).to_parquet(fn, engine=write_engine)\n dd.read_parquet(fn, engine=read_engine, filters=[(\"at\", \"==\", \"aa\")]).compute()\n assert len(ddf2) > 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_fiters_file_list_test_fiters_file_list.assert_len_ddf2_0": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_fiters_file_list_test_fiters_file_list.assert_len_ddf2_0", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1303, "end_line": 1323, "span_ids": ["test_fiters_file_list"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fiters_file_list(tmpdir, engine):\n df = pd.DataFrame({\"x\": range(10), \"y\": list(\"aabbccddee\")})\n ddf = dd.from_pandas(df, npartitions=5)\n\n ddf.to_parquet(str(tmpdir), engine=engine)\n fils = str(tmpdir.join(\"*.parquet\"))\n ddf_out = dd.read_parquet(\n fils, gather_statistics=True, engine=engine, filters=[(\"x\", \">\", 3)]\n )\n\n assert ddf_out.npartitions == 3\n assert_eq(df[df[\"x\"] > 3], ddf_out.compute(), check_index=False)\n\n # Check that first parition gets filtered for single-path input\n ddf2 = dd.read_parquet(\n str(tmpdir.join(\"part.0.parquet\")),\n gather_statistics=True,\n engine=engine,\n filters=[(\"x\", \">\", 3)],\n )\n assert len(ddf2) == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_divisions_read_with_filters_test_divisions_read_with_filters.assert_out_divisions_e": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_divisions_read_with_filters_test_divisions_read_with_filters.assert_out_divisions_e", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1326, "end_line": 1348, "span_ids": ["test_divisions_read_with_filters"], "tokens": 228}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_divisions_read_with_filters(tmpdir):\n pytest.importorskip(\"fastparquet\", minversion=\"0.3.1\")\n tmpdir = str(tmpdir)\n # generate dataframe\n size = 100\n categoricals = []\n for value in [\"a\", \"b\", \"c\", \"d\"]:\n categoricals += [value] * int(size / 4)\n df = pd.DataFrame(\n {\n \"a\": categoricals,\n \"b\": np.random.random(size=size),\n \"c\": np.random.randint(1, 5, size=size),\n }\n )\n d = dd.from_pandas(df, npartitions=4)\n # save it\n d.to_parquet(tmpdir, write_index=True, partition_on=[\"a\"], engine=\"fastparquet\")\n # read it\n out = dd.read_parquet(tmpdir, engine=\"fastparquet\", 
filters=[(\"a\", \"==\", \"b\")])\n # test it\n expected_divisions = (25, 49)\n assert out.divisions == expected_divisions", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_divisions_are_known_read_with_filters_test_divisions_are_known_read_with_filters.assert_out_divisions_e": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_divisions_are_known_read_with_filters_test_divisions_are_known_read_with_filters.assert_out_divisions_e", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1351, "end_line": 1370, "span_ids": ["test_divisions_are_known_read_with_filters"], "tokens": 254}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_divisions_are_known_read_with_filters(tmpdir):\n pytest.importorskip(\"fastparquet\", minversion=\"0.3.1\")\n tmpdir = str(tmpdir)\n # generate dataframe\n df = pd.DataFrame(\n {\n \"unique\": [0, 0, 1, 1, 2, 2, 3, 3],\n \"id\": [\"id1\", \"id2\", \"id1\", \"id2\", \"id1\", \"id2\", \"id1\", \"id2\"],\n },\n index=[0, 0, 1, 1, 2, 2, 3, 3],\n )\n d = dd.from_pandas(df, npartitions=2)\n # save it\n d.to_parquet(tmpdir, partition_on=[\"id\"], engine=\"fastparquet\")\n # read it\n out = dd.read_parquet(tmpdir, engine=\"fastparquet\", filters=[(\"id\", \"==\", \"id1\")])\n # test it\n assert out.known_divisions\n expected_divisions = (0, 2, 3)\n assert out.divisions == expected_divisions", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_from_fastparquet_parquetfile_test_read_from_fastparquet_parquetfile.with_pytest_raises_Assert.out.dd_read_parquet_pq_f_eng": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_from_fastparquet_parquetfile_test_read_from_fastparquet_parquetfile.with_pytest_raises_Assert.out.dd_read_parquet_pq_f_eng", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1373, "end_line": 1401, "span_ids": ["test_read_from_fastparquet_parquetfile"], "tokens": 277}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(reason=\"No longer accept ParquetFile 
objects\")\ndef test_read_from_fastparquet_parquetfile(tmpdir):\n check_fastparquet()\n fn = str(tmpdir)\n\n df = pd.DataFrame(\n {\n \"a\": np.random.choice([\"A\", \"B\", \"C\"], size=100),\n \"b\": np.random.random(size=100),\n \"c\": np.random.randint(1, 5, size=100),\n }\n )\n d = dd.from_pandas(df, npartitions=2)\n d.to_parquet(fn, partition_on=[\"a\"], engine=\"fastparquet\")\n\n pq_f = fastparquet.ParquetFile(fn)\n\n # OK with no filters\n out = dd.read_parquet(pq_f).compute()\n for val in df.a.unique():\n assert set(df.b[df.a == val]) == set(out.b[out.a == val])\n\n # OK with filters\n out = dd.read_parquet(pq_f, filters=[(\"a\", \"==\", \"B\")]).compute()\n assert set(df.b[df.a == \"B\"]) == set(out.b)\n\n # Engine should not be set to 'pyarrow'\n with pytest.raises(AssertionError):\n out = dd.read_parquet(pq_f, engine=\"pyarrow\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_to_parquet_lazy_test_to_parquet_lazy.assert_eq_ddf_ddf2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_to_parquet_lazy_test_to_parquet_lazy.assert_eq_ddf_ddf2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1404, "end_line": 1418, "span_ids": ["test_to_parquet_lazy"], "tokens": 170}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"scheduler\", [\"threads\", \"processes\"])\ndef test_to_parquet_lazy(tmpdir, scheduler, engine):\n tmpdir = str(tmpdir)\n df = pd.DataFrame({\"a\": [1, 2, 3, 4], \"b\": [1.0, 2.0, 3.0, 4.0]})\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, npartitions=2)\n value = ddf.to_parquet(tmpdir, compute=False, engine=engine)\n\n assert hasattr(value, \"dask\")\n value.compute(scheduler=scheduler)\n assert os.path.exists(tmpdir)\n\n ddf2 = dd.read_parquet(tmpdir, engine=engine)\n\n assert_eq(ddf, ddf2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_timestamp96_test_timestamp96.assert_eq_out_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_timestamp96_test_timestamp96.assert_eq_out_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1421, "end_line": 1430, "span_ids": ["test_timestamp96"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", 
"start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_timestamp96(tmpdir):\n check_fastparquet()\n fn = str(tmpdir)\n df = pd.DataFrame({\"a\": [\"now\"]}, dtype=\"M8[ns]\")\n ddf = dd.from_pandas(df, 1)\n ddf.to_parquet(fn, write_index=False, times=\"int96\")\n pf = fastparquet.ParquetFile(fn)\n assert pf._schema[1].type == fastparquet.parquet_thrift.Type.INT96\n out = dd.read_parquet(fn, index=False).compute()\n assert_eq(out, df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_drill_scheme_test_drill_scheme.assert_np_unique_out_dir": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_drill_scheme_test_drill_scheme.assert_np_unique_out_dir", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1433, "end_line": 1453, "span_ids": ["test_drill_scheme"], "tokens": 225}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_drill_scheme(tmpdir):\n check_fastparquet()\n fn = str(tmpdir)\n N = 5\n df1 = pd.DataFrame({c: np.random.random(N) for i, c in enumerate([\"a\", \"b\", \"c\"])})\n df2 = pd.DataFrame({c: np.random.random(N) for i, c in enumerate([\"a\", \"b\", \"c\"])})\n files = []\n for d in [\"test_data1\", \"test_data2\"]:\n dn = os.path.join(fn, d)\n if not os.path.exists(dn):\n os.mkdir(dn)\n files.append(os.path.join(dn, \"data1.parq\"))\n\n fastparquet.write(files[0], df1)\n fastparquet.write(files[1], df2)\n\n df = dd.read_parquet(files)\n assert \"dir0\" in df.columns\n out = df.compute()\n assert \"dir0\" in out\n assert (np.unique(out.dir0) == [\"test_data1\", \"test_data2\"]).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parquet_select_cats_test_parquet_select_cats.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parquet_select_cats_test_parquet_select_cats.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1456, "end_line": 1474, "span_ids": ["test_parquet_select_cats"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_parquet_select_cats(tmpdir, engine):\n fn = str(tmpdir)\n df = pd.DataFrame(\n {\n \"categories\": pd.Series(\n np.random.choice([\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"], size=100),\n dtype=\"category\",\n ),\n \"ints\": pd.Series(list(range(0, 100)), dtype=\"int\"),\n \"floats\": pd.Series(list(range(0, 100)), dtype=\"float\"),\n }\n )\n\n ddf = dd.from_pandas(df, 1)\n ddf.to_parquet(fn, engine=engine)\n rddf = dd.read_parquet(fn, columns=[\"ints\"], engine=engine)\n assert list(rddf.columns) == [\"ints\"]\n rddf = dd.read_parquet(fn, engine=engine)\n assert list(rddf.columns) == list(df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_columns_name_test_columns_name.assert_eq_result_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_columns_name_test_columns_name.assert_eq_result_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1477, "end_line": 1487, "span_ids": ["test_columns_name"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_columns_name(tmpdir, engine):\n if engine == \"fastparquet\" and fastparquet.__version__ <= LooseVersion(\"0.3.1\"):\n pytest.skip(\"Fastparquet does not write column_indexes up to 0.3.1\")\n tmp_path = str(tmpdir)\n df = pd.DataFrame({\"A\": [1, 2]}, index=pd.Index([\"a\", \"b\"], name=\"idx\"))\n df.columns.name = \"cols\"\n ddf = dd.from_pandas(df, 2)\n\n ddf.to_parquet(tmp_path, engine=engine)\n result = dd.read_parquet(tmp_path, engine=engine, index=[\"idx\"])\n assert_eq(result, df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_check_compression_check_compression.if_engine_fastparquet.else_.for_i_in_range_metadata_n.for_j_in_range_len_names_.if_compression_is_None_.else_.assert_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_check_compression_check_compression.if_engine_fastparquet.else_.for_i_in_range_metadata_n.for_j_in_range_len_names_.if_compression_is_None_.else_.assert_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1490, "end_line": 1516, "span_ids": ["check_compression"], "tokens": 221}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def check_compression(engine, filename, compression):\n if engine == \"fastparquet\":\n pf = fastparquet.ParquetFile(filename)\n md = pf.fmd.row_groups[0].columns[0].meta_data\n if compression is None:\n assert md.total_compressed_size == md.total_uncompressed_size\n else:\n assert md.total_compressed_size != md.total_uncompressed_size\n else:\n metadata = pa.parquet.ParquetDataset(filename).metadata\n names = metadata.schema.names\n for i in range(metadata.num_row_groups):\n row_group = metadata.row_group(i)\n for j in range(len(names)):\n column = row_group.column(j)\n if compression is None:\n assert (\n column.total_compressed_size == column.total_uncompressed_size\n )\n else:\n compress_expect = compression\n if compression == \"default\":\n compress_expect = \"snappy\"\n assert compress_expect.lower() == column.compression.lower()\n assert (\n column.total_compressed_size != column.total_uncompressed_size\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_writing_parquet_with_compression_test_writing_parquet_with_compression.check_compression_engine_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_writing_parquet_with_compression_test_writing_parquet_with_compression.check_compression_engine_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1519, "end_line": 1532, "span_ids": ["test_writing_parquet_with_compression"], "tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"compression,\", [\"default\", None, \"gzip\", \"snappy\"])\ndef test_writing_parquet_with_compression(tmpdir, compression, engine):\n fn = str(tmpdir)\n if compression in [\"snappy\", \"default\"]:\n pytest.importorskip(\"snappy\")\n\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\"] * 10, \"y\": [1, 2, 3] * 10})\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, npartitions=3)\n\n ddf.to_parquet(fn, compression=compression, engine=engine)\n out = dd.read_parquet(fn, engine=engine)\n assert_eq(out, ddf)\n check_compression(engine, fn, compression)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_writing_parquet_with_partition_on_and_compression_test_writing_parquet_with_partition_on_and_compression.check_compression_engine_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_writing_parquet_with_partition_on_and_compression_test_writing_parquet_with_partition_on_and_compression.check_compression_engine_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1535, "end_line": 1546, "span_ids": ["test_writing_parquet_with_partition_on_and_compression"], "tokens": 156}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"compression,\", [\"default\", None, \"gzip\", \"snappy\"])\ndef test_writing_parquet_with_partition_on_and_compression(tmpdir, compression, engine):\n fn = str(tmpdir)\n if compression in [\"snappy\", \"default\"]:\n pytest.importorskip(\"snappy\")\n\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\"] * 10, \"y\": [1, 2, 3] * 10})\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, npartitions=3)\n\n ddf.to_parquet(fn, compression=compression, engine=engine, partition_on=[\"x\"])\n check_compression(engine, fn, compression)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_pandas_metadata_pandas_metadata.return.request_param": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_pandas_metadata_pandas_metadata.return.request_param", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1549, "end_line": 1623, "span_ids": ["pandas_metadata"], "tokens": 452}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.fixture(\n params=[\n # fastparquet 0.1.3\n {\n \"columns\": [\n {\n \"metadata\": None,\n \"name\": \"idx\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n {\n \"metadata\": None,\n \"name\": \"A\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n ],\n \"index_columns\": [\"idx\"],\n \"pandas_version\": \"0.21.0\",\n },\n # pyarrow 0.7.1\n {\n \"columns\": [\n {\n \"metadata\": None,\n \"name\": \"A\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n {\n \"metadata\": None,\n \"name\": \"idx\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n ],\n \"index_columns\": [\"idx\"],\n \"pandas_version\": \"0.21.0\",\n },\n # pyarrow 0.8.0\n {\n \"column_indexes\": [\n {\n \"field_name\": None,\n \"metadata\": {\"encoding\": \"UTF-8\"},\n \"name\": None,\n \"numpy_type\": \"object\",\n \"pandas_type\": \"unicode\",\n }\n ],\n \"columns\": [\n {\n 
\"field_name\": \"A\",\n \"metadata\": None,\n \"name\": \"A\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n {\n \"field_name\": \"__index_level_0__\",\n \"metadata\": None,\n \"name\": \"idx\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n ],\n \"index_columns\": [\"__index_level_0__\"],\n \"pandas_version\": \"0.21.0\",\n },\n # TODO: fastparquet update\n ]\n)\ndef pandas_metadata(request):\n return request.param", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parse_pandas_metadata_test_parse_pandas_metadata.assert_isinstance_mapping": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parse_pandas_metadata_test_parse_pandas_metadata.assert_isinstance_mapping", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1626, "end_line": 1640, "span_ids": ["test_parse_pandas_metadata"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_parse_pandas_metadata(pandas_metadata):\n index_names, column_names, mapping, column_index_names = _parse_pandas_metadata(\n pandas_metadata\n )\n assert index_names == [\"idx\"]\n assert column_names == [\"A\"]\n assert column_index_names == [None]\n\n # for new pyarrow\n if pandas_metadata[\"index_columns\"] == [\"__index_level_0__\"]:\n assert mapping == {\"__index_level_0__\": \"idx\", \"A\": \"A\"}\n else:\n assert mapping == {\"idx\": \"idx\", \"A\": \"A\"}\n\n assert isinstance(mapping, dict)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parse_pandas_metadata_null_index_test_parse_pandas_metadata_null_index.None_9": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parse_pandas_metadata_null_index_test_parse_pandas_metadata_null_index.None_9", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1643, "end_line": 1708, "span_ids": ["test_parse_pandas_metadata_null_index"], "tokens": 492}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_parse_pandas_metadata_null_index():\n # pyarrow 0.7.1 None for index\n e_index_names 
= [None]\n e_column_names = [\"x\"]\n e_mapping = {\"__index_level_0__\": None, \"x\": \"x\"}\n e_column_index_names = [None]\n\n md = {\n \"columns\": [\n {\n \"metadata\": None,\n \"name\": \"x\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n {\n \"metadata\": None,\n \"name\": \"__index_level_0__\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n ],\n \"index_columns\": [\"__index_level_0__\"],\n \"pandas_version\": \"0.21.0\",\n }\n index_names, column_names, mapping, column_index_names = _parse_pandas_metadata(md)\n assert index_names == e_index_names\n assert column_names == e_column_names\n assert mapping == e_mapping\n assert column_index_names == e_column_index_names\n\n # pyarrow 0.8.0 None for index\n md = {\n \"column_indexes\": [\n {\n \"field_name\": None,\n \"metadata\": {\"encoding\": \"UTF-8\"},\n \"name\": None,\n \"numpy_type\": \"object\",\n \"pandas_type\": \"unicode\",\n }\n ],\n \"columns\": [\n {\n \"field_name\": \"x\",\n \"metadata\": None,\n \"name\": \"x\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n {\n \"field_name\": \"__index_level_0__\",\n \"metadata\": None,\n \"name\": None,\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n ],\n \"index_columns\": [\"__index_level_0__\"],\n \"pandas_version\": \"0.21.0\",\n }\n index_names, column_names, mapping, column_index_names = _parse_pandas_metadata(md)\n assert index_names == e_index_names\n assert column_names == e_column_names\n assert mapping == e_mapping\n assert column_index_names == e_column_index_names", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_no_metadata_test_read_no_metadata.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_no_metadata_test_read_no_metadata.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1711, "end_line": 1723, "span_ids": ["test_read_no_metadata"], "tokens": 147}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_no_metadata(tmpdir, engine):\n # use pyarrow.parquet to create a parquet file without\n # pandas metadata\n check_pyarrow()\n tmp = str(tmpdir) + \"table.parq\"\n\n table = pa.Table.from_arrays(\n [pa.array([1, 2, 3]), pa.array([3, 4, 5])], names=[\"A\", \"B\"]\n )\n pq.write_table(table, tmp)\n result = dd.read_parquet(tmp, engine=engine)\n expected = pd.DataFrame({\"A\": [1, 2, 3], \"B\": [3, 4, 5]})\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parse_pandas_metadata_duplicate_index_columns_test_parse_pandas_metadata_duplicate_index_columns.assert_column_index_names": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parse_pandas_metadata_duplicate_index_columns_test_parse_pandas_metadata_duplicate_index_columns.assert_column_index_names", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1726, "end_line": 1765, "span_ids": ["test_parse_pandas_metadata_duplicate_index_columns"], "tokens": 271}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_parse_pandas_metadata_duplicate_index_columns():\n md = {\n \"column_indexes\": [\n {\n \"field_name\": None,\n \"metadata\": {\"encoding\": \"UTF-8\"},\n \"name\": None,\n \"numpy_type\": \"object\",\n \"pandas_type\": \"unicode\",\n }\n ],\n \"columns\": [\n {\n \"field_name\": \"A\",\n \"metadata\": None,\n \"name\": \"A\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n {\n \"field_name\": \"__index_level_0__\",\n \"metadata\": None,\n \"name\": \"A\",\n \"numpy_type\": \"object\",\n \"pandas_type\": \"unicode\",\n },\n ],\n \"index_columns\": [\"__index_level_0__\"],\n \"pandas_version\": \"0.21.0\",\n }\n (\n index_names,\n column_names,\n storage_name_mapping,\n column_index_names,\n ) = _parse_pandas_metadata(md)\n assert index_names == [\"A\"]\n assert column_names == [\"A\"]\n assert storage_name_mapping == {\"__index_level_0__\": \"A\", \"A\": \"A\"}\n assert column_index_names == [None]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parse_pandas_metadata_column_with_index_name_test_parse_pandas_metadata_column_with_index_name.assert_column_index_names": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parse_pandas_metadata_column_with_index_name_test_parse_pandas_metadata_column_with_index_name.assert_column_index_names", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1768, "end_line": 1807, "span_ids": ["test_parse_pandas_metadata_column_with_index_name"], "tokens": 272}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_parse_pandas_metadata_column_with_index_name():\n md = {\n \"column_indexes\": [\n {\n \"field_name\": 
None,\n \"metadata\": {\"encoding\": \"UTF-8\"},\n \"name\": None,\n \"numpy_type\": \"object\",\n \"pandas_type\": \"unicode\",\n }\n ],\n \"columns\": [\n {\n \"field_name\": \"A\",\n \"metadata\": None,\n \"name\": \"A\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n {\n \"field_name\": \"__index_level_0__\",\n \"metadata\": None,\n \"name\": \"A\",\n \"numpy_type\": \"object\",\n \"pandas_type\": \"unicode\",\n },\n ],\n \"index_columns\": [\"__index_level_0__\"],\n \"pandas_version\": \"0.21.0\",\n }\n (\n index_names,\n column_names,\n storage_name_mapping,\n column_index_names,\n ) = _parse_pandas_metadata(md)\n assert index_names == [\"A\"]\n assert column_names == [\"A\"]\n assert storage_name_mapping == {\"__index_level_0__\": \"A\", \"A\": \"A\"}\n assert column_index_names == [None]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_writing_parquet_with_kwargs_test_writing_parquet_with_kwargs.for_val_in_df_a_unique_.assert_set_df_b_df_a_v": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_writing_parquet_with_kwargs_test_writing_parquet_with_kwargs.for_val_in_df_a_unique_.assert_set_df_b_df_a_v", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1810, "end_line": 1846, "span_ids": ["test_writing_parquet_with_kwargs"], "tokens": 356}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_writing_parquet_with_kwargs(tmpdir, engine):\n fn = str(tmpdir)\n path1 = os.path.join(fn, \"normal\")\n path2 = os.path.join(fn, \"partitioned\")\n pytest.importorskip(\"snappy\")\n\n df = pd.DataFrame(\n {\n \"a\": np.random.choice([\"A\", \"B\", \"C\"], size=100),\n \"b\": np.random.random(size=100),\n \"c\": np.random.randint(1, 5, size=100),\n }\n )\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, npartitions=3)\n\n engine_kwargs = {\n \"pyarrow\": {\n \"compression\": \"snappy\",\n \"coerce_timestamps\": None,\n \"use_dictionary\": True,\n },\n \"fastparquet\": {\"compression\": \"snappy\", \"times\": \"int64\", \"fixed_text\": None},\n }\n\n ddf.to_parquet(path1, engine=engine, **engine_kwargs[engine])\n out = dd.read_parquet(path1, engine=engine)\n assert_eq(out, ddf, check_index=(engine != \"fastparquet\"))\n\n # Avoid race condition in pyarrow 0.8.0 on writing partitioned datasets\n with dask.config.set(scheduler=\"sync\"):\n ddf.to_parquet(\n path2, engine=engine, partition_on=[\"a\"], **engine_kwargs[engine]\n )\n out = dd.read_parquet(path2, engine=engine).compute()\n for val in df.a.unique():\n assert set(df.b[df.a == val]) == set(out.b[out.a == val])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_writing_parquet_with_unknown_kwargs_test_to_parquet_with_get.assert_eq_result_df_che": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_writing_parquet_with_unknown_kwargs_test_to_parquet_with_get.assert_eq_result_df_che", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1849, "end_line": 1876, "span_ids": ["test_to_parquet_with_get", "test_writing_parquet_with_unknown_kwargs"], "tokens": 214}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_writing_parquet_with_unknown_kwargs(tmpdir, engine):\n fn = str(tmpdir)\n\n with pytest.raises(TypeError):\n ddf.to_parquet(fn, engine=engine, unknown_key=\"unknown_value\")\n\n\ndef test_to_parquet_with_get(tmpdir):\n check_engine()\n\n from dask.multiprocessing import get as mp_get\n\n tmpdir = str(tmpdir)\n\n flag = [False]\n\n def my_get(*args, **kwargs):\n flag[0] = True\n return mp_get(*args, **kwargs)\n\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n ddf.to_parquet(tmpdir, compute_kwargs={\"scheduler\": my_get})\n assert flag[0]\n\n result = dd.read_parquet(os.path.join(tmpdir, \"*\"))\n assert_eq(result, df, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_select_partitioned_column_test_select_partitioned_column.df_partitioned_df_partiti": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_select_partitioned_column_test_select_partitioned_column.df_partitioned_df_partiti", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1879, "end_line": 1904, "span_ids": ["test_select_partitioned_column"], "tokens": 240}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_select_partitioned_column(tmpdir, engine):\n pytest.importorskip(\"snappy\")\n if engine == \"pyarrow\":\n import pyarrow as pa\n\n if pa.__version__ < LooseVersion(\"0.9.0\"):\n pytest.skip(\"pyarrow<0.9.0 did not support this\")\n\n fn = str(tmpdir)\n size = 20\n d = {\n \"signal1\": np.random.normal(0, 0.3, size=size).cumsum() + 50,\n \"fake_categorical1\": np.random.choice([\"A\", \"B\", \"C\"], size=size),\n \"fake_categorical2\": np.random.choice([\"D\", \"E\", 
\"F\"], size=size),\n }\n df = dd.from_pandas(pd.DataFrame(d), 2)\n df.to_parquet(\n fn,\n compression=\"snappy\",\n write_index=False,\n engine=engine,\n partition_on=[\"fake_categorical1\", \"fake_categorical2\"],\n )\n\n df_partitioned = dd.read_parquet(fn, engine=engine)\n df_partitioned[df_partitioned.fake_categorical1 == \"A\"].compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_with_tz_test_with_tz.with_warnings_catch_warni.if_engine_fastparquet.assert_eq_df_df2_check_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_with_tz_test_with_tz.with_warnings_catch_warni.if_engine_fastparquet.assert_eq_df_df2_check_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1907, "end_line": 1923, "span_ids": ["test_with_tz"], "tokens": 237}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_with_tz(tmpdir, engine):\n if engine == \"pyarrow\" and pa.__version__ < LooseVersion(\"0.11.0\"):\n pytest.skip(\"pyarrow<0.11.0 did not support this\")\n if engine == \"fastparquet\" and fastparquet.__version__ < LooseVersion(\"0.3.0\"):\n pytest.skip(\"fastparquet<0.3.0 did not support this\")\n\n with warnings.catch_warnings():\n if engine == \"fastparquet\":\n # fastparquet-442\n warnings.simplefilter(\"ignore\", DeprecationWarning) # pandas 0.23\n warnings.simplefilter(\"ignore\", FutureWarning) # pandas 0.25\n fn = str(tmpdir)\n df = pd.DataFrame([[0]], columns=[\"a\"], dtype=\"datetime64[ns, UTC]\")\n df = dd.from_pandas(df, 1)\n df.to_parquet(fn, engine=engine)\n df2 = dd.read_parquet(fn, engine=engine)\n assert_eq(df, df2, check_divisions=False, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_arrow_partitioning_test_arrow_partitioning.ddf_astype_b_np_float": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_arrow_partitioning_test_arrow_partitioning.ddf_astype_b_np_float", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1926, "end_line": 1942, "span_ids": ["test_arrow_partitioning"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_arrow_partitioning(tmpdir):\n # Issue #3518\n check_pyarrow()\n path = str(tmpdir)\n data = {\n \"p\": np.repeat(np.arange(3), 2).astype(np.int8),\n \"b\": np.repeat(-1, 6).astype(np.int16),\n \"c\": np.repeat(-2, 6).astype(np.float32),\n \"d\": np.repeat(-3, 6).astype(np.float64),\n }\n pdf = pd.DataFrame(data)\n ddf = dd.from_pandas(pdf, npartitions=2)\n ddf.to_parquet(path, engine=\"pyarrow\", write_index=False, partition_on=\"p\")\n\n ddf = dd.read_parquet(path, index=False, engine=\"pyarrow\")\n\n ddf.astype({\"b\": np.float32}).compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_sorted_warnings_test_sorted_warnings._still_may_have_some_arr": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_sorted_warnings_test_sorted_warnings._still_may_have_some_arr", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1945, "end_line": 1965, "span_ids": ["test_sorted_warnings"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_sorted_warnings(tmpdir, engine):\n\n if engine == \"pyarrow\":\n pytest.skip(\n \"ArrowEngine will only collect statistics for \"\n \"known index columns and/or filtered columns.\"\n )\n\n tmpdir = str(tmpdir)\n df = dd.from_pandas(\n pd.DataFrame({\"cola\": range(10), \"colb\": range(10)}), npartitions=2\n )\n df.to_parquet(tmpdir, engine=engine, write_index=False)\n with pytest.warns(RuntimeWarning) as record:\n out = dd.read_parquet(tmpdir, engine=engine)\n assert \"['cola', 'colb']\" in str(record[-1].message)\n warnings = len(record)\n assert out.columns.tolist() == [\"cola\", \"colb\"]\n with pytest.warns(None) as record:\n dd.read_parquet(tmpdir, engine=engine, index=False)\n assert len(record) < warnings # still may have some arrow warnings", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_informative_error_messages_test_append_cat_fp.assert_d_x_tolist_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_informative_error_messages_test_append_cat_fp.assert_d_x_tolist_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1968, "end_line": 1988, "span_ids": ["test_append_cat_fp", "test_informative_error_messages"], "tokens": 225}, "excluded_embed_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_informative_error_messages():\n with pytest.raises(ValueError) as info:\n dd.read_parquet(\"foo\", engine=\"foo\")\n\n assert \"foo\" in str(info.value)\n assert \"arrow\" in str(info.value)\n assert \"fastparquet\" in str(info.value)\n\n\ndef test_append_cat_fp(tmpdir, engine):\n path = str(tmpdir)\n # https://github.com/dask/dask/issues/4120\n df = pd.DataFrame({\"x\": [\"a\", \"a\", \"b\", \"a\", \"b\"]})\n df[\"x\"] = df[\"x\"].astype(\"category\")\n ddf = dd.from_pandas(df, npartitions=1)\n\n dd.to_parquet(ddf, path, engine=engine)\n dd.to_parquet(ddf, path, append=True, ignore_divisions=True, engine=engine)\n\n d = dd.read_parquet(path, engine=engine).compute()\n assert d[\"x\"].tolist() == [\"a\", \"a\", \"b\", \"a\", \"b\"] * 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip_arrow_test_roundtrip_arrow.assert_eq_ddf_ddf2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip_arrow_test_roundtrip_arrow.assert_eq_ddf_ddf2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1991, "end_line": 2030, "span_ids": ["test_roundtrip_arrow"], "tokens": 682}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"df\",\n [\n pd.DataFrame({\"x\": [4, 5, 6, 1, 2, 3]}),\n pd.DataFrame({\"x\": [\"c\", \"a\", \"b\"]}),\n pd.DataFrame({\"x\": [\"cc\", \"a\", \"bbb\"]}),\n pd.DataFrame({\"x\": [b\"a\", b\"b\", b\"c\"]}),\n pytest.param(pd.DataFrame({\"x\": pd.Categorical([\"a\", \"b\", \"a\"])})),\n pytest.param(pd.DataFrame({\"x\": pd.Categorical([1, 2, 1])})),\n pd.DataFrame({\"x\": list(map(pd.Timestamp, [3000000, 2000000, 1000000]))}), # ms\n pd.DataFrame({\"x\": list(map(pd.Timestamp, [3000, 2000, 1000]))}), # us\n pd.DataFrame({\"x\": [3000, 2000, 1000]}).astype(\"M8[ns]\"),\n # pd.DataFrame({'x': [3, 2, 1]}).astype('M8[ns]'), # Casting errors\n pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"M8[us]\"),\n pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"M8[ms]\"),\n pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"uint16\"),\n pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"float32\"),\n pd.DataFrame({\"x\": [3, 1, 2]}, index=[3, 2, 1]),\n pd.DataFrame(\n {\"x\": [4, 5, 6, 1, 2, 3]}, index=pd.Index([1, 2, 3, 4, 5, 6], name=\"foo\")\n ),\n pd.DataFrame({\"x\": [1, 2, 3], \"y\": [3, 2, 1]}),\n pd.DataFrame({\"x\": [1, 2, 3], \"y\": [3, 2, 1]}, columns=[\"y\", \"x\"]),\n pd.DataFrame({\"0\": [3, 2, 1]}),\n pd.DataFrame({\"x\": [3, 2, None]}),\n pd.DataFrame({\"-\": [3.0, 2.0, None]}),\n 
pd.DataFrame({\".\": [3.0, 2.0, None]}),\n pd.DataFrame({\" \": [3.0, 2.0, None]}),\n ],\n)\ndef test_roundtrip_arrow(tmpdir, df):\n check_pyarrow()\n # Index will be given a name when preserved as index\n tmp_path = str(tmpdir)\n if not df.index.name:\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, npartitions=2)\n dd.to_parquet(ddf, tmp_path, engine=\"pyarrow\", write_index=True)\n ddf2 = dd.read_parquet(tmp_path, engine=\"pyarrow\", gather_statistics=True)\n assert_eq(ddf, ddf2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_datasets_timeseries_test_pathlib_path.assert_eq_ddf_ddf2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_datasets_timeseries_test_pathlib_path.assert_eq_ddf_ddf2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2033, "end_line": 2053, "span_ids": ["test_pathlib_path", "test_datasets_timeseries"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_datasets_timeseries(tmpdir, engine):\n tmp_path = str(tmpdir)\n df = dask.datasets.timeseries(\n start=\"2000-01-01\", end=\"2000-01-10\", freq=\"1d\"\n ).persist()\n df.to_parquet(tmp_path, engine=engine)\n\n df2 = dd.read_parquet(tmp_path, engine=engine)\n assert_eq(df, df2)\n\n\ndef test_pathlib_path(tmpdir, engine):\n import pathlib\n\n df = pd.DataFrame({\"x\": [4, 5, 6, 1, 2, 3]})\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, npartitions=2)\n path = pathlib.Path(str(tmpdir))\n ddf.to_parquet(path, engine=engine)\n ddf2 = dd.read_parquet(path, engine=engine)\n assert_eq(ddf, ddf2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pyarrow_metadata_nthreads_test_pyarrow_metadata_nthreads.assert_eq_ddf_ddf2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pyarrow_metadata_nthreads_test_pyarrow_metadata_nthreads.assert_eq_ddf_ddf2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2056, "end_line": 2065, "span_ids": ["test_pyarrow_metadata_nthreads"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": 
{}, "text": "def test_pyarrow_metadata_nthreads(tmpdir):\n check_pyarrow()\n tmp_path = str(tmpdir)\n df = pd.DataFrame({\"x\": [4, 5, 6, 1, 2, 3]})\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, npartitions=2)\n ddf.to_parquet(tmp_path, engine=\"pyarrow\")\n ops = {\"dataset\": {\"metadata_nthreads\": 2}}\n ddf2 = dd.read_parquet(tmp_path, engine=\"pyarrow\", **ops)\n assert_eq(ddf, ddf2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_categories_large_test_categories_large.assert_eq_sorted_df_name_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_categories_large_test_categories_large.assert_eq_sorted_df_name_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2068, "end_line": 2079, "span_ids": ["test_categories_large"], "tokens": 136}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_categories_large(tmpdir, engine):\n # Issue #5112\n check_fastparquet()\n fn = str(tmpdir.join(\"parquet_int16.parq\"))\n numbers = np.random.randint(0, 800000, size=1000000)\n df = pd.DataFrame(numbers.T, columns=[\"name\"])\n df.name = df.name.astype(\"category\")\n\n df.to_parquet(fn, engine=\"fastparquet\", compression=\"uncompressed\")\n ddf = dd.read_parquet(fn, engine=engine, categories={\"name\": 80000})\n\n assert_eq(sorted(df.name.cat.categories), sorted(ddf.compute().name.cat.categories))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_glob_no_stats_test_read_glob_yes_stats.assert_eq_ddf_ddf2_chec": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_glob_no_stats_test_read_glob_yes_stats.assert_eq_ddf_ddf2_chec", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2082, "end_line": 2102, "span_ids": ["test_read_glob_no_stats", "test_read_glob_yes_stats"], "tokens": 197}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@write_read_engines()\ndef test_read_glob_no_stats(tmpdir, write_engine, read_engine):\n tmp_path = str(tmpdir)\n ddf.to_parquet(tmp_path, engine=write_engine)\n\n ddf2 = 
dd.read_parquet(\n os.path.join(tmp_path, \"*.parquet\"), engine=read_engine, gather_statistics=False\n )\n assert_eq(ddf, ddf2, check_divisions=False)\n\n\n@write_read_engines()\ndef test_read_glob_yes_stats(tmpdir, write_engine, read_engine):\n tmp_path = str(tmpdir)\n ddf.to_parquet(tmp_path, engine=write_engine)\n import glob\n\n paths = glob.glob(os.path.join(tmp_path, \"*.parquet\"))\n paths.append(os.path.join(tmp_path, \"_metadata\"))\n ddf2 = dd.read_parquet(paths, engine=read_engine, gather_statistics=False)\n assert_eq(ddf, ddf2, check_divisions=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_dir_nometa_test_read_dir_nometa.assert_eq_ddf_ddf2_chec": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_dir_nometa_test_read_dir_nometa.assert_eq_ddf_ddf2_chec", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2105, "end_line": 2120, "span_ids": ["test_read_dir_nometa"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"statistics\", [True, False, None])\n@pytest.mark.parametrize(\"remove_common\", [True, False])\n@write_read_engines()\ndef test_read_dir_nometa(tmpdir, write_engine, read_engine, statistics, remove_common):\n tmp_path = str(tmpdir)\n ddf.to_parquet(tmp_path, engine=write_engine)\n if os.path.exists(os.path.join(tmp_path, \"_metadata\")):\n os.unlink(os.path.join(tmp_path, \"_metadata\"))\n files = os.listdir(tmp_path)\n assert \"_metadata\" not in files\n\n if remove_common and os.path.exists(os.path.join(tmp_path, \"_common_metadata\")):\n os.unlink(os.path.join(tmp_path, \"_common_metadata\"))\n\n ddf2 = dd.read_parquet(tmp_path, engine=read_engine, gather_statistics=statistics)\n assert_eq(ddf, ddf2, check_divisions=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_timeseries_nulls_in_schema_test_timeseries_nulls_in_schema.if_engine_pyarrow_an.with_pytest_raises_ValueE.ddf_read.dd_read_parquet_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_timeseries_nulls_in_schema_test_timeseries_nulls_in_schema.if_engine_pyarrow_an.with_pytest_raises_ValueE.ddf_read.dd_read_parquet_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2123, "end_line": 2160, "span_ids": ["test_timeseries_nulls_in_schema"], "tokens": 
394}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"schema\", [\"infer\", None])\ndef test_timeseries_nulls_in_schema(tmpdir, engine, schema):\n\n if (\n schema == \"infer\"\n and engine == \"pyarrow\"\n and pa.__version__ < LooseVersion(\"0.15.0\")\n ):\n pytest.skip(\"PyArrow>=0.15 Required.\")\n\n # GH#5608: relative path failing _metadata/_common_metadata detection.\n tmp_path = str(tmpdir.mkdir(\"files\"))\n tmp_path = os.path.join(tmp_path, \"../\", \"files\")\n\n ddf2 = (\n dask.datasets.timeseries(start=\"2000-01-01\", end=\"2000-01-03\", freq=\"1h\")\n .reset_index()\n .map_partitions(lambda x: x.loc[:5])\n )\n ddf2 = ddf2.set_index(\"x\").reset_index().persist()\n ddf2.name = ddf2.name.where(ddf2.timestamp == \"2000-01-01\", None)\n\n # Note: `append_row_groups` will fail with pyarrow>0.17.1 for _metadata write\n ddf2.to_parquet(tmp_path, engine=engine, write_metadata_file=False, schema=schema)\n ddf_read = dd.read_parquet(\n tmp_path, engine=engine, dataset={\"validate_schema\": False}\n )\n\n assert_eq(ddf_read, ddf2, check_divisions=False, check_index=False)\n\n # Can force schema validation on each partition in pyarrow\n if engine == \"pyarrow\" and schema is None:\n # The schema mismatch should raise an error if the\n # dataset was written with `schema=None` (no inference)\n with pytest.raises(ValueError):\n ddf_read = dd.read_parquet(\n tmp_path, dataset={\"validate_schema\": True}, engine=engine\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_timeseries_nulls_in_schema_pyarrow_test_timeseries_nulls_in_schema_pyarrow.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_timeseries_nulls_in_schema_pyarrow_test_timeseries_nulls_in_schema_pyarrow.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2163, "end_line": 2203, "span_ids": ["test_timeseries_nulls_in_schema_pyarrow"], "tokens": 376}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"numerical\", [True, False])\n@pytest.mark.parametrize(\n \"timestamp\", [\"2000-01-01\", \"2000-01-02\", \"2000-01-03\", \"2000-01-04\"]\n)\ndef test_timeseries_nulls_in_schema_pyarrow(tmpdir, timestamp, numerical):\n check_pyarrow()\n tmp_path = str(tmpdir)\n ddf2 = dd.from_pandas(\n pd.DataFrame(\n {\n \"timestamp\": [\n pd.Timestamp(\"2000-01-01\"),\n pd.Timestamp(\"2000-01-02\"),\n pd.Timestamp(\"2000-01-03\"),\n pd.Timestamp(\"2000-01-04\"),\n ],\n \"id\": np.arange(4, dtype=\"float64\"),\n 
\"name\": [\"cat\", \"dog\", \"bird\", \"cow\"],\n }\n ),\n npartitions=2,\n ).persist()\n if numerical:\n ddf2.id = ddf2.id.where(ddf2.timestamp == timestamp, None)\n ddf2.id = ddf2.id.astype(\"float64\")\n else:\n ddf2.name = ddf2.name.where(ddf2.timestamp == timestamp, None)\n\n # There should be no schema error if you specify a schema on write\n schema = pa.schema(\n [(\"timestamp\", pa.timestamp(\"ns\")), (\"id\", pa.float64()), (\"name\", pa.string())]\n )\n ddf2.to_parquet(tmp_path, schema=schema, write_index=False, engine=\"pyarrow\")\n assert_eq(\n dd.read_parquet(\n tmp_path, dataset={\"validate_schema\": True}, index=False, engine=\"pyarrow\"\n ),\n ddf2,\n check_divisions=False,\n check_index=False,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_inconsistent_schema_pyarrow_test_read_inconsistent_schema_pyarrow.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_inconsistent_schema_pyarrow_test_read_inconsistent_schema_pyarrow.None_4", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2206, "end_line": 2237, "span_ids": ["test_read_inconsistent_schema_pyarrow"], "tokens": 321}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_inconsistent_schema_pyarrow(tmpdir):\n check_pyarrow()\n\n # Note: This is a proxy test for a cudf-related issue fix\n # (see cudf#5062 github issue). 
The cause of that issue is\n # schema inconsistencies that do not actually correspond to\n # different types, but whether or not the file/column contains\n # null values.\n\n df1 = pd.DataFrame({\"id\": [0, 1], \"val\": [10, 20]})\n df2 = pd.DataFrame({\"id\": [2, 3], \"val\": [30, 40]})\n\n desired_type = \"int64\"\n other_type = \"int32\"\n df1.val = df1.val.astype(desired_type)\n df2.val = df2.val.astype(other_type)\n\n df_expect = pd.concat([df1, df2], ignore_index=True)\n df_expect[\"val\"] = df_expect.val.astype(desired_type)\n\n df1.to_parquet(os.path.join(tmpdir, \"0.parquet\"))\n df2.to_parquet(os.path.join(tmpdir, \"1.parquet\"))\n\n # Read Directory\n check = dd.read_parquet(str(tmpdir), dataset={\"validate_schema\": False})\n assert_eq(check.compute(), df_expect, check_index=False)\n\n # Read List\n check = dd.read_parquet(\n os.path.join(tmpdir, \"*.parquet\"), dataset={\"validate_schema\": False}\n )\n assert_eq(check.compute(), df_expect, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_graph_size_pyarrow_test_graph_size_pyarrow.assert_len_pickle_dumps_d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_graph_size_pyarrow_test_graph_size_pyarrow.assert_len_pickle_dumps_d", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2240, "end_line": 2252, "span_ids": ["test_graph_size_pyarrow"], "tokens": 112}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_graph_size_pyarrow(tmpdir, engine):\n import pickle\n\n fn = str(tmpdir)\n\n ddf1 = dask.datasets.timeseries(\n start=\"2000-01-01\", end=\"2000-01-02\", freq=\"60S\", partition_freq=\"1H\"\n )\n\n ddf1.to_parquet(fn, engine=engine)\n ddf2 = dd.read_parquet(fn, engine=engine)\n\n assert len(pickle.dumps(ddf2.__dask_graph__())) < 10000", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_getitem_optimization_test_getitem_optimization.assert_eq_ddf_compute_opt": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_getitem_optimization_test_getitem_optimization.assert_eq_ddf_compute_opt", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2256, "end_line": 2275, "span_ids": ["test_getitem_optimization"], "tokens": 255}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"preserve_index\", [True, False])\n@pytest.mark.parametrize(\"index\", [None, np.random.permutation(2000)])\ndef test_getitem_optimization(tmpdir, engine, preserve_index, index):\n df = pd.DataFrame(\n {\"A\": [1, 2] * 1000, \"B\": [3, 4] * 1000, \"C\": [5, 6] * 1000}, index=index\n )\n df.index.name = \"my_index\"\n ddf = dd.from_pandas(df, 2, sort=False)\n fn = os.path.join(str(tmpdir))\n ddf.to_parquet(fn, engine=engine, write_index=preserve_index)\n\n ddf = dd.read_parquet(fn, engine=engine)[\"B\"]\n\n dsk = optimize_read_parquet_getitem(ddf.dask, keys=[ddf._name])\n get, read = sorted(dsk.layers) # keys are getitem-, read-parquet-\n subgraph = dsk.layers[read]\n assert isinstance(subgraph, BlockwiseParquet)\n assert subgraph.columns == [\"B\"]\n\n assert_eq(ddf.compute(optimize_graph=False), ddf.compute())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_getitem_optimization_empty_test_getitem_optimization_empty.assert_subgraph_columns_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_getitem_optimization_empty_test_getitem_optimization_empty.assert_subgraph_columns_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2278, "end_line": 2289, "span_ids": ["test_getitem_optimization_empty"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_getitem_optimization_empty(tmpdir, engine):\n df = pd.DataFrame({\"A\": [1] * 100, \"B\": [2] * 100, \"C\": [3] * 100, \"D\": [4] * 100})\n ddf = dd.from_pandas(df, 2)\n fn = os.path.join(str(tmpdir))\n ddf.to_parquet(fn, engine=engine)\n\n df2 = dd.read_parquet(fn, columns=[], engine=engine)\n dsk = optimize_read_parquet_getitem(df2.dask, keys=[df2._name])\n\n subgraph = list(dsk.layers.values())[0]\n assert isinstance(subgraph, BlockwiseParquet)\n assert subgraph.columns == []", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_getitem_optimization_multi_test_subgraph_getitem.None_2.subgraph_name_3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_getitem_optimization_multi_test_subgraph_getitem.None_2.subgraph_name_3_", "embedding": null, "metadata": {"file_path": 
"dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2291, "end_line": 2320, "span_ids": ["test_getitem_optimization_multi", "test_subgraph_getitem"], "tokens": 308}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_getitem_optimization_multi(tmpdir, engine):\n df = pd.DataFrame({\"A\": [1] * 100, \"B\": [2] * 100, \"C\": [3] * 100, \"D\": [4] * 100})\n ddf = dd.from_pandas(df, 2)\n fn = os.path.join(str(tmpdir))\n ddf.to_parquet(fn, engine=engine)\n\n a = dd.read_parquet(fn, engine=engine)[\"B\"]\n b = dd.read_parquet(fn, engine=engine)[[\"C\"]]\n c = dd.read_parquet(fn, engine=engine)[[\"C\", \"A\"]]\n\n a1, a2, a3 = dask.compute(a, b, c)\n b1, b2, b3 = dask.compute(a, b, c, optimize_graph=False)\n\n assert_eq(a1, b1)\n assert_eq(a2, b2)\n assert_eq(a3, b3)\n\n\ndef test_subgraph_getitem():\n meta = pd.DataFrame(columns=[\"a\"])\n subgraph = ParquetSubgraph(\"name\", \"pyarrow\", \"fs\", meta, [], [], [0, 1, 2], {})\n\n with pytest.raises(KeyError):\n subgraph[\"foo\"]\n\n with pytest.raises(KeyError):\n subgraph[(\"name\", -1)]\n\n with pytest.raises(KeyError):\n subgraph[(\"name\", 3)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_split_row_groups_pyarrow_test_split_row_groups_pyarrow.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_split_row_groups_pyarrow_test_split_row_groups_pyarrow.None_3", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2323, "end_line": 2361, "span_ids": ["test_split_row_groups_pyarrow"], "tokens": 332}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_split_row_groups_pyarrow(tmpdir):\n \"\"\"Test split_row_groups read_parquet kwarg\"\"\"\n check_pyarrow()\n tmp = str(tmpdir)\n df = pd.DataFrame(\n {\"i32\": np.arange(800, dtype=np.int32), \"f\": np.arange(800, dtype=np.float64)}\n )\n df.index.name = \"index\"\n\n half = len(df) // 2\n dd.from_pandas(df.iloc[:half], npartitions=2).to_parquet(\n tmp, engine=\"pyarrow\", row_group_size=100\n )\n\n ddf3 = dd.read_parquet(tmp, engine=\"pyarrow\", split_row_groups=True, chunksize=1)\n assert ddf3.npartitions == 4\n\n ddf3 = dd.read_parquet(\n tmp, engine=\"pyarrow\", gather_statistics=True, split_row_groups=False\n )\n assert ddf3.npartitions == 2\n\n dd.from_pandas(df.iloc[half:], npartitions=2).to_parquet(\n tmp, append=True, engine=\"pyarrow\", row_group_size=50\n )\n\n ddf3 = dd.read_parquet(\n tmp,\n engine=\"pyarrow\",\n 
gather_statistics=True,\n split_row_groups=True,\n chunksize=1,\n )\n assert ddf3.npartitions == 12\n\n ddf3 = dd.read_parquet(\n tmp, engine=\"pyarrow\", gather_statistics=True, split_row_groups=False\n )\n assert ddf3.npartitions == 4", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_split_row_groups_int_pyarrow_test_split_row_groups_int_pyarrow.assert_ddf2_npartitions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_split_row_groups_int_pyarrow_test_split_row_groups_int_pyarrow.assert_ddf2_npartitions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2364, "end_line": 2396, "span_ids": ["test_split_row_groups_int_pyarrow"], "tokens": 294}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"split_row_groups\", [1, 12])\n@pytest.mark.parametrize(\"gather_statistics\", [True, False])\ndef test_split_row_groups_int_pyarrow(tmpdir, split_row_groups, gather_statistics):\n\n check_pyarrow()\n tmp = str(tmpdir)\n engine = \"pyarrow\"\n row_group_size = 10\n npartitions = 4\n half_size = 400\n df = pd.DataFrame(\n {\n \"i32\": np.arange(2 * half_size, dtype=np.int32),\n \"f\": np.arange(2 * half_size, dtype=np.float64),\n }\n )\n half = len(df) // 2\n\n dd.from_pandas(df.iloc[:half], npartitions=npartitions).to_parquet(\n tmp, engine=engine, row_group_size=row_group_size\n )\n dd.from_pandas(df.iloc[half:], npartitions=npartitions).to_parquet(\n tmp, append=True, engine=engine, row_group_size=row_group_size\n )\n\n ddf2 = dd.read_parquet(\n tmp,\n engine=engine,\n split_row_groups=split_row_groups,\n gather_statistics=gather_statistics,\n )\n expected_rg_cout = int(half_size / row_group_size)\n assert ddf2.npartitions == 2 * math.ceil(expected_rg_cout / split_row_groups)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_split_row_groups_filter_pyarrow_test_split_row_groups_filter_pyarrow.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_split_row_groups_filter_pyarrow_test_split_row_groups_filter_pyarrow.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2399, "end_line": 2426, "span_ids": ["test_split_row_groups_filter_pyarrow"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_split_row_groups_filter_pyarrow(tmpdir):\n check_pyarrow()\n tmp = str(tmpdir)\n df = pd.DataFrame(\n {\"i32\": np.arange(800, dtype=np.int32), \"f\": np.arange(800, dtype=np.float64)}\n )\n df.index.name = \"index\"\n search_val = 600\n filters = [(\"f\", \"==\", search_val)]\n\n dd.from_pandas(df, npartitions=4).to_parquet(\n tmp, append=True, engine=\"pyarrow\", row_group_size=50\n )\n\n ddf2 = dd.read_parquet(tmp, engine=\"pyarrow\")\n ddf3 = dd.read_parquet(\n tmp,\n engine=\"pyarrow\",\n gather_statistics=True,\n split_row_groups=True,\n filters=filters,\n )\n\n assert search_val in ddf3[\"i32\"]\n assert_eq(\n ddf2[ddf2[\"i32\"] == search_val].compute(),\n ddf3[ddf3[\"i32\"] == search_val].compute(),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_optimize_getitem_and_nonblockwise_test_optimize_getitem_and_nonblockwise.df2_a_b_rolling_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_optimize_getitem_and_nonblockwise_test_optimize_getitem_and_nonblockwise.df2_a_b_rolling_3", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2429, "end_line": 2439, "span_ids": ["test_optimize_getitem_and_nonblockwise"], "tokens": 135}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_optimize_getitem_and_nonblockwise(tmpdir):\n check_engine()\n path = os.path.join(tmpdir, \"path.parquet\")\n df = pd.DataFrame(\n {\"a\": [3, 4, 2], \"b\": [1, 2, 4], \"c\": [5, 4, 2], \"d\": [1, 2, 3]},\n index=[\"a\", \"b\", \"c\"],\n )\n df.to_parquet(path)\n\n df2 = dd.read_parquet(path)\n df2[[\"a\", \"b\"]].rolling(3).max().compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_optimize_and_not_test_optimize_and_not.for_a_b_in_zip_result_e.assert_eq_a_b_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_optimize_and_not_test_optimize_and_not.for_a_b_in_zip_result_e.assert_eq_a_b_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2442, "end_line": 2465, "span_ids": ["test_optimize_and_not"], "tokens": 297}, "excluded_embed_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_optimize_and_not(tmpdir):\n check_engine()\n path = os.path.join(tmpdir, \"path.parquet\")\n df = pd.DataFrame(\n {\"a\": [3, 4, 2], \"b\": [1, 2, 4], \"c\": [5, 4, 2], \"d\": [1, 2, 3]},\n index=[\"a\", \"b\", \"c\"],\n )\n df.to_parquet(path)\n\n df2 = dd.read_parquet(path)\n df2a = df2[\"a\"].groupby(df2[\"c\"]).first().to_delayed()\n df2b = df2[\"b\"].groupby(df2[\"c\"]).first().to_delayed()\n df2c = df2[[\"a\", \"b\"]].rolling(2).max().to_delayed()\n df2d = df2.rolling(2).max().to_delayed()\n (result,) = dask.compute(df2a + df2b + df2c + df2d)\n\n expected = [\n dask.compute(df2a)[0][0],\n dask.compute(df2b)[0][0],\n dask.compute(df2c)[0][0],\n dask.compute(df2d)[0][0],\n ]\n for a, b in zip(result, expected):\n assert_eq(a, b)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_chunksize_test_chunksize.if_not_chunksize_.else_.assert_ddf2_npartitions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_chunksize_test_chunksize.if_not_chunksize_.else_.assert_ddf2_npartitions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2468, "end_line": 2523, "span_ids": ["test_chunksize"], "tokens": 454}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"metadata\", [True, False])\n@pytest.mark.parametrize(\"chunksize\", [None, 1024, 4096, \"1MiB\"])\ndef test_chunksize(tmpdir, chunksize, engine, metadata):\n check_pyarrow() # Need pyarrow for write phase in this test\n\n nparts = 2\n df_size = 100\n row_group_size = 5\n row_group_byte_size = 451 # Empirically measured\n\n df = pd.DataFrame(\n {\n \"a\": np.random.choice([\"apple\", \"banana\", \"carrot\"], size=df_size),\n \"b\": np.random.random(size=df_size),\n \"c\": np.random.randint(1, 5, size=df_size),\n \"index\": np.arange(0, df_size),\n }\n ).set_index(\"index\")\n\n ddf1 = dd.from_pandas(df, npartitions=nparts)\n ddf1.to_parquet(\n str(tmpdir),\n engine=\"pyarrow\",\n row_group_size=row_group_size,\n write_metadata_file=metadata,\n )\n\n if metadata:\n path = str(tmpdir)\n else:\n dirname = str(tmpdir)\n files = os.listdir(dirname)\n assert \"_metadata\" not in files\n path = os.path.join(dirname, \"*.parquet\")\n\n ddf2 = dd.read_parquet(\n path,\n engine=engine,\n chunksize=chunksize,\n split_row_groups=True,\n gather_statistics=True,\n index=\"index\",\n )\n\n assert_eq(ddf1, ddf2, check_divisions=False)\n\n num_row_groups = df_size // row_group_size\n if not chunksize:\n assert ddf2.npartitions == num_row_groups\n else:\n # Check 
that we are really aggregating\n df_byte_size = row_group_byte_size * num_row_groups\n expected = df_byte_size // parse_bytes(chunksize)\n remainder = (df_byte_size % parse_bytes(chunksize)) > 0\n expected += int(remainder) * nparts\n assert ddf2.npartitions == max(nparts, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip_pandas_chunksize_test_roundtrip_pandas_chunksize.assert_eq_pdf_ddf_read_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip_pandas_chunksize_test_roundtrip_pandas_chunksize.assert_eq_pdf_ddf_read_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2526, "end_line": 2542, "span_ids": ["test_roundtrip_pandas_chunksize"], "tokens": 114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@write_read_engines()\ndef test_roundtrip_pandas_chunksize(tmpdir, write_engine, read_engine):\n path = str(tmpdir.join(\"test.parquet\"))\n pdf = df.copy()\n pdf.index.name = \"index\"\n pdf.to_parquet(path, engine=write_engine)\n\n ddf_read = dd.read_parquet(\n path,\n engine=read_engine,\n chunksize=\"10 kiB\",\n gather_statistics=True,\n split_row_groups=True,\n index=\"index\",\n )\n\n assert_eq(pdf, ddf_read)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_pandas_fastparquet_partitioned_test_read_pandas_fastparquet_partitioned.assert_len_ddf_read_compu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_pandas_fastparquet_partitioned_test_read_pandas_fastparquet_partitioned.assert_len_ddf_read_compu", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2545, "end_line": 2556, "span_ids": ["test_read_pandas_fastparquet_partitioned"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_pandas_fastparquet_partitioned(tmpdir, engine):\n check_fastparquet()\n\n pdf = pd.DataFrame(\n [{\"str\": str(i), \"int\": i, \"group\": \"ABC\"[i % 3]} for i in range(6)]\n )\n path = str(tmpdir)\n pdf.to_parquet(path, partition_cols=[\"group\"], engine=\"fastparquet\")\n 
ddf_read = dd.read_parquet(path, engine=engine)\n\n assert len(ddf_read[\"group\"].compute()) == 6\n assert len(ddf_read.compute().group) == 6", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_parquet_getitem_skip_when_getting_getitem_test_read_parquet_getitem_skip_when_getting_getitem.a_b_dask_optimize_ddf_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_parquet_getitem_skip_when_getting_getitem_test_read_parquet_getitem_skip_when_getting_getitem.a_b_dask_optimize_ddf_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2559, "end_line": 2566, "span_ids": ["test_read_parquet_getitem_skip_when_getting_getitem"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_parquet_getitem_skip_when_getting_getitem(tmpdir, engine):\n # https://github.com/dask/dask/issues/5893\n pdf = pd.DataFrame({\"A\": [1, 2, 3, 4, 5, 6], \"B\": [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"]})\n path = os.path.join(str(tmpdir), \"data.parquet\")\n pdf.to_parquet(path, engine=engine)\n\n ddf = dd.read_parquet(path, engine=engine)\n a, b = dask.optimize(ddf[\"A\"], ddf)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_filter_nonpartition_columns_test_filter_nonpartition_columns.assert_df_read_time_ma": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_filter_nonpartition_columns_test_filter_nonpartition_columns.assert_df_read_time_ma", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2569, "end_line": 2595, "span_ids": ["test_filter_nonpartition_columns"], "tokens": 236}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"gather_statistics\", [None, True])\n@write_read_engines()\ndef test_filter_nonpartition_columns(\n tmpdir, write_engine, read_engine, gather_statistics\n):\n tmpdir = str(tmpdir)\n df_write = pd.DataFrame(\n {\n \"id\": [1, 2, 3, 4] * 4,\n \"time\": np.arange(16),\n \"random\": np.random.choice([\"cat\", \"dog\"], size=16),\n }\n )\n ddf_write = dd.from_pandas(df_write, 
npartitions=4)\n ddf_write.to_parquet(\n tmpdir, write_index=False, partition_on=[\"id\"], engine=write_engine\n )\n ddf_read = dd.read_parquet(\n tmpdir,\n index=False,\n engine=read_engine,\n gather_statistics=gather_statistics,\n filters=[(\"time\", \"<\", 5)],\n )\n df_read = ddf_read.compute()\n assert len(df_read) == len(df_read[df_read[\"time\"] < 5])\n assert df_read[\"time\"].max() < 5", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pandas_metadata_nullable_pyarrow_test_pandas_metadata_nullable_pyarrow.assert_eq_ddf1_ddf2_che": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pandas_metadata_nullable_pyarrow_test_pandas_metadata_nullable_pyarrow.assert_eq_ddf1_ddf2_che", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2598, "end_line": 2619, "span_ids": ["test_pandas_metadata_nullable_pyarrow"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_pandas_metadata_nullable_pyarrow(tmpdir):\n\n check_pyarrow()\n if pa.__version__ < LooseVersion(\"0.16.0\") or pd.__version__ < LooseVersion(\n \"1.0.0\"\n ):\n pytest.skip(\"PyArrow>=0.16 and Pandas>=1.0.0 Required.\")\n tmpdir = str(tmpdir)\n\n ddf1 = dd.from_pandas(\n pd.DataFrame(\n {\n \"A\": pd.array([1, None, 2], dtype=\"Int64\"),\n \"B\": pd.array([\"dog\", \"cat\", None], dtype=\"str\"),\n }\n ),\n npartitions=1,\n )\n ddf1.to_parquet(tmpdir, engine=\"pyarrow\")\n ddf2 = dd.read_parquet(tmpdir, engine=\"pyarrow\")\n\n assert_eq(ddf1, ddf2, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pandas_timestamp_overflow_pyarrow_test_pandas_timestamp_overflow_pyarrow.from_dask_dataframe_io_pa": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pandas_timestamp_overflow_pyarrow_test_pandas_timestamp_overflow_pyarrow.from_dask_dataframe_io_pa", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2622, "end_line": 2644, "span_ids": ["test_pandas_timestamp_overflow_pyarrow"], "tokens": 221}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": 
{}, "text": "def test_pandas_timestamp_overflow_pyarrow(tmpdir):\n\n check_pyarrow()\n if pa.__version__ < LooseVersion(\"0.17.0\"):\n pytest.skip(\"PyArrow>=0.17 Required.\")\n\n info = np.iinfo(np.dtype(\"int64\"))\n arr_numeric = np.linspace(\n start=info.min + 2, stop=info.max, num=1024, dtype=\"int64\"\n )\n arr_dates = arr_numeric.astype(\"datetime64[ms]\")\n\n table = pa.Table.from_arrays([pa.array(arr_dates)], names=[\"ts\"])\n pa.parquet.write_table(\n table, f\"{tmpdir}/file.parquet\", use_deprecated_int96_timestamps=False\n )\n\n # This will raise by default due to overflow\n with pytest.raises(pa.lib.ArrowInvalid) as e:\n dd.read_parquet(str(tmpdir), engine=\"pyarrow\").compute()\n assert \"out of bounds\" in str(e.value)\n\n from dask.dataframe.io.parquet.arrow import ArrowEngine\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pandas_timestamp_overflow_pyarrow.ArrowEngineWithTimestampClamp_test_pandas_timestamp_overflow_pyarrow.dd_read_parquet_str_tmpdi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pandas_timestamp_overflow_pyarrow.ArrowEngineWithTimestampClamp_test_pandas_timestamp_overflow_pyarrow.dd_read_parquet_str_tmpdi", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2646, "end_line": 2693, "span_ids": ["test_pandas_timestamp_overflow_pyarrow"], "tokens": 431}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_pandas_timestamp_overflow_pyarrow(tmpdir):\n # ... 
other code\n\n class ArrowEngineWithTimestampClamp(ArrowEngine):\n @classmethod\n def clamp_arrow_datetimes(cls, arrow_table: pa.Table) -> pa.Table:\n \"\"\"Constrain datetimes to be valid for pandas\n\n Since pandas works in ns precision and arrow / parquet defaults to ms\n precision we need to clamp our datetimes to something reasonable\"\"\"\n\n new_columns = []\n for i, col in enumerate(arrow_table.columns):\n if pa.types.is_timestamp(col.type) and (\n col.type.unit in (\"s\", \"ms\", \"us\")\n ):\n # nanoseconds per unit of the source precision\n multiplier = {\"s\": 1_000_000_000, \"ms\": 1_000_000, \"us\": 1_000}[\n col.type.unit\n ]\n\n original_type = col.type\n\n series: pd.Series = col.cast(pa.int64()).to_pandas(\n types_mapper={pa.int64(): pd.Int64Dtype}\n )\n info = np.iinfo(np.dtype(\"int64\"))\n # constrain data to be within valid ranges\n series.clip(\n lower=info.min // multiplier + 1,\n upper=info.max // multiplier,\n inplace=True,\n )\n new_array = pa.array(series, pa.int64())\n new_array = new_array.cast(original_type)\n new_columns.append(new_array)\n else:\n new_columns.append(col)\n\n return pa.Table.from_arrays(new_columns, names=arrow_table.column_names)\n\n @classmethod\n def _arrow_table_to_pandas(\n cls, arrow_table: pa.Table, categories, **kwargs\n ) -> pd.DataFrame:\n fixed_arrow_table = cls.clamp_arrow_datetimes(arrow_table)\n return super()._arrow_table_to_pandas(\n fixed_arrow_table, categories, **kwargs\n )\n\n # this should not fail, but instead produce timestamps that are in the valid range\n dd.read_parquet(str(tmpdir), engine=ArrowEngineWithTimestampClamp).compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partitioned_preserve_index_test_partitioned_preserve_index.assert_eq_expect_got_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partitioned_preserve_index_test_partitioned_preserve_index.assert_eq_expect_got_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2696, "end_line": 2719, "span_ids": ["test_partitioned_preserve_index"], "tokens": 229}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@write_read_engines_xfail\ndef test_partitioned_preserve_index(tmpdir, write_engine, read_engine):\n\n if write_engine == \"pyarrow\" and pa.__version__ < LooseVersion(\"0.15.0\"):\n pytest.skip(\"PyArrow>=0.15 Required.\")\n\n tmp = str(tmpdir)\n size = 1_000\n npartitions = 4\n b = np.arange(npartitions).repeat(size // npartitions)\n data = pd.DataFrame(\n {\n \"myindex\": np.arange(size),\n \"A\": np.random.random(size=size),\n \"B\": pd.Categorical(b),\n }\n ).set_index(\"myindex\")\n data.index.name = None\n df1 = dd.from_pandas(data, npartitions=npartitions)\n df1.to_parquet(tmp, partition_on=\"B\", engine=write_engine)\n\n expect = data[data[\"B\"] == 1]\n got = dd.read_parquet(tmp, engine=read_engine, 
filters=[(\"B\", \"==\", 1)])\n assert_eq(expect, got)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_from_pandas_preserve_none_index_test_from_pandas_preserve_none_index.assert_eq_expect_got_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_from_pandas_preserve_none_index_test_from_pandas_preserve_none_index.assert_eq_expect_got_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2722, "end_line": 2735, "span_ids": ["test_from_pandas_preserve_none_index"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_pandas_preserve_none_index(tmpdir, engine):\n\n check_pyarrow()\n if pa.__version__ < LooseVersion(\"0.15.0\"):\n pytest.skip(\"PyArrow>=0.15 Required.\")\n\n fn = str(tmpdir.join(\"test.parquet\"))\n df = pd.DataFrame({\"a\": [1, 2], \"b\": [4, 5], \"c\": [6, 7]}).set_index(\"c\")\n df.index.name = None\n df.to_parquet(fn, engine=\"pyarrow\", index=True)\n\n expect = pd.read_parquet(fn)\n got = dd.read_parquet(fn, engine=engine)\n assert_eq(expect, got)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_from_pandas_preserve_none_rangeindex_test_from_pandas_preserve_none_rangeindex.assert_eq_df0_df1_comput": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_from_pandas_preserve_none_rangeindex_test_from_pandas_preserve_none_rangeindex.assert_eq_df0_df1_comput", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2738, "end_line": 2746, "span_ids": ["test_from_pandas_preserve_none_rangeindex"], "tokens": 113}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@write_read_engines()\ndef test_from_pandas_preserve_none_rangeindex(tmpdir, write_engine, read_engine):\n # See GitHub Issue#6348\n fn = str(tmpdir.join(\"test.parquet\"))\n df0 = pd.DataFrame({\"t\": [1, 2, 3]}, index=pd.RangeIndex(start=1, stop=4))\n df0.to_parquet(fn, engine=write_engine)\n\n df1 = dd.read_parquet(fn, engine=read_engine)\n assert_eq(df0, df1.compute())", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_illegal_column_name_test_illegal_column_name.assert_null_name_in_str_e": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_illegal_column_name_test_illegal_column_name.assert_null_name_in_str_e", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2749, "end_line": 2767, "span_ids": ["test_illegal_column_name"], "tokens": 231}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_illegal_column_name(tmpdir, engine):\n # Make sure user is prevented from preserving a \"None\" index\n # name if there is already a column using the special `null_name`\n null_name = \"__null_dask_index__\"\n fn = str(tmpdir.join(\"test.parquet\"))\n df = pd.DataFrame({\"x\": [1, 2], null_name: [4, 5]}).set_index(\"x\")\n df.index.name = None\n ddf = dd.from_pandas(df, npartitions=2)\n\n # If we don't want to preserve the None index name, the\n # write should work, but the user should be warned\n with pytest.warns(UserWarning, match=null_name):\n ddf.to_parquet(fn, engine=engine, write_index=False)\n\n # If we do want to preserve the None index name, should\n # get a ValueError for having an illegal column name\n with pytest.raises(ValueError) as e:\n ddf.to_parquet(fn, engine=engine)\n assert null_name in str(e.value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_from_contextlib_import_co_db.with_tmpfile_as_f_.yield_uri": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_from_contextlib_import_co_db.with_tmpfile_as_f_.yield_uri", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_sql.py", "file_name": "test_sql.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 37, "span_ids": ["imports", "db"], "tokens": 239}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from contextlib import contextmanager\nimport io\n\nimport pytest\n\n# import dask\nfrom dask.dataframe.io.sql import read_sql_table\nfrom dask.dataframe.utils import assert_eq, PANDAS_GT_0240\nfrom dask.utils import tmpfile\n\npd = pytest.importorskip(\"pandas\")\ndd = pytest.importorskip(\"dask.dataframe\")\npytest.importorskip(\"sqlalchemy\")\npytest.importorskip(\"sqlite3\")\nnp = 
pytest.importorskip(\"numpy\")\n\n\ndata = \"\"\"\nname,number,age,negish\nAlice,0,33,-5\nBob,1,40,-3\nChris,2,22,3\nDora,3,16,5\nEdith,4,53,0\nFrancis,5,30,0\nGarreth,6,20,0\n\"\"\"\n\ndf = pd.read_csv(io.StringIO(data), index_col=\"number\")\n\n\n@pytest.yield_fixture\ndef db():\n with tmpfile() as f:\n uri = \"sqlite:///%s\" % f\n df.to_sql(\"test\", uri, index=True, if_exists=\"replace\")\n yield uri", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_empty_test_empty.with_tmpfile_as_f_.assert_pd_dataframe_empty": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_empty_test_empty.with_tmpfile_as_f_.assert_pd_dataframe_empty", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_sql.py", "file_name": "test_sql.py", "file_type": "text/x-python", "category": "test", "start_line": 40, "end_line": 59, "span_ids": ["test_empty"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_empty(db):\n from sqlalchemy import create_engine, MetaData, Table, Column, Integer\n\n with tmpfile() as f:\n uri = \"sqlite:///%s\" % f\n metadata = MetaData()\n engine = create_engine(uri)\n table = Table(\n \"empty_table\",\n metadata,\n Column(\"id\", Integer, primary_key=True),\n Column(\"col2\", Integer),\n )\n metadata.create_all(engine)\n\n dask_df = read_sql_table(table.name, uri, index_col=\"id\", npartitions=1)\n assert dask_df.index.name == \"id\"\n assert dask_df.col2.dtype == np.dtype(\"int64\")\n pd_dataframe = dask_df.compute()\n assert pd_dataframe.empty is True", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_empty_other_schema_test_empty_other_schema.engine_execute_DROP_SCHE": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_empty_other_schema_test_empty_other_schema.engine_execute_DROP_SCHE", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_sql.py", "file_name": "test_sql.py", "file_type": "text/x-python", "category": "test", "start_line": 62, "end_line": 106, "span_ids": ["test_empty_other_schema"], "tokens": 372}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skip(\n reason=\"Requires a postgres server. 
Sqlite does not support multiple schemas.\"\n)\ndef test_empty_other_schema():\n from sqlalchemy import create_engine, MetaData, Table, Column, Integer, event, DDL\n\n # Database configurations.\n pg_host = \"localhost\"\n pg_port = \"5432\"\n pg_user = \"user\"\n pg_pass = \"pass\"\n pg_db = \"db\"\n db_url = \"postgresql://%s:%s@%s:%s/%s\" % (pg_user, pg_pass, pg_host, pg_port, pg_db)\n\n # Create an empty table in a different schema.\n table_name = \"empty_table\"\n schema_name = \"other_schema\"\n engine = create_engine(db_url)\n metadata = MetaData()\n table = Table(\n table_name,\n metadata,\n Column(\"id\", Integer, primary_key=True),\n Column(\"col2\", Integer),\n schema=schema_name,\n )\n # Create the schema and the table.\n event.listen(\n metadata, \"before_create\", DDL(\"CREATE SCHEMA IF NOT EXISTS %s\" % schema_name)\n )\n metadata.create_all(engine)\n\n # Read the empty table from the other schema.\n dask_df = read_sql_table(\n table.name, db_url, index_col=\"id\", schema=table.schema, npartitions=1\n )\n\n # Validate that the retrieved table is empty.\n assert dask_df.index.name == \"id\"\n assert dask_df.col2.dtype == np.dtype(\"int64\")\n pd_dataframe = dask_df.compute()\n assert pd_dataframe.empty is True\n\n # Drop the schema and the table.\n engine.execute(\"DROP SCHEMA IF EXISTS %s CASCADE\" % schema_name)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_needs_rational_test_needs_rational.with_tmpfile_as_f_.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_needs_rational_test_needs_rational.with_tmpfile_as_f_.None_4", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_sql.py", "file_name": "test_sql.py", "file_type": "text/x-python", "category": "test", "start_line": 109, "end_line": 154, "span_ids": ["test_needs_rational"], "tokens": 448}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_needs_rational(db):\n import datetime\n\n now = datetime.datetime.now()\n d = datetime.timedelta(seconds=1)\n df = pd.DataFrame(\n {\n \"a\": list(\"ghjkl\"),\n \"b\": [now + i * d for i in range(5)],\n \"c\": [True, True, False, True, True],\n }\n )\n df = df.append(\n [\n {\"a\": \"x\", \"b\": now + d * 1000, \"c\": None},\n {\"a\": None, \"b\": now + d * 1001, \"c\": None},\n ]\n )\n with tmpfile() as f:\n uri = \"sqlite:///%s\" % f\n df.to_sql(\"test\", uri, index=False, if_exists=\"replace\")\n\n # one partition contains NULL\n data = read_sql_table(\"test\", uri, npartitions=2, index_col=\"b\")\n df2 = df.set_index(\"b\")\n assert_eq(data, df2.astype({\"c\": bool})) # bools are coerced\n\n # one partition contains NULL, but big enough head\n data = read_sql_table(\"test\", uri, npartitions=2, index_col=\"b\", head_rows=12)\n df2 = df.set_index(\"b\")\n assert_eq(data, df2)\n\n # empty partitions\n data = read_sql_table(\"test\", uri, npartitions=20, index_col=\"b\")\n part = 
data.get_partition(12).compute()\n assert part.dtypes.tolist() == [\"O\", bool]\n assert part.empty\n df2 = df.set_index(\"b\")\n assert_eq(data, df2.astype({\"c\": bool}))\n\n # explicit meta\n data = read_sql_table(\"test\", uri, npartitions=2, index_col=\"b\", meta=df2[:0])\n part = data.get_partition(1).compute()\n assert part.dtypes.tolist() == [\"O\", \"O\"]\n df2 = df.set_index(\"b\")\n assert_eq(data, df2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_simple_test_npartitions.None_6": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_simple_test_npartitions.None_6", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_sql.py", "file_name": "test_sql.py", "file_type": "text/x-python", "category": "test", "start_line": 157, "end_line": 204, "span_ids": ["test_npartitions", "test_simple"], "tokens": 347}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_simple(db):\n # single chunk\n data = read_sql_table(\"test\", db, npartitions=2, index_col=\"number\").compute()\n assert (data.name == df.name).all()\n assert data.index.name == \"number\"\n assert_eq(data, df)\n\n\ndef test_npartitions(db):\n data = read_sql_table(\n \"test\", db, columns=list(df.columns), npartitions=2, index_col=\"number\"\n )\n assert len(data.divisions) == 3\n assert (data.name.compute() == df.name).all()\n data = read_sql_table(\n \"test\", db, columns=[\"name\"], npartitions=6, index_col=\"number\"\n )\n assert_eq(data, df[[\"name\"]])\n data = read_sql_table(\n \"test\",\n db,\n columns=list(df.columns),\n bytes_per_chunk=\"2 GiB\",\n index_col=\"number\",\n )\n assert data.npartitions == 1\n assert (data.name.compute() == df.name).all()\n\n data_1 = read_sql_table(\n \"test\",\n db,\n columns=list(df.columns),\n bytes_per_chunk=2 ** 30,\n index_col=\"number\",\n head_rows=1,\n )\n assert data_1.npartitions == 1\n assert (data_1.name.compute() == df.name).all()\n\n data = read_sql_table(\n \"test\",\n db,\n columns=list(df.columns),\n bytes_per_chunk=250,\n index_col=\"number\",\n head_rows=1,\n )\n assert data.npartitions == 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_divisions_test_division_or_partition.assert_eq_out_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_divisions_test_division_or_partition.assert_eq_out_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_sql.py", "file_name": "test_sql.py", "file_type": "text/x-python", "category": "test", "start_line": 207, "end_line": 232, "span_ids": ["test_divisions", 
"test_division_or_partition"], "tokens": 215}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_divisions(db):\n data = read_sql_table(\n \"test\", db, columns=[\"name\"], divisions=[0, 2, 4], index_col=\"number\"\n )\n assert data.divisions == (0, 2, 4)\n assert data.index.max().compute() == 4\n assert_eq(data, df[[\"name\"]][df.index <= 4])\n\n\ndef test_division_or_partition(db):\n with pytest.raises(TypeError):\n read_sql_table(\n \"test\",\n db,\n columns=[\"name\"],\n index_col=\"number\",\n divisions=[0, 2, 4],\n npartitions=3,\n )\n\n out = read_sql_table(\"test\", db, index_col=\"number\", bytes_per_chunk=100)\n m = out.map_partitions(\n lambda d: d.memory_usage(deep=True, index=True).sum()\n ).compute()\n assert (50 < m).all() and (m < 200).all()\n assert_eq(out, df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_with_func_test_with_func.assert_d_index_d_ne": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_with_func_test_with_func.assert_d_index_d_ne", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_sql.py", "file_name": "test_sql.py", "file_type": "text/x-python", "category": "test", "start_line": 259, "end_line": 286, "span_ids": ["test_with_func"], "tokens": 250}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_with_func(db):\n from sqlalchemy import sql\n\n index = sql.func.abs(sql.column(\"negish\")).label(\"abs\")\n\n # function for the index, get all columns\n data = read_sql_table(\"test\", db, npartitions=2, index_col=index)\n assert data.divisions[0] == 0\n part = data.get_partition(0).compute()\n assert (part.index == 0).all()\n\n # now an arith op for one column too; it's name will be 'age'\n data = read_sql_table(\n \"test\",\n db,\n npartitions=2,\n index_col=index,\n columns=[index, -(sql.column(\"age\"))],\n )\n assert (data.age.compute() < 0).all()\n\n # a column that would have no name, give it a label\n index = (-(sql.column(\"negish\"))).label(\"index\")\n data = read_sql_table(\n \"test\", db, npartitions=2, index_col=index, columns=[\"negish\", \"age\"]\n )\n d = data.compute()\n assert (-d.index == d[\"negish\"]).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_no_nameless_index_test_select_from_select.assert_eq_out_df_name_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_no_nameless_index_test_select_from_select.assert_eq_out_df_name_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_sql.py", "file_name": "test_sql.py", "file_type": "text/x-python", "category": "test", "start_line": 289, "end_line": 312, "span_ids": ["test_no_nameless_index", "test_select_from_select"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_no_nameless_index(db):\n from sqlalchemy import sql\n\n index = -(sql.column(\"negish\"))\n with pytest.raises(ValueError):\n read_sql_table(\n \"test\", db, npartitions=2, index_col=index, columns=[\"negish\", \"age\", index]\n )\n\n index = sql.func.abs(sql.column(\"negish\"))\n\n # function for the index, get all columns\n with pytest.raises(ValueError):\n read_sql_table(\"test\", db, npartitions=2, index_col=index)\n\n\ndef test_select_from_select(db):\n from sqlalchemy import sql\n\n s1 = sql.select([sql.column(\"number\"), sql.column(\"name\")]).select_from(\n sql.table(\"test\")\n )\n out = read_sql_table(s1, db, npartitions=2, index_col=\"number\")\n assert_eq(out, df[[\"name\"]])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_extra_connection_engine_keywords_tmp_db_uri.with_tmpfile_as_f_.yield_sqlite_s_f": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_extra_connection_engine_keywords_tmp_db_uri.with_tmpfile_as_f_.yield_sqlite_s_f", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_sql.py", "file_name": "test_sql.py", "file_type": "text/x-python", "category": "test", "start_line": 315, "end_line": 343, "span_ids": ["test_extra_connection_engine_keywords", "tmp_db_uri", "test_no_character_index_without_divisions"], "tokens": 281}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_extra_connection_engine_keywords(capsys, db):\n data = read_sql_table(\n \"test\", db, npartitions=2, index_col=\"number\", engine_kwargs={\"echo\": False}\n ).compute()\n # no captured message from the stdout with the echo=False parameter (this is the default)\n out, err = capsys.readouterr()\n assert \"SELECT\" not in out\n assert_eq(data, df)\n # with the echo=True sqlalchemy parameter, you should get all SQL queries in the stdout\n data = read_sql_table(\n \"test\", db, npartitions=2, index_col=\"number\", engine_kwargs={\"echo\": True}\n ).compute()\n out, err = capsys.readouterr()\n assert \"WHERE test.number >= ? AND test.number < ?\" in out\n assert \"WHERE test.number >= ? 
AND test.number <= ?\" in out\n assert_eq(data, df)\n\n\ndef test_no_character_index_without_divisions(db):\n\n # attempt to read the sql table with a character index and no divisions\n with pytest.raises(TypeError):\n read_sql_table(\"test\", db, npartitions=2, index_col=\"name\", divisions=None)\n\n\n@contextmanager\ndef tmp_db_uri():\n with tmpfile() as f:\n yield \"sqlite:///%s\" % f", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_to_sql_test_to_sql.None_5.assert_actual_npartiti": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_to_sql_test_to_sql.None_5.assert_actual_npartiti", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_sql.py", "file_name": "test_sql.py", "file_type": "text/x-python", "category": "test", "start_line": 346, "end_line": 416, "span_ids": ["test_to_sql"], "tokens": 633}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"npartitions\", (1, 2))\n@pytest.mark.parametrize(\"parallel\", (False, True))\ndef test_to_sql(npartitions, parallel):\n df_by_age = df.set_index(\"age\")\n df_appended = pd.concat(\n [\n df,\n df,\n ]\n )\n\n ddf = dd.from_pandas(df, npartitions)\n ddf_by_age = ddf.set_index(\"age\")\n\n # Simple round trip test: use existing \"number\" index_col\n with tmp_db_uri() as uri:\n ddf.to_sql(\"test\", uri, parallel=parallel)\n result = read_sql_table(\"test\", uri, \"number\")\n assert_eq(df, result)\n\n # Test writing no index, and reading back in with one of the other columns as index (`read_sql_table` requires\n # an index_col)\n with tmp_db_uri() as uri:\n ddf.to_sql(\"test\", uri, parallel=parallel, index=False)\n\n result = read_sql_table(\"test\", uri, \"negish\")\n assert_eq(df.set_index(\"negish\"), result)\n\n result = read_sql_table(\"test\", uri, \"age\")\n assert_eq(df_by_age, result)\n\n # Index by \"age\" instead\n with tmp_db_uri() as uri:\n ddf_by_age.to_sql(\"test\", uri, parallel=parallel)\n result = read_sql_table(\"test\", uri, \"age\")\n assert_eq(df_by_age, result)\n\n # Index column can't have \"object\" dtype if no partitions are provided\n with tmp_db_uri() as uri:\n ddf.set_index(\"name\").to_sql(\"test\", uri)\n with pytest.raises(\n TypeError,\n match='Provided index column is of type \"object\". 
If divisions is not provided the index column type must be numeric or datetime.', # noqa: E501\n ):\n read_sql_table(\"test\", uri, \"name\")\n\n # Test various \"if_exists\" values\n with tmp_db_uri() as uri:\n ddf.to_sql(\"test\", uri)\n\n # Writing a table that already exists fails\n with pytest.raises(ValueError, match=\"Table 'test' already exists\"):\n ddf.to_sql(\"test\", uri)\n\n ddf.to_sql(\"test\", uri, parallel=parallel, if_exists=\"append\")\n result = read_sql_table(\"test\", uri, \"number\")\n\n assert_eq(df_appended, result)\n\n ddf_by_age.to_sql(\"test\", uri, parallel=parallel, if_exists=\"replace\")\n result = read_sql_table(\"test\", uri, \"age\")\n assert_eq(df_by_age, result)\n\n # Verify number of partitions returned, when compute=False\n with tmp_db_uri() as uri:\n result = ddf.to_sql(\"test\", uri, parallel=parallel, compute=False)\n\n # the first result is from the \"meta\" insert\n actual = len(result.compute())\n\n assert actual == npartitions", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_to_sql_kwargs_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_to_sql_kwargs_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_sql.py", "file_name": "test_sql.py", "file_type": "text/x-python", "category": "test", "start_line": 419, "end_line": 437, "span_ids": ["test_to_sql_kwargs"], "tokens": 170}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_sql_kwargs():\n ddf = dd.from_pandas(df, 2)\n with tmp_db_uri() as uri:\n # \"method\" keyword is allowed iff pandas>=0.24.0\n if PANDAS_GT_0240:\n ddf.to_sql(\"test\", uri, method=\"multi\")\n else:\n with pytest.raises(\n NotImplementedError,\n match=r\"'method' requires pandas>=0.24.0. 
You have version 0.23.\\d\",\n ):\n ddf.to_sql(\"test\", uri, method=\"multi\")\n\n # Other, unknown keywords always disallowed\n with pytest.raises(\n TypeError, match=\"to_sql\\\\(\\\\) got an unexpected keyword argument 'unknown'\"\n ):\n ddf.to_sql(\"test\", uri, unknown=None)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/utils.py_pd__get_pyarrow_dtypes.return.dtypes": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/utils.py_pd__get_pyarrow_dtypes.return.dtypes", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 51, "span_ids": ["_get_pyarrow_dtypes", "imports"], "tokens": 354}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pandas as pd\nimport json\nfrom uuid import uuid4\n\n\ndef _get_pyarrow_dtypes(schema, categories):\n \"\"\"Convert a pyarrow.Schema object to pandas dtype dict\"\"\"\n\n # Check for pandas metadata\n has_pandas_metadata = schema.metadata is not None and b\"pandas\" in schema.metadata\n if has_pandas_metadata:\n pandas_metadata = json.loads(schema.metadata[b\"pandas\"].decode(\"utf8\"))\n pandas_metadata_dtypes = {\n c.get(\"field_name\", c.get(\"name\", None)): c[\"numpy_type\"]\n for c in pandas_metadata.get(\"columns\", [])\n }\n tz = {\n c.get(\"field_name\", c.get(\"name\", None)): c[\"metadata\"].get(\n \"timezone\", None\n )\n for c in pandas_metadata.get(\"columns\", [])\n if c[\"metadata\"]\n }\n else:\n pandas_metadata_dtypes = {}\n\n dtypes = {}\n for i in range(len(schema)):\n field = schema[i]\n\n # Get numpy_dtype from pandas metadata if available\n if field.name in pandas_metadata_dtypes:\n if field.name in tz:\n numpy_dtype = (\n pd.Series([], dtype=\"M8[ns]\").dt.tz_localize(tz[field.name]).dtype\n )\n else:\n numpy_dtype = pandas_metadata_dtypes[field.name]\n else:\n try:\n numpy_dtype = field.type.to_pandas_dtype()\n except NotImplementedError:\n continue # Skip this field (in case we aren't reading it anyway)\n\n dtypes[field.name] = numpy_dtype\n\n if categories:\n for cat in categories:\n dtypes[cat] = \"category\"\n\n return dtypes", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/utils.py__meta_from_dtypes_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/utils.py__meta_from_dtypes_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 54, "end_line": 101, "span_ids": ["_guid", "_meta_from_dtypes"], "tokens": 361}, "excluded_embed_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _meta_from_dtypes(to_read_columns, file_dtypes, index_cols, column_index_names):\n \"\"\"Get the final metadata for the dask.dataframe\n\n Parameters\n ----------\n to_read_columns : list\n All the columns to end up with, including index names\n file_dtypes : dict\n Mapping from column name to dtype for every element\n of ``to_read_columns``\n index_cols : list\n Subset of ``to_read_columns`` that should move to the\n index\n column_index_names : list\n The values for df.columns.name for a MultiIndex in the\n columns, or df.index.name for a regular Index in the columns\n\n Returns\n -------\n meta : DataFrame\n \"\"\"\n meta = pd.DataFrame(\n {c: pd.Series([], dtype=d) for (c, d) in file_dtypes.items()},\n columns=to_read_columns,\n )\n df = meta[list(to_read_columns)]\n\n if len(column_index_names) == 1:\n df.columns.name = column_index_names[0]\n if not index_cols:\n return df\n if not isinstance(index_cols, list):\n index_cols = [index_cols]\n df = df.set_index(index_cols)\n # XXX: this means we can't roundtrip dataframes where the index names\n # is actually __index_level_0__\n if len(index_cols) == 1 and index_cols[0] == \"__index_level_0__\":\n df.index.name = None\n\n if len(column_index_names) > 1:\n df.columns.names = column_index_names\n return df\n\n\ndef _guid():\n \"\"\"Simple utility function to get random hex string\"\"\"\n return uuid4().hex", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_warnings_try_loc.try_.except_KeyError_.return.df_head_0_loc_cindexe": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_warnings_try_loc.try_.except_KeyError_.return.df_head_0_loc_cindexe", "embedding": null, "metadata": {"file_path": "dask/dataframe/methods.py", "file_name": "methods.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 44, "span_ids": ["iloc", "imports", "try_loc", "loc"], "tokens": 220}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import warnings\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import is_categorical_dtype, union_categoricals\nfrom tlz import partition\n\nfrom .utils import (\n is_series_like,\n is_index_like,\n is_dataframe_like,\n PANDAS_GT_0250,\n hash_object_dispatch,\n group_split_dispatch,\n)\nfrom ..utils import Dispatch\n\n# ---------------------------------\n# indexing\n# ---------------------------------\n\n\ndef loc(df, iindexer, cindexer=None):\n \"\"\"\n .loc for known divisions\n \"\"\"\n if cindexer is None:\n return df.loc[iindexer]\n else:\n return df.loc[iindexer, cindexer]\n\n\ndef iloc(df, cindexer=None):\n return df.iloc[:, cindexer]\n\n\ndef try_loc(df, 
iindexer, cindexer=None):\n \"\"\"\n .loc for unknown divisions\n \"\"\"\n try:\n return loc(df, iindexer, cindexer)\n except KeyError:\n return df.head(0).loc[:, cindexer]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_boundary_slice_boundary_slice.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_boundary_slice_boundary_slice.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/methods.py", "file_name": "methods.py", "file_type": "text/x-python", "category": "implementation", "start_line": 47, "end_line": 108, "span_ids": ["boundary_slice"], "tokens": 510}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def boundary_slice(\n df, start, stop, right_boundary=True, left_boundary=True, kind=\"loc\"\n):\n \"\"\"Index slice start/stop. Can switch include/exclude boundaries.\n\n Examples\n --------\n >>> df = pd.DataFrame({'x': [10, 20, 30, 40, 50]}, index=[1, 2, 2, 3, 4])\n >>> boundary_slice(df, 2, None)\n x\n 2 20\n 2 30\n 3 40\n 4 50\n >>> boundary_slice(df, 1, 3)\n x\n 1 10\n 2 20\n 2 30\n 3 40\n >>> boundary_slice(df, 1, 3, right_boundary=False)\n x\n 1 10\n 2 20\n 2 30\n\n Empty input DataFrames are returned\n\n >>> df_empty = pd.DataFrame()\n >>> boundary_slice(df_empty, 1, 3)\n Empty DataFrame\n Columns: []\n Index: []\n \"\"\"\n if len(df.index) == 0:\n return df\n\n if kind == \"loc\" and not df.index.is_monotonic:\n # Pandas treats missing keys differently for label-slicing\n # on monotonic vs. 
non-monotonic indexes\n # If the index is monotonic, `df.loc[start:stop]` is fine.\n # If it's not, `df.loc[start:stop]` raises when `start` is missing\n if start is not None:\n if left_boundary:\n df = df[df.index >= start]\n else:\n df = df[df.index > start]\n if stop is not None:\n if right_boundary:\n df = df[df.index <= stop]\n else:\n df = df[df.index < stop]\n return df\n else:\n result = getattr(df, kind)[start:stop]\n if not right_boundary and stop is not None:\n right_index = result.index.get_slice_bound(stop, \"left\", kind)\n result = result.iloc[:right_index]\n if not left_boundary and start is not None:\n left_index = result.index.get_slice_bound(start, \"right\", kind)\n result = result.iloc[left_index:]\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_index_count_describe_aggregate.return.pd_concat_values_axis_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_index_count_describe_aggregate.return.pd_concat_values_axis_1_", "embedding": null, "metadata": {"file_path": "dask/dataframe/methods.py", "file_name": "methods.py", "file_type": "text/x-python", "category": "implementation", "start_line": 111, "end_line": 149, "span_ids": ["wrap_var_reduction", "index_count", "var_mixed_concat", "mean_aggregate", "describe_aggregate"], "tokens": 234}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def index_count(x):\n # Workaround since Index doesn't implement `.count`\n return pd.notnull(x).sum()\n\n\ndef mean_aggregate(s, n):\n try:\n with warnings.catch_warnings(record=True):\n warnings.simplefilter(\"always\")\n return s / n\n except ZeroDivisionError:\n return np.float64(np.nan)\n\n\ndef wrap_var_reduction(array_var, index):\n if isinstance(array_var, np.ndarray) or isinstance(array_var, list):\n return pd.Series(array_var, index=index)\n\n return array_var\n\n\ndef var_mixed_concat(numeric_var, timedelta_var, columns):\n vars = pd.concat([numeric_var, timedelta_var])\n\n return vars.reindex(index=columns)\n\n\ndef describe_aggregate(values):\n assert len(values) > 0\n\n # arrange categorical and numeric stats\n names = []\n values_indexes = sorted((x.index for x in values), key=len)\n for idxnames in values_indexes:\n for name in idxnames:\n if name not in names:\n names.append(name)\n\n return pd.concat(values, axis=1, sort=False).reindex(names)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_describe_numeric_aggregate_describe_numeric_aggregate.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_describe_numeric_aggregate_describe_numeric_aggregate.return.result", "embedding": null, 
"metadata": {"file_path": "dask/dataframe/methods.py", "file_name": "methods.py", "file_type": "text/x-python", "category": "implementation", "start_line": 152, "end_line": 180, "span_ids": ["describe_numeric_aggregate"], "tokens": 236}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def describe_numeric_aggregate(stats, name=None, is_timedelta_col=False):\n assert len(stats) == 6\n count, mean, std, min, q, max = stats\n\n if is_series_like(count):\n typ = type(count.to_frame())\n else:\n typ = type(q)\n\n if is_timedelta_col:\n mean = pd.to_timedelta(mean)\n std = pd.to_timedelta(std)\n min = pd.to_timedelta(min)\n max = pd.to_timedelta(max)\n q = q.apply(lambda x: pd.to_timedelta(x))\n\n part1 = typ([count, mean, std, min], index=[\"count\", \"mean\", \"std\", \"min\"])\n\n q.index = [\"{0:g}%\".format(l * 100) for l in tolist(q.index)]\n if is_series_like(q) and typ != type(q):\n q = q.to_frame()\n part3 = typ([max], index=[\"max\"])\n\n result = concat([part1, q, part3], sort=False)\n\n if is_series_like(result):\n result.name = name\n\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_describe_nonnumeric_aggregate_describe_nonnumeric_aggregate.return.pd_Series_values_index_i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_describe_nonnumeric_aggregate_describe_nonnumeric_aggregate.return.pd_Series_values_index_i", "embedding": null, "metadata": {"file_path": "dask/dataframe/methods.py", "file_name": "methods.py", "file_type": "text/x-python", "category": "implementation", "start_line": 183, "end_line": 230, "span_ids": ["describe_nonnumeric_aggregate"], "tokens": 366}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def describe_nonnumeric_aggregate(stats, name):\n args_len = len(stats)\n\n is_datetime_column = args_len == 5\n is_categorical_column = args_len == 3\n\n assert is_datetime_column or is_categorical_column\n\n if is_categorical_column:\n nunique, count, top_freq = stats\n else:\n nunique, count, top_freq, min_ts, max_ts = stats\n\n # input was empty dataframe/series\n if len(top_freq) == 0:\n data = [0, 0]\n index = [\"count\", \"unique\"]\n dtype = None\n if PANDAS_GT_0250:\n data.extend([None, None])\n index.extend([\"top\", \"freq\"])\n dtype = object\n result = pd.Series(data, index=index, dtype=dtype, name=name)\n return result\n\n top = top_freq.index[0]\n freq = top_freq.iloc[0]\n\n index = [\"unique\", \"count\", \"top\", \"freq\"]\n values = [nunique, count]\n\n if is_datetime_column:\n tz = top.tz\n top = pd.Timestamp(top)\n if top.tzinfo is not None and tz is not None:\n # Don't tz_localize(None) if key is already tz-aware\n top = 
top.tz_convert(tz)\n        else:\n            top = top.tz_localize(tz)\n\n        first = pd.Timestamp(min_ts, tz=tz)\n        last = pd.Timestamp(max_ts, tz=tz)\n        index.extend([\"first\", \"last\"])\n        values.extend([top, freq, first, last])\n    else:\n        values.extend([top, freq])\n\n    return pd.Series(values, index=index, name=name)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py__cum_aggregate_apply_concat_dispatch.Dispatch_concat_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py__cum_aggregate_apply_concat_dispatch.Dispatch_concat_", "embedding": null, "metadata": {"file_path": "dask/dataframe/methods.py", "file_name": "methods.py", "file_type": "text/x-python", "category": "implementation", "start_line": 233, "end_line": 358, "span_ids": ["cummax_aggregate", "pivot_sum", "fillna_check", "impl", "pivot_count", "_cum_aggregate_apply", "value_counts_aggregate", "values", "cummin_aggregate", "assign", "size", "value_counts_combine", "sample", "drop_columns", "pivot_agg", "unique", "nbytes"], "tokens": 815}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _cum_aggregate_apply(aggregate, x, y):\n    \"\"\"Apply aggregation function within a cumulative aggregation\n\n    Parameters\n    ----------\n    aggregate: function (a, a) -> a\n        The aggregation function, like add, which is used to combine subsequent\n        results\n    x:\n    y:\n    \"\"\"\n    if y is None:\n        return x\n    else:\n        return aggregate(x, y)\n\n\ndef cummin_aggregate(x, y):\n    if is_series_like(x) or is_dataframe_like(x):\n        return x.where((x < y) | x.isnull(), y, axis=x.ndim - 1)\n    else:  # scalar\n        return x if x < y else y\n\n\ndef cummax_aggregate(x, y):\n    if is_series_like(x) or is_dataframe_like(x):\n        return x.where((x > y) | x.isnull(), y, axis=x.ndim - 1)\n    else:  # scalar\n        return x if x > y else y\n\n\ndef assign(df, *pairs):\n    # Only deep copy when updating an element\n    # (to avoid modifying the original)\n    pairs = dict(partition(2, pairs))\n    deep = bool(set(pairs) & set(df.columns))\n    df = df.copy(deep=bool(deep))\n    for name, val in pairs.items():\n        df[name] = val\n    return df\n\n\ndef unique(x, series_name=None):\n    out = x.unique()\n    # out can be either an np.ndarray or may already be a series\n    # like object. 
When out is an np.ndarray, it must be wrapped.\n    if not (is_series_like(out) or is_index_like(out)):\n        out = pd.Series(out, name=series_name)\n    return out\n\n\ndef value_counts_combine(x, sort=True, ascending=False, **groupby_kwargs):\n    # sort and ascending don't actually matter until the agg step\n    return x.groupby(level=0, **groupby_kwargs).sum()\n\n\ndef value_counts_aggregate(x, sort=True, ascending=False, **groupby_kwargs):\n    out = value_counts_combine(x, **groupby_kwargs)\n    if sort:\n        return out.sort_values(ascending=ascending)\n    return out\n\n\ndef nbytes(x):\n    return x.nbytes\n\n\ndef size(x):\n    return x.size\n\n\ndef values(df):\n    return df.values\n\n\ndef sample(df, state, frac, replace):\n    rs = np.random.RandomState(state)\n    return df.sample(random_state=rs, frac=frac, replace=replace) if len(df) > 0 else df\n\n\ndef drop_columns(df, columns, dtype):\n    df = df.drop(columns, axis=1)\n    df.columns = df.columns.astype(dtype)\n    return df\n\n\ndef fillna_check(df, method, check=True):\n    out = df.fillna(method=method)\n    if check and out.isnull().values.all(axis=0).any():\n        raise ValueError(\n            \"All NaN partition encountered in `fillna`. Try \"\n            \"using ``df.repartition`` to increase the partition \"\n            \"size, or specify `limit` in `fillna`.\"\n        )\n    return out\n\n\n# ---------------------------------\n# reshape\n# ---------------------------------\n\n\ndef pivot_agg(df):\n    return df.groupby(level=0).sum()\n\n\ndef pivot_sum(df, index, columns, values):\n    return pd.pivot_table(\n        df, index=index, columns=columns, values=values, aggfunc=\"sum\", dropna=False\n    )\n\n\ndef pivot_count(df, index, columns, values):\n    # we cannot determine dtype until concatenating all partitions.\n    # make dtype deterministic, always coerce to np.float64\n    return pd.pivot_table(\n        df, index=index, columns=columns, values=values, aggfunc=\"count\", dropna=False\n    ).astype(np.float64)\n\n\n# ---------------------------------\n# concat\n# ---------------------------------\n\n\nconcat_dispatch = Dispatch(\"concat\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_concat_concat.if_len_dfs_1_.else_.return.func_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_concat_concat.if_len_dfs_1_.else_.return.func_", "embedding": null, "metadata": {"file_path": "dask/dataframe/methods.py", "file_name": "methods.py", "file_type": "text/x-python", "category": "implementation", "start_line": 361, "end_line": 400, "span_ids": ["concat"], "tokens": 254}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def concat(\n    dfs,\n    axis=0,\n    join=\"outer\",\n    uniform=False,\n    filter_warning=True,\n    ignore_index=False,\n    **kwargs\n):\n    \"\"\"Concatenate, handling some edge cases:\n\n    - Unions categoricals between partitions\n    - Ignores empty partitions\n\n    Parameters\n    ----------\n    dfs : list of DataFrame, Series, or Index\n    axis : int or str, optional\n    join : str, optional\n    uniform : bool, optional\n        Whether to treat 
``dfs[0]`` as representative of ``dfs[1:]``. Set to\n        True if all arguments have the same columns and dtypes (but not\n        necessarily categories). Default is False.\n    ignore_index : bool, optional\n        Whether to allow index values to be ignored/dropped during\n        concatenation. Default is False.\n    \"\"\"\n    if len(dfs) == 1:\n        return dfs[0]\n    else:\n        func = concat_dispatch.dispatch(type(dfs[0]))\n        return func(\n            dfs,\n            axis=axis,\n            join=join,\n            uniform=uniform,\n            filter_warning=filter_warning,\n            ignore_index=ignore_index,\n            **kwargs\n        )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_concat_pandas_concat_pandas._Concatenate_the_partiti": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_concat_pandas_concat_pandas._Concatenate_the_partiti", "embedding": null, "metadata": {"file_path": "dask/dataframe/methods.py", "file_name": "methods.py", "file_type": "text/x-python", "category": "implementation", "start_line": 403, "end_line": 458, "span_ids": ["concat_pandas"], "tokens": 473}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@concat_dispatch.register((pd.DataFrame, pd.Series, pd.Index))\ndef concat_pandas(\n    dfs,\n    axis=0,\n    join=\"outer\",\n    uniform=False,\n    filter_warning=True,\n    ignore_index=False,\n    **kwargs\n):\n    if axis == 1:\n        return pd.concat(dfs, axis=axis, join=join, **kwargs)\n\n    # Support concatenating indices along axis 0\n    if isinstance(dfs[0], pd.Index):\n        if isinstance(dfs[0], pd.CategoricalIndex):\n            for i in range(1, len(dfs)):\n                if not isinstance(dfs[i], pd.CategoricalIndex):\n                    dfs[i] = dfs[i].astype(\"category\")\n            return pd.CategoricalIndex(union_categoricals(dfs), name=dfs[0].name)\n        elif isinstance(dfs[0], pd.MultiIndex):\n            first, rest = dfs[0], dfs[1:]\n            if all(\n                (isinstance(o, pd.MultiIndex) and o.nlevels >= first.nlevels)\n                for o in rest\n            ):\n                arrays = [\n                    concat([i._get_level_values(n) for i in dfs])\n                    for n in range(first.nlevels)\n                ]\n                return pd.MultiIndex.from_arrays(arrays, names=first.names)\n\n            to_concat = (first.values,) + tuple(k._values for k in rest)\n            new_tuples = np.concatenate(to_concat)\n            try:\n                return pd.MultiIndex.from_tuples(new_tuples, names=first.names)\n            except Exception:\n                return pd.Index(new_tuples)\n        return dfs[0].append(dfs[1:])\n\n    # Handle categorical index separately\n    dfs0_index = dfs[0].index\n\n    has_categoricalindex = isinstance(dfs0_index, pd.CategoricalIndex) or (\n        isinstance(dfs0_index, pd.MultiIndex)\n        and any(isinstance(i, pd.CategoricalIndex) for i in dfs0_index.levels)\n    )\n\n    if has_categoricalindex:\n        dfs2 = [df.reset_index(drop=True) for df in dfs]\n        ind = concat([df.index for df in dfs])\n    else:\n        dfs2 = dfs\n        ind = None\n\n    # Concatenate the partitions together, handling categories as needed\n    # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_concat_pandas.if__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_concat_pandas.if__", "embedding": null, "metadata": {"file_path": "dask/dataframe/methods.py", "file_name": "methods.py", "file_type": "text/x-python", "category": "implementation", "start_line": 459, "end_line": 565, "span_ids": ["impl:3", "tolist", "impl:5", "assign_index", "concat_pandas", "tolist_pandas"], "tokens": 842}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@concat_dispatch.register((pd.DataFrame, pd.Series, pd.Index))\ndef concat_pandas(\n dfs,\n axis=0,\n join=\"outer\",\n uniform=False,\n filter_warning=True,\n ignore_index=False,\n **kwargs\n):\n # ... other code\n if (\n isinstance(dfs2[0], pd.DataFrame)\n if uniform\n else any(isinstance(df, pd.DataFrame) for df in dfs2)\n ):\n if uniform:\n dfs3 = dfs2\n cat_mask = dfs2[0].dtypes == \"category\"\n else:\n # When concatenating mixed dataframes and series on axis 1, Pandas\n # converts series to dataframes with a single column named 0, then\n # concatenates.\n dfs3 = [\n df\n if isinstance(df, pd.DataFrame)\n else df.to_frame().rename(columns={df.name: 0})\n for df in dfs2\n ]\n # pandas may raise a RuntimeWarning for comparing ints and strs\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n if filter_warning:\n warnings.simplefilter(\"ignore\", FutureWarning)\n cat_mask = pd.concat(\n [(df.dtypes == \"category\").to_frame().T for df in dfs3],\n join=join,\n **kwargs\n ).any()\n\n if cat_mask.any():\n not_cat = cat_mask[~cat_mask].index\n # this should be aligned, so no need to filter warning\n out = pd.concat(\n [df[df.columns.intersection(not_cat)] for df in dfs3],\n join=join,\n **kwargs\n )\n temp_ind = out.index\n for col in cat_mask.index.difference(not_cat):\n # Find an example of categoricals in this column\n for df in dfs3:\n sample = df.get(col)\n if sample is not None:\n break\n # Extract partitions, subbing in missing if needed\n parts = []\n for df in dfs3:\n if col in df.columns:\n parts.append(df[col])\n else:\n codes = np.full(len(df), -1, dtype=\"i8\")\n data = pd.Categorical.from_codes(\n codes, sample.cat.categories, sample.cat.ordered\n )\n parts.append(data)\n out[col] = union_categoricals(parts)\n # Pandas resets index type on assignment if frame is empty\n # https://github.com/pandas-dev/pandas/issues/17101\n if not len(temp_ind):\n out.index = temp_ind\n out = out.reindex(columns=cat_mask.index)\n else:\n # pandas may raise a RuntimeWarning for comparing ints and strs\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n if filter_warning:\n warnings.simplefilter(\"ignore\", FutureWarning)\n out = pd.concat(dfs3, join=join, sort=False)\n else:\n if is_categorical_dtype(dfs2[0].dtype):\n if ind is None:\n ind = concat([df.index for df in dfs2])\n return 
pd.Series(union_categoricals(dfs2), index=ind, name=dfs2[0].name)\n with warnings.catch_warnings():\n if filter_warning:\n warnings.simplefilter(\"ignore\", FutureWarning)\n\n out = pd.concat(dfs2, join=join, **kwargs)\n # Re-add the index if needed\n if ind is not None:\n out.index = ind\n return out\n\n\ntolist_dispatch = Dispatch(\"tolist\")\n\n\ndef tolist(obj):\n func = tolist_dispatch.dispatch(type(obj))\n return func(obj)\n\n\n@tolist_dispatch.register((pd.Series, pd.Index, pd.Categorical))\ndef tolist_pandas(obj):\n return obj.tolist()\n\n\n# cuDF may try to import old dispatch functions\nhash_df = hash_object_dispatch\ngroup_split = group_split_dispatch\n\n\ndef assign_index(df, ind):\n df = df.copy()\n df.index = ind\n return df", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py___M": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py___M", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 90, "span_ids": ["imports", "docstring"], "tokens": 695}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"\nAlgorithms that Involve Multiple DataFrames\n===========================================\n\nThe pandas operations ``concat``, ``join``, and ``merge`` combine multiple\nDataFrames. This module contains analogous algorithms in the parallel case.\n\nThere are two important cases:\n\n1. We combine along a partitioned index\n2. We combine along an unpartitioned index or other column\n\nIn the first case we know which partitions of each dataframe interact with\nwhich others. This lets us be significantly more clever and efficient.\n\nIn the second case each partition from one dataset interacts with all\npartitions from the other. We handle this through a shuffle operation.\n\nPartitioned Joins\n-----------------\n\nIn the first case where we join along a partitioned index we proceed in the\nfollowing stages.\n\n1. Align the partitions of all inputs to be the same. This involves a call\n to ``dd.repartition`` which will split up and concat existing partitions as\n necessary. After this step all inputs have partitions that align with\n each other. This step is relatively cheap.\n See the function ``align_partitions``.\n2. Remove unnecessary partitions based on the type of join we perform (left,\n right, inner, outer). We can do this at the partition level before any\n computation happens. We'll do it again on each partition when we call the\n in-memory function. See the function ``require``.\n3. Embarrassingly parallel calls to ``pd.concat``, ``pd.join``, or\n ``pd.merge``. Now that the data is aligned and unnecessary blocks have\n been removed we can rely on the fast in-memory Pandas join machinery to\n execute joins per-partition. 
We know that all intersecting records exist\n within the same partition\n\n\nHash Joins via Shuffle\n----------------------\n\nWhen we join along an unpartitioned index or along an arbitrary column any\npartition from one input might interact with any partition in another. In\nthis case we perform a hash-join by shuffling data in each input by that\ncolumn. This results in new inputs with the same partition structure cleanly\nseparated along that column.\n\nWe proceed with hash joins in the following stages:\n\n1. Shuffle each input on the specified column. See the function\n ``dask.dataframe.shuffle.shuffle``.\n2. Perform embarrassingly parallel join across shuffled inputs.\n\"\"\"\nfrom functools import wraps, partial\nimport warnings\n\nfrom tlz import merge_sorted, unique, first\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import is_dtype_equal, is_categorical_dtype, union_categoricals\n\nfrom ..base import tokenize, is_dask_collection\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..utils import apply\nfrom ._compat import PANDAS_GT_100\nfrom .core import (\n _Frame,\n DataFrame,\n Series,\n map_partitions,\n Index,\n _maybe_from_pandas,\n new_dd_object,\n is_broadcastable,\n prefix_reduction,\n suffix_reduction,\n)\nfrom .io import from_pandas\nfrom . import methods\nfrom .shuffle import shuffle, rearrange_by_divisions\nfrom .utils import (\n strip_unknown_categories,\n is_series_like,\n asciitable,\n is_dataframe_like,\n make_meta,\n)\nfrom ..utils import M", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_align_partitions_align_partitions.return.dfs2_tuple_divisions_r": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_align_partitions_align_partitions.return.dfs2_tuple_divisions_r", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 93, "end_line": 149, "span_ids": ["align_partitions"], "tokens": 468}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def align_partitions(*dfs):\n \"\"\"Mutually partition and align DataFrame blocks\n\n This serves as precursor to multi-dataframe operations like join, concat,\n or merge.\n\n Parameters\n ----------\n dfs: sequence of dd.DataFrame, dd.Series and dd.base.Scalar\n Sequence of dataframes to be aligned on their index\n\n Returns\n -------\n dfs: sequence of dd.DataFrame, dd.Series and dd.base.Scalar\n These must have consistent divisions with each other\n divisions: tuple\n Full divisions sequence of the entire result\n result: list\n A list of lists of keys that show which data exist on which\n divisions\n \"\"\"\n _is_broadcastable = partial(is_broadcastable, dfs)\n dfs1 = [df for df in dfs if isinstance(df, _Frame) and not _is_broadcastable(df)]\n if len(dfs) == 0:\n raise ValueError(\"dfs contains no DataFrame and Series\")\n if not all(df.known_divisions for df in dfs1):\n raise 
ValueError(\n \"Not all divisions are known, can't align \"\n \"partitions. Please use `set_index` \"\n \"to set the index.\"\n )\n\n divisions = list(unique(merge_sorted(*[df.divisions for df in dfs1])))\n if len(divisions) == 1: # single value for index\n divisions = (divisions[0], divisions[0])\n dfs2 = [\n df.repartition(divisions, force=True) if isinstance(df, _Frame) else df\n for df in dfs\n ]\n\n result = list()\n inds = [0 for df in dfs]\n for d in divisions[:-1]:\n L = list()\n for i, df in enumerate(dfs2):\n if isinstance(df, _Frame):\n j = inds[i]\n divs = df.divisions\n if j < len(divs) - 1 and divs[j] == d:\n L.append((df._name, inds[i]))\n inds[i] += 1\n else:\n L.append(None)\n else: # Scalar has no divisions\n L.append(None)\n result.append(L)\n return dfs2, tuple(divisions), result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py__maybe_align_partitions__maybe_align_partitions.return.args": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py__maybe_align_partitions__maybe_align_partitions.return.args", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 152, "end_line": 167, "span_ids": ["_maybe_align_partitions"], "tokens": 172}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _maybe_align_partitions(args):\n \"\"\"Align DataFrame blocks if divisions are different.\n\n Note that if all divisions are unknown, but have equal npartitions, then\n they will be passed through unchanged. 
This is different than\n `align_partitions`, which will fail if divisions aren't all known\"\"\"\n _is_broadcastable = partial(is_broadcastable, args)\n dfs = [df for df in args if isinstance(df, _Frame) and not _is_broadcastable(df)]\n if not dfs:\n return args\n\n divisions = dfs[0].divisions\n if not all(df.divisions == divisions for df in dfs):\n dfs2 = iter(align_partitions(*dfs)[0])\n return [a if not isinstance(a, _Frame) else next(dfs2) for a in args]\n return args", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_require_require.return.divisions_parts": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_require_require.return.divisions_parts", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 170, "end_line": 212, "span_ids": ["require"], "tokens": 476}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def require(divisions, parts, required=None):\n \"\"\"Clear out divisions where required components are not present\n\n In left, right, or inner joins we exclude portions of the dataset if one\n side or the other is not present. We can achieve this at the partition\n level as well\n\n >>> divisions = [1, 3, 5, 7, 9]\n >>> parts = [(('a', 0), None),\n ... (('a', 1), ('b', 0)),\n ... (('a', 2), ('b', 1)),\n ... 
(None, ('b', 2))]\n\n >>> divisions2, parts2 = require(divisions, parts, required=[0])\n >>> divisions2\n (1, 3, 5, 7)\n >>> parts2 # doctest: +NORMALIZE_WHITESPACE\n ((('a', 0), None),\n (('a', 1), ('b', 0)),\n (('a', 2), ('b', 1)))\n\n >>> divisions2, parts2 = require(divisions, parts, required=[1])\n >>> divisions2\n (3, 5, 7, 9)\n >>> parts2 # doctest: +NORMALIZE_WHITESPACE\n ((('a', 1), ('b', 0)),\n (('a', 2), ('b', 1)),\n (None, ('b', 2)))\n\n >>> divisions2, parts2 = require(divisions, parts, required=[0, 1])\n >>> divisions2\n (3, 5, 7)\n >>> parts2 # doctest: +NORMALIZE_WHITESPACE\n ((('a', 1), ('b', 0)),\n (('a', 2), ('b', 1)))\n \"\"\"\n if not required:\n return divisions, parts\n for i in required:\n present = [j for j, p in enumerate(parts) if p[i] is not None]\n divisions = tuple(divisions[min(present) : max(present) + 2])\n parts = tuple(parts[min(present) : max(present) + 1])\n return divisions, parts", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_None_1_merge_chunk.return.out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_None_1_merge_chunk.return.out", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 215, "end_line": 281, "span_ids": ["require", "merge_chunk", "impl"], "tokens": 473}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "###############################################################\n# Join / Merge\n###############################################################\n\n\nrequired = {\n \"left\": [0],\n \"leftsemi\": [0],\n \"leftanti\": [0],\n \"right\": [1],\n \"inner\": [0, 1],\n \"outer\": [],\n}\nallowed_left = (\"inner\", \"left\", \"leftsemi\", \"leftanti\")\nallowed_right = (\"inner\", \"right\")\n\n\ndef merge_chunk(lhs, *args, **kwargs):\n empty_index_dtype = kwargs.pop(\"empty_index_dtype\", None)\n categorical_columns = kwargs.pop(\"categorical_columns\", None)\n\n rhs, *args = args\n left_index = kwargs.get(\"left_index\", False)\n right_index = kwargs.get(\"right_index\", False)\n\n if categorical_columns is not None and PANDAS_GT_100:\n for col in categorical_columns:\n left = None\n right = None\n\n if col in lhs:\n left = lhs[col]\n elif col == kwargs.get(\"right_on\", None) and left_index:\n if is_categorical_dtype(lhs.index):\n left = lhs.index\n\n if col in rhs:\n right = rhs[col]\n elif col == kwargs.get(\"left_on\", None) and right_index:\n if is_categorical_dtype(rhs.index):\n right = rhs.index\n\n dtype = \"category\"\n if left is not None and right is not None:\n dtype = union_categoricals(\n [left.astype(\"category\").values, right.astype(\"category\").values]\n ).dtype\n\n if left is not None:\n if isinstance(left, pd.Index):\n lhs.index = left.astype(dtype)\n else:\n lhs[col] = left.astype(dtype)\n if right is not None:\n if isinstance(right, pd.Index):\n rhs.index = right.astype(dtype)\n else:\n 
rhs[col] = right.astype(dtype)\n\n out = lhs.merge(rhs, *args, **kwargs)\n\n # Workaround pandas bug where if the output result of a merge operation is\n # an empty dataframe, the output index is `int64` in all cases, regardless\n # of input dtypes.\n if len(out) == 0 and empty_index_dtype is not None:\n out.index = out.index.astype(empty_index_dtype)\n return out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_merge_indexed_dataframes_shuffle_func.shuffle": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_merge_indexed_dataframes_shuffle_func.shuffle", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 284, "end_line": 307, "span_ids": ["merge_indexed_dataframes", "impl:7"], "tokens": 237}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def merge_indexed_dataframes(lhs, rhs, left_index=True, right_index=True, **kwargs):\n \"\"\" Join two partitioned dataframes along their index \"\"\"\n how = kwargs.get(\"how\", \"left\")\n kwargs[\"left_index\"] = left_index\n kwargs[\"right_index\"] = right_index\n\n (lhs, rhs), divisions, parts = align_partitions(lhs, rhs)\n divisions, parts = require(divisions, parts, required[how])\n\n name = \"join-indexed-\" + tokenize(lhs, rhs, **kwargs)\n\n meta = lhs._meta_nonempty.merge(rhs._meta_nonempty, **kwargs)\n kwargs[\"empty_index_dtype\"] = meta.index.dtype\n kwargs[\"categorical_columns\"] = meta.select_dtypes(include=\"category\").columns\n\n dsk = dict()\n for i, (a, b) in enumerate(parts):\n dsk[(name, i)] = (apply, merge_chunk, [a, b], kwargs)\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[lhs, rhs])\n return new_dd_object(graph, name, meta, divisions)\n\n\nshuffle_func = shuffle", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py__name_sometimes_conflict_hash_join.return.new_dd_object_graph_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py__name_sometimes_conflict_hash_join.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 307, "end_line": 377, "span_ids": ["impl:7", "hash_join"], "tokens": 523}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": " # name sometimes conflicts with keyword argument\n\n\ndef hash_join(\n lhs,\n left_on,\n rhs,\n right_on,\n how=\"inner\",\n npartitions=None,\n suffixes=(\"_x\", \"_y\"),\n shuffle=None,\n indicator=False,\n):\n \"\"\"Join two DataFrames on particular columns with hash join\n\n This shuffles both datasets on the joined column and then performs an\n embarrassingly parallel join partition-by-partition\n\n >>> hash_join(a, 'id', rhs, 'id', how='left', npartitions=10) # doctest: +SKIP\n \"\"\"\n if npartitions is None:\n npartitions = max(lhs.npartitions, rhs.npartitions)\n\n lhs2 = shuffle_func(lhs, left_on, npartitions=npartitions, shuffle=shuffle)\n rhs2 = shuffle_func(rhs, right_on, npartitions=npartitions, shuffle=shuffle)\n\n if isinstance(left_on, Index):\n left_on = None\n left_index = True\n else:\n left_index = False\n\n if isinstance(right_on, Index):\n right_on = None\n right_index = True\n else:\n right_index = False\n\n kwargs = dict(\n how=how,\n left_on=left_on,\n right_on=right_on,\n left_index=left_index,\n right_index=right_index,\n suffixes=suffixes,\n indicator=indicator,\n )\n\n # dummy result\n meta = lhs._meta_nonempty.merge(rhs._meta_nonempty, **kwargs)\n\n if isinstance(left_on, list):\n left_on = (list, tuple(left_on))\n if isinstance(right_on, list):\n right_on = (list, tuple(right_on))\n\n token = tokenize(lhs2, rhs2, npartitions, shuffle, **kwargs)\n name = \"hash-join-\" + token\n\n kwargs[\"empty_index_dtype\"] = meta.index.dtype\n kwargs[\"categorical_columns\"] = meta.select_dtypes(include=\"category\").columns\n\n dsk = {\n (name, i): (apply, merge_chunk, [(lhs2._name, i), (rhs2._name, i)], kwargs)\n for i in range(npartitions)\n }\n\n divisions = [None] * (npartitions + 1)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[lhs2, rhs2])\n return new_dd_object(graph, name, meta, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_single_partition_join_single_partition_join.return.new_dd_object_graph_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_single_partition_join_single_partition_join.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 380, "end_line": 420, "span_ids": ["single_partition_join"], "tokens": 393}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def single_partition_join(left, right, **kwargs):\n # if the merge is performed on_index, divisions can be kept, otherwise the\n # new index will not necessarily correspond with the current divisions\n\n meta = left._meta_nonempty.merge(right._meta_nonempty, **kwargs)\n kwargs[\"empty_index_dtype\"] = meta.index.dtype\n kwargs[\"categorical_columns\"] = meta.select_dtypes(include=\"category\").columns\n\n name = \"merge-\" + tokenize(left, right, **kwargs)\n if 
left.npartitions == 1 and kwargs[\"how\"] in allowed_right:\n left_key = first(left.__dask_keys__())\n dsk = {\n (name, i): (apply, merge_chunk, [left_key, right_key], kwargs)\n for i, right_key in enumerate(right.__dask_keys__())\n }\n\n if kwargs.get(\"right_index\") or right._contains_index_name(\n kwargs.get(\"right_on\")\n ):\n divisions = right.divisions\n else:\n divisions = [None for _ in right.divisions]\n\n elif right.npartitions == 1 and kwargs[\"how\"] in allowed_left:\n right_key = first(right.__dask_keys__())\n dsk = {\n (name, i): (apply, merge_chunk, [left_key, right_key], kwargs)\n for i, left_key in enumerate(left.__dask_keys__())\n }\n\n if kwargs.get(\"left_index\") or left._contains_index_name(kwargs.get(\"left_on\")):\n divisions = left.divisions\n else:\n divisions = [None for _ in left.divisions]\n else:\n raise NotImplementedError(\n \"single_partition_join has no fallback for invalid calls\"\n )\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[left, right])\n return new_dd_object(graph, name, meta, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_warn_dtype_mismatch_warn_dtype_mismatch.if_all_col_in_left_column.if_dtype_mism_.warnings_warn_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_warn_dtype_mismatch_warn_dtype_mismatch.if_all_col_in_left_column.if_dtype_mism_.warnings_warn_", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 377, "end_line": 405, "span_ids": ["warn_dtype_mismatch"], "tokens": 233}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def warn_dtype_mismatch(left, right, left_on, right_on):\n \"\"\"Checks for merge column dtype mismatches and throws a warning (#4574)\"\"\"\n\n if not isinstance(left_on, list):\n left_on = [left_on]\n if not isinstance(right_on, list):\n right_on = [right_on]\n\n if all(col in left.columns for col in left_on) and all(\n col in right.columns for col in right_on\n ):\n dtype_mism = [\n ((lo, ro), left.dtypes[lo], right.dtypes[ro])\n for lo, ro in zip(left_on, right_on)\n if not is_dtype_equal(left.dtypes[lo], right.dtypes[ro])\n ]\n\n if dtype_mism:\n col_tb = asciitable(\n (\"Merge columns\", \"left dtype\", \"right dtype\"), dtype_mism\n )\n\n warnings.warn(\n (\n \"Merging dataframes with merge column data \"\n \"type mismatches: \\n{}\\nCast dtypes explicitly to \"\n \"avoid unexpected results.\"\n ).format(col_tb)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_merge_merge._Both_sides_indexed": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_merge_merge._Both_sides_indexed", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 408, "end_line": 478, "span_ids": ["merge"], "tokens": 506}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@wraps(pd.merge)\ndef merge(\n left,\n right,\n how=\"inner\",\n on=None,\n left_on=None,\n right_on=None,\n left_index=False,\n right_index=False,\n suffixes=(\"_x\", \"_y\"),\n indicator=False,\n npartitions=None,\n shuffle=None,\n max_branch=None,\n):\n for o in [on, left_on, right_on]:\n if isinstance(o, _Frame):\n raise NotImplementedError(\n \"Dask collections not currently allowed in merge columns\"\n )\n if not on and not left_on and not right_on and not left_index and not right_index:\n on = [c for c in left.columns if c in right.columns]\n if not on:\n left_index = right_index = True\n\n if on and not left_on and not right_on:\n left_on = right_on = on\n on = None\n\n if isinstance(left, (pd.Series, pd.DataFrame)) and isinstance(\n right, (pd.Series, pd.DataFrame)\n ):\n return pd.merge(\n left,\n right,\n how=how,\n on=on,\n left_on=left_on,\n right_on=right_on,\n left_index=left_index,\n right_index=right_index,\n suffixes=suffixes,\n indicator=indicator,\n )\n\n # Transform pandas objects into dask.dataframe objects\n if not is_dask_collection(left):\n if right_index and left_on: # change to join on index\n left = left.set_index(left[left_on])\n left_on = False\n left_index = True\n left = from_pandas(left, npartitions=1) # turn into DataFrame\n\n if not is_dask_collection(right):\n if left_index and right_on: # change to join on index\n right = right.set_index(right[right_on])\n right_on = False\n right_index = True\n right = from_pandas(right, npartitions=1) # turn into DataFrame\n\n # Both sides are now dd.DataFrame or dd.Series objects\n merge_indexed_left = (\n left_index or left._contains_index_name(left_on)\n ) and left.known_divisions\n\n merge_indexed_right = (\n right_index or right._contains_index_name(right_on)\n ) and right.known_divisions\n\n # Both sides indexed\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_merge.if_merge_indexed_left_and_merge.if_merge_indexed_left_and.else_.return.hash_join_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_merge.if_merge_indexed_left_and_merge.if_merge_indexed_left_and.else_.return.hash_join_", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 525, "end_line": 623, "span_ids": ["merge"], "tokens": 706}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@wraps(pd.merge)\ndef merge(\n left,\n right,\n how=\"inner\",\n on=None,\n left_on=None,\n right_on=None,\n left_index=False,\n right_index=False,\n suffixes=(\"_x\", \"_y\"),\n indicator=False,\n npartitions=None,\n shuffle=None,\n max_branch=None,\n):\n # ... other code\n if merge_indexed_left and merge_indexed_right: # Do indexed join\n return merge_indexed_dataframes(\n left,\n right,\n how=how,\n suffixes=suffixes,\n indicator=indicator,\n left_on=left_on,\n right_on=right_on,\n left_index=left_index,\n right_index=right_index,\n )\n\n # Single partition on one side\n # Note that cudf supports \"leftsemi\" and \"leftanti\" joins\n elif (\n left.npartitions == 1\n and how in allowed_right\n or right.npartitions == 1\n and how in allowed_left\n ):\n return single_partition_join(\n left,\n right,\n how=how,\n right_on=right_on,\n left_on=left_on,\n left_index=left_index,\n right_index=right_index,\n suffixes=suffixes,\n indicator=indicator,\n )\n\n # One side is indexed, the other not\n elif (\n left_index\n and left.known_divisions\n and not right_index\n or right_index\n and right.known_divisions\n and not left_index\n ):\n left_empty = left._meta_nonempty\n right_empty = right._meta_nonempty\n meta = left_empty.merge(\n right_empty,\n how=how,\n on=on,\n left_on=left_on,\n right_on=right_on,\n left_index=left_index,\n right_index=right_index,\n suffixes=suffixes,\n indicator=indicator,\n )\n categorical_columns = meta.select_dtypes(include=\"category\").columns\n\n if merge_indexed_left and left.known_divisions:\n right = rearrange_by_divisions(\n right, right_on, left.divisions, max_branch, shuffle=shuffle\n )\n left = left.clear_divisions()\n elif merge_indexed_right and right.known_divisions:\n left = rearrange_by_divisions(\n left, left_on, right.divisions, max_branch, shuffle=shuffle\n )\n right = right.clear_divisions()\n return map_partitions(\n merge_chunk,\n left,\n right,\n meta=meta,\n how=how,\n on=on,\n left_on=left_on,\n right_on=right_on,\n left_index=left_index,\n right_index=right_index,\n suffixes=suffixes,\n indicator=indicator,\n empty_index_dtype=meta.index.dtype,\n categorical_columns=categorical_columns,\n )\n # Catch all hash join\n else:\n if left_on and right_on:\n warn_dtype_mismatch(left, right, left_on, right_on)\n\n return hash_join(\n left,\n left.index if left_index else left_on,\n 
right,\n right.index if right_index else right_on,\n how,\n npartitions,\n suffixes,\n shuffle=shuffle,\n indicator=indicator,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_None_5_compute_heads.if_by_is_None_.else_.return.suffix_reduction_most_rec": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_None_5_compute_heads.if_by_is_None_.else_.return.suffix_reduction_most_rec", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 577, "end_line": 625, "span_ids": ["most_recent_head_summary", "most_recent_tail", "compute_heads", "compute_tails", "merge", "most_recent_head", "most_recent_tail_summary"], "tokens": 318}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "###############################################################\n# ASOF Join\n###############################################################\n\n\ndef most_recent_tail(left, right):\n if len(right.index) == 0:\n return left\n return right.tail(1)\n\n\ndef most_recent_tail_summary(left, right, by=None):\n return pd.concat([left, right]).drop_duplicates(subset=by, keep=\"last\")\n\n\ndef compute_tails(ddf, by=None):\n \"\"\"For each partition, returns the last row of the most recent nonempty\n partition.\n \"\"\"\n empty = ddf._meta.iloc[0:0]\n\n if by is None:\n return prefix_reduction(most_recent_tail, ddf, empty)\n else:\n kwargs = {\"by\": by}\n return prefix_reduction(most_recent_tail_summary, ddf, empty, **kwargs)\n\n\ndef most_recent_head(left, right):\n if len(left.index) == 0:\n return right\n return left.head(1)\n\n\ndef most_recent_head_summary(left, right, by=None):\n return pd.concat([left, right]).drop_duplicates(subset=by, keep=\"first\")\n\n\ndef compute_heads(ddf, by=None):\n \"\"\"For each partition, returns the first row of the next nonempty\n partition.\n \"\"\"\n empty = ddf._meta.iloc[0:0]\n\n if by is None:\n return suffix_reduction(most_recent_head, ddf, empty)\n else:\n kwargs = {\"by\": by}\n return suffix_reduction(most_recent_head_summary, ddf, empty, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_pair_partitions_pair_partitions.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_pair_partitions_pair_partitions.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 628, "end_line": 661, "span_ids": ["pair_partitions"], "tokens": 349}, "excluded_embed_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pair_partitions(L, R):\n \"\"\"Returns which partitions to pair for the merge_asof algorithm and the\n bounds on which to split them up\n \"\"\"\n result = []\n\n n, m = len(L) - 1, len(R) - 1\n i, j = 0, -1\n while j + 1 < m and R[j + 1] <= L[i]:\n j += 1\n J = []\n while i < n:\n partition = max(0, min(m - 1, j))\n lower = R[j] if j >= 0 and R[j] > L[i] else None\n upper = (\n R[j + 1]\n if j + 1 < m\n and (R[j + 1] < L[i + 1] or R[j + 1] == L[i + 1] and i == n - 1)\n else None\n )\n\n J.append((partition, lower, upper))\n\n i1 = i + 1 if j + 1 == m or (i + 1 < n and R[j + 1] >= L[i + 1]) else i\n j1 = j + 1 if i + 1 == n or (j + 1 < m and L[i + 1] >= R[j + 1]) else j\n if i1 > i:\n result.append(J)\n J = []\n elif i == n - 1 and R[j1] > L[n]:\n result.append(J)\n break\n i, j = i1, j1\n\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_merge_asof_padded_merge_asof_padded.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_merge_asof_padded_merge_asof_padded.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 664, "end_line": 678, "span_ids": ["merge_asof_padded"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def merge_asof_padded(left, right, prev=None, next=None, **kwargs):\n \"\"\" merge_asof but potentially adding rows to the beginning/end of right \"\"\"\n frames = []\n if prev is not None:\n frames.append(prev)\n frames.append(right)\n if next is not None:\n frames.append(next)\n\n frame = pd.concat(frames)\n result = pd.merge_asof(left, frame, **kwargs)\n # pd.merge_asof() resets index name (and dtype) if left is empty df\n if result.index.name != left.index.name:\n result.index.name = left.index.name\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_get_unsorted_columns_concat_and_unsort.return.pd_concat_frames_columns": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_get_unsorted_columns_concat_and_unsort.return.pd_concat_frames_columns", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 681, "end_line": 705, "span_ids": 
["concat_and_unsort", "get_unsorted_columns"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_unsorted_columns(frames):\n \"\"\"\n Determine the unsorted colunn order.\n\n This should match the output of concat([frames], sort=False)\n for pandas >=0.23\n \"\"\"\n new_columns = pd.concat([frame._meta for frame in frames]).columns\n order = []\n for frame in frames:\n order.append(new_columns.get_indexer_for(frame.columns))\n\n order = np.concatenate(order)\n order = pd.unique(order)\n order = new_columns.take(order)\n return order\n\n\ndef concat_and_unsort(frames, columns):\n \"\"\"\n Compatibility concat for Pandas <0.23.0\n\n Concatenates and then selects the desired (unsorted) column order.\n \"\"\"\n return pd.concat(frames)[columns]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py__concat_compat__concat_compat.if_PANDAS_GT_100_.else_.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py__concat_compat__concat_compat.if_PANDAS_GT_100_.else_.return._", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 708, "end_line": 727, "span_ids": ["_concat_compat"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _concat_compat(frames, left, right):\n if PANDAS_GT_100:\n # join_axes removed\n return (pd.concat, frames, 0, \"outer\", False, None, None, None, False, False)\n else:\n # (axis, join, join_axis, ignore_index, keys, levels, names, verify_integrity, sort)\n # we only care about sort, to silence warnings.\n return (\n pd.concat,\n frames,\n 0,\n \"outer\",\n None,\n False,\n None,\n None,\n None,\n False,\n False,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_merge_asof_indexed_merge_asof_indexed.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_merge_asof_indexed_merge_asof_indexed.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 730, "end_line": 763, "span_ids": ["merge_asof_indexed"], "tokens": 329}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", 
"start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def merge_asof_indexed(left, right, **kwargs):\n dsk = dict()\n name = \"asof-join-indexed-\" + tokenize(left, right, **kwargs)\n meta = pd.merge_asof(left._meta_nonempty, right._meta_nonempty, **kwargs)\n\n dependencies = [left, right]\n tails = heads = None\n if kwargs[\"direction\"] in [\"backward\", \"nearest\"]:\n tails = compute_tails(right, by=kwargs[\"right_by\"])\n dependencies.append(tails)\n if kwargs[\"direction\"] in [\"forward\", \"nearest\"]:\n heads = compute_heads(right, by=kwargs[\"right_by\"])\n dependencies.append(heads)\n\n for i, J in enumerate(pair_partitions(left.divisions, right.divisions)):\n frames = []\n for j, lower, upper in J:\n slice = (methods.boundary_slice, (left._name, i), lower, upper, False)\n tail = (tails._name, j) if tails is not None else None\n head = (heads._name, j) if heads is not None else None\n frames.append(\n (\n apply,\n merge_asof_padded,\n [slice, (right._name, j), tail, head],\n kwargs,\n )\n )\n args = _concat_compat(frames, left, right)\n dsk[(name, i)] = args\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)\n result = new_dd_object(graph, name, meta, left.divisions)\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_merge_asof_merge_asof.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_merge_asof_merge_asof.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 766, "end_line": 854, "span_ids": ["merge_asof"], "tokens": 664}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@wraps(pd.merge_asof)\ndef merge_asof(\n left,\n right,\n on=None,\n left_on=None,\n right_on=None,\n left_index=False,\n right_index=False,\n by=None,\n left_by=None,\n right_by=None,\n suffixes=(\"_x\", \"_y\"),\n tolerance=None,\n allow_exact_matches=True,\n direction=\"backward\",\n):\n if direction not in [\"backward\", \"forward\", \"nearest\"]:\n raise ValueError(\n \"Invalid merge_asof direction. 
Choose from 'backward'\"\n \" 'forward', or 'nearest'\"\n )\n\n kwargs = {\n \"on\": on,\n \"left_on\": left_on,\n \"right_on\": right_on,\n \"left_index\": left_index,\n \"right_index\": right_index,\n \"by\": by,\n \"left_by\": left_by,\n \"right_by\": right_by,\n \"suffixes\": suffixes,\n \"tolerance\": tolerance,\n \"allow_exact_matches\": allow_exact_matches,\n \"direction\": direction,\n }\n\n if left is None or right is None:\n raise ValueError(\"Cannot merge_asof on empty DataFrames\")\n\n # if is_dataframe_like(left) and is_dataframe_like(right):\n if isinstance(left, pd.DataFrame) and isinstance(right, pd.DataFrame):\n return pd.merge_asof(left, right, **kwargs)\n\n if on is not None:\n left_on = right_on = on\n for o in [left_on, right_on]:\n if isinstance(o, _Frame):\n raise NotImplementedError(\n \"Dask collections not currently allowed in merge columns\"\n )\n\n if not is_dask_collection(left):\n left = from_pandas(left, npartitions=1)\n ixname = ixcol = divs = None\n if left_on is not None:\n if right_index:\n divs = left.divisions if left.known_divisions else None\n ixname = left.index.name\n left = left.reset_index()\n ixcol = left.columns[0]\n left = left.set_index(left_on, sorted=True)\n\n if not is_dask_collection(right):\n right = from_pandas(right, npartitions=1)\n if right_on is not None:\n right = right.set_index(right_on, sorted=True)\n\n if by is not None:\n kwargs[\"left_by\"] = kwargs[\"right_by\"] = by\n\n del kwargs[\"on\"], kwargs[\"left_on\"], kwargs[\"right_on\"], kwargs[\"by\"]\n kwargs[\"left_index\"] = kwargs[\"right_index\"] = True\n\n if not left.known_divisions or not right.known_divisions:\n raise ValueError(\"merge_asof input must be sorted!\")\n\n result = merge_asof_indexed(left, right, **kwargs)\n if left_on or right_on:\n result = result.reset_index()\n if ixcol is not None:\n if divs is not None:\n result = result.set_index(ixcol, sorted=True, divisions=divs)\n else:\n result = result.map_partitions(M.set_index, ixcol)\n result = result.map_partitions(M.rename_axis, ixname)\n\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_None_8_concat_unindexed_dataframes.return.new_dd_object_graph_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_None_8_concat_unindexed_dataframes.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 857, "end_line": 879, "span_ids": ["merge_asof", "concat_and_check", "concat_unindexed_dataframes"], "tokens": 175}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "###############################################################\n# Concat\n###############################################################\n\n\ndef concat_and_check(dfs):\n if len(set(map(len, dfs))) != 1:\n raise ValueError(\"Concatenated DataFrames of different lengths\")\n return 
methods.concat(dfs, axis=1)\n\n\ndef concat_unindexed_dataframes(dfs, **kwargs):\n name = \"concat-\" + tokenize(*dfs)\n\n dsk = {\n (name, i): (concat_and_check, [(df._name, i) for df in dfs])\n for i in range(dfs[0].npartitions)\n }\n\n meta = methods.concat([df._meta for df in dfs], axis=1, **kwargs)\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=dfs)\n return new_dd_object(graph, name, meta, dfs[0].divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_concat_indexed_dataframes_concat_indexed_dataframes.return.new_dd_object_dsk_name_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_concat_indexed_dataframes_concat_indexed_dataframes.return.new_dd_object_dsk_name_", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 882, "end_line": 909, "span_ids": ["concat_indexed_dataframes"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def concat_indexed_dataframes(dfs, axis=0, join=\"outer\", **kwargs):\n \"\"\" Concatenate indexed dataframes together along the index \"\"\"\n warn = axis != 0\n meta = methods.concat(\n [df._meta for df in dfs], axis=axis, join=join, filter_warning=warn, **kwargs\n )\n empties = [strip_unknown_categories(df._meta) for df in dfs]\n\n dfs2, divisions, parts = align_partitions(*dfs)\n\n name = \"concat-indexed-\" + tokenize(join, *dfs)\n\n parts2 = [\n [df if df is not None else empty for df, empty in zip(part, empties)]\n for part in parts\n ]\n\n filter_warning = True\n uniform = False\n\n dsk = dict(\n ((name, i), (methods.concat, part, axis, join, uniform, filter_warning))\n for i, part in enumerate(parts2)\n )\n for df in dfs2:\n dsk.update(df.dask)\n\n return new_dd_object(dsk, name, meta, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_stack_partitions_stack_partitions.return.new_dd_object_dsk_name_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_stack_partitions_stack_partitions.return.new_dd_object_dsk_name_", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 912, "end_line": 967, "span_ids": ["stack_partitions"], "tokens": 500}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "def stack_partitions(dfs, divisions, join=\"outer\", **kwargs):\n \"\"\"Concatenate partitions on axis=0 by doing a simple stack\"\"\"\n # Use _meta_nonempty as pandas.concat will incorrectly cast float to datetime\n # for empty data frames. See https://github.com/pandas-dev/pandas/issues/32934.\n meta = make_meta(\n methods.concat(\n [df._meta_nonempty for df in dfs], join=join, filter_warning=False, **kwargs\n )\n )\n empty = strip_unknown_categories(meta)\n\n name = \"concat-{0}\".format(tokenize(*dfs))\n dsk = {}\n i = 0\n for df in dfs:\n # dtypes of all dfs need to be coherent\n # refer to https://github.com/dask/dask/issues/4685\n # and https://github.com/dask/dask/issues/5968.\n if is_dataframe_like(df):\n\n shared_columns = df.columns.intersection(meta.columns)\n needs_astype = [\n col\n for col in shared_columns\n if df[col].dtype != meta[col].dtype\n and not is_categorical_dtype(df[col].dtype)\n ]\n\n if needs_astype:\n # Copy to avoid mutating the caller inplace\n df = df.copy()\n df[needs_astype] = df[needs_astype].astype(meta[needs_astype].dtypes)\n\n if is_series_like(df) and is_series_like(meta):\n if not df.dtype == meta.dtype and not is_categorical_dtype(df.dtype):\n df = df.astype(meta.dtype)\n else:\n pass # TODO: there are other non-covered cases here\n dsk.update(df.dask)\n # An error will be raised if the schemas or categories don't match. In\n # this case we need to pass along the meta object to transform each\n # partition, so they're all equivalent.\n try:\n df._meta == meta\n match = True\n except (ValueError, TypeError):\n match = False\n\n for key in df.__dask_keys__():\n if match:\n dsk[(name, i)] = key\n else:\n dsk[(name, i)] = (methods.concat, [empty, key], 0, join)\n i += 1\n\n return new_dd_object(dsk, name, meta, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_concat_concat._Concatenate_DataFrames": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_concat_concat._Concatenate_DataFrames", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 970, "end_line": 1066, "span_ids": ["concat"], "tokens": 900}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def concat(\n dfs,\n axis=0,\n join=\"outer\",\n interleave_partitions=False,\n ignore_unknown_divisions=False,\n **kwargs\n):\n \"\"\"Concatenate DataFrames along rows.\n\n - When axis=0 (default), concatenate DataFrames row-wise:\n\n - If all divisions are known and ordered, concatenate DataFrames keeping\n divisions. 
When divisions are not ordered, specifying\n interleave_partitions=True allows concatenating the divisions one by one.\n\n - If any division is unknown, concatenate DataFrames resetting their\n divisions to unknown (None)\n\n - When axis=1, concatenate DataFrames column-wise:\n\n - Allowed if all divisions are known.\n\n - If any division is unknown, a ValueError is raised.\n\n Parameters\n ----------\n dfs : list\n List of dask.DataFrames to be concatenated\n axis : {0, 1, 'index', 'columns'}, default 0\n The axis to concatenate along\n join : {'inner', 'outer'}, default 'outer'\n How to handle indexes on other axis\n interleave_partitions : bool, default False\n Whether to concatenate DataFrames ignoring their order. If True,\n divisions are concatenated one by one.\n ignore_unknown_divisions : bool, default False\n By default a warning is raised if any input has unknown divisions.\n Set to True to disable this warning.\n\n Notes\n -----\n This differs from ``pd.concat`` when concatenating Categoricals\n with different categories. Pandas currently coerces those to objects\n before concatenating. Coercing to objects is very expensive for large\n arrays, so dask preserves the Categoricals by taking the union of\n the categories.\n\n Examples\n --------\n If all divisions are known and ordered, divisions are kept.\n\n >>> a # doctest: +SKIP\n dd.DataFrame\n >>> b # doctest: +SKIP\n dd.DataFrame\n >>> dd.concat([a, b]) # doctest: +SKIP\n dd.DataFrame\n\n Unable to concatenate if divisions are not ordered.\n\n >>> a # doctest: +SKIP\n dd.DataFrame\n >>> b # doctest: +SKIP\n dd.DataFrame\n >>> dd.concat([a, b]) # doctest: +SKIP\n ValueError: All inputs have known divisions which cannot be concatenated\n in order. Specify interleave_partitions=True to ignore order\n\n Specify interleave_partitions=True to ignore the division order.\n\n >>> dd.concat([a, b], interleave_partitions=True) # doctest: +SKIP\n dd.DataFrame\n\n If any division is unknown, the resulting divisions will be unknown\n\n >>> a # doctest: +SKIP\n dd.DataFrame\n >>> b # doctest: +SKIP\n dd.DataFrame\n >>> dd.concat([a, b]) # doctest: +SKIP\n dd.DataFrame\n\n By default concatenating with unknown divisions will raise a warning.\n Set ``ignore_unknown_divisions=True`` to disable this:\n\n >>> dd.concat([a, b], ignore_unknown_divisions=True) # doctest: +SKIP\n dd.DataFrame\n\n Different categoricals are unioned\n\n >>> dd.concat([ # doctest: +SKIP\n ... dd.from_pandas(pd.Series(['a', 'b'], dtype='category'), 1),\n ... dd.from_pandas(pd.Series(['a', 'c'], dtype='category'), 1),\n ... ], interleave_partitions=True).dtype\n CategoricalDtype(categories=['a', 'b', 'c'], ordered=False)\n \"\"\"\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_concat.if_not_isinstance_dfs_li_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_concat.if_not_isinstance_dfs_li_", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1067, "end_line": 1126, "span_ids": ["concat"], "tokens": 603}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def concat(\n dfs,\n axis=0,\n join=\"outer\",\n interleave_partitions=False,\n ignore_unknown_divisions=False,\n **kwargs\n):\n if not isinstance(dfs, list):\n raise TypeError(\"dfs must be a list of DataFrames/Series objects\")\n if len(dfs) == 0:\n raise ValueError(\"No objects to concatenate\")\n if len(dfs) == 1:\n if axis == 1 and isinstance(dfs[0], Series):\n return dfs[0].to_frame()\n else:\n return dfs[0]\n\n if join not in (\"inner\", \"outer\"):\n raise ValueError(\"'join' must be 'inner' or 'outer'\")\n\n axis = DataFrame._validate_axis(axis)\n dasks = [df for df in dfs if isinstance(df, _Frame)]\n dfs = _maybe_from_pandas(dfs)\n\n if axis == 1:\n if all(df.known_divisions for df in dasks):\n return concat_indexed_dataframes(dfs, axis=axis, join=join, **kwargs)\n elif (\n len(dasks) == len(dfs)\n and all(not df.known_divisions for df in dfs)\n and len({df.npartitions for df in dasks}) == 1\n ):\n if not ignore_unknown_divisions:\n warnings.warn(\n \"Concatenating dataframes with unknown divisions.\\n\"\n \"We're assuming that the indexes of each dataframes\"\n \" are \\n aligned. 
This assumption is not generally \"\n \"safe.\"\n )\n return concat_unindexed_dataframes(dfs, **kwargs)\n else:\n raise ValueError(\n \"Unable to concatenate DataFrame with unknown \"\n \"divisions when specifying axis=1\"\n )\n else:\n if all(df.known_divisions for df in dasks):\n # each DataFrame's division must be greater than previous one\n if all(\n dfs[i].divisions[-1] < dfs[i + 1].divisions[0]\n for i in range(len(dfs) - 1)\n ):\n divisions = []\n for df in dfs[:-1]:\n # remove last to concatenate with next\n divisions += df.divisions[:-1]\n divisions += dfs[-1].divisions\n return stack_partitions(dfs, divisions, join=join, **kwargs)\n elif interleave_partitions:\n return concat_indexed_dataframes(dfs, join=join, **kwargs)\n else:\n divisions = [None] * (sum([df.npartitions for df in dfs]) + 1)\n return stack_partitions(dfs, divisions, join=join, **kwargs)\n else:\n divisions = [None] * (sum([df.npartitions for df in dfs]) + 1)\n return stack_partitions(dfs, divisions, join=join, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/numeric.py_pd_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/numeric.py_pd_", "embedding": null, "metadata": {"file_path": "dask/dataframe/numeric.py", "file_name": "numeric.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 55, "span_ids": ["imports", "to_numeric"], "tokens": 355}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pandas as pd\nfrom pandas.api.types import is_scalar as pd_is_scalar\n\nfrom ..utils import derived_from\nfrom ..delayed import delayed\nfrom ..array import Array\nfrom .core import Series\n\n\n__all__ = (\"to_numeric\",)\n\n\n@derived_from(pd, ua_args=[\"downcast\"])\ndef to_numeric(arg, errors=\"raise\", meta=None):\n \"\"\"\n Return type depends on input. 
Delayed if scalar, otherwise same as input.\n For errors, only \"raise\" and \"coerce\" are allowed.\n \"\"\"\n if errors not in (\"raise\", \"coerce\"):\n raise ValueError(\"invalid error value specified\")\n\n is_series = isinstance(arg, Series)\n is_array = isinstance(arg, Array)\n is_scalar = pd_is_scalar(arg)\n\n if not any([is_series, is_array, is_scalar]):\n raise TypeError(\n \"arg must be a list, tuple, dask.array.Array, or dask.dataframe.Series\"\n )\n\n if meta is not None:\n if is_scalar:\n raise KeyError(\"``meta`` is not allowed when input is a scalar.\")\n else:\n if is_series or is_array:\n meta = pd.to_numeric(arg._meta)\n\n if is_series:\n return arg.map_partitions(\n pd.to_numeric,\n token=arg._name + \"-to_numeric\",\n meta=meta,\n enforce_metadata=False,\n errors=errors,\n )\n if is_array:\n return arg.map_blocks(\n pd.to_numeric,\n name=arg._name + \"-to_numeric\",\n meta=meta,\n errors=errors,\n )\n if is_scalar:\n return delayed(pd.to_numeric, pure=True)(arg, errors=errors)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/optimize.py__Dataframe_optimizatio_optimize.return.dsk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/optimize.py__Dataframe_optimizatio_optimize.return.dsk", "embedding": null, "metadata": {"file_path": "dask/dataframe/optimize.py", "file_name": "optimize.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 41, "span_ids": ["optimize", "docstring"], "tokens": 299}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\" Dataframe optimizations \"\"\"\nimport operator\n\nfrom dask.base import tokenize\nfrom ..optimization import cull, fuse\nfrom .. 
import config, core\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..utils import ensure_dict\nfrom ..blockwise import optimize_blockwise, fuse_roots, Blockwise\n\n\ndef optimize(dsk, keys, **kwargs):\n if not isinstance(keys, (list, set)):\n keys = [keys]\n keys = list(core.flatten(keys))\n\n if not isinstance(dsk, HighLevelGraph):\n dsk = HighLevelGraph.from_collections(id(dsk), dsk, dependencies=())\n\n dsk = optimize_read_parquet_getitem(dsk, keys=keys)\n dsk = optimize_blockwise(dsk, keys=keys)\n dsk = fuse_roots(dsk, keys=keys)\n dsk = dsk.cull(set(keys))\n\n if not config.get(\"optimization.fuse.active\"):\n return dsk\n\n dependencies = dsk.get_all_dependencies()\n dsk = ensure_dict(dsk)\n\n fuse_subgraphs = config.get(\"optimization.fuse.subgraphs\")\n if fuse_subgraphs is None:\n fuse_subgraphs = True\n dsk, _ = fuse(\n dsk,\n keys,\n dependencies=dependencies,\n fuse_subgraphs=fuse_subgraphs,\n )\n dsk, _ = cull(dsk, keys)\n return dsk", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/optimize.py_optimize_read_parquet_getitem_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/optimize.py_optimize_read_parquet_getitem_", "embedding": null, "metadata": {"file_path": "dask/dataframe/optimize.py", "file_name": "optimize.py", "file_type": "text/x-python", "category": "implementation", "start_line": 44, "end_line": 132, "span_ids": ["optimize_read_parquet_getitem"], "tokens": 674}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def optimize_read_parquet_getitem(dsk, keys):\n # find the keys to optimize\n from .io.parquet.core import BlockwiseParquet\n\n read_parquets = [\n k for k, v in dsk.layers.items() if isinstance(v, BlockwiseParquet)\n ]\n\n layers = dsk.layers.copy()\n dependencies = dsk.dependencies.copy()\n\n for k in read_parquets:\n columns = set()\n update_blocks = {}\n\n for dep in dsk.dependents[k]:\n block = dsk.layers[dep]\n\n # Check if we're a read_parquet followed by a getitem\n if not isinstance(block, Blockwise):\n # getitem are Blockwise...\n return dsk\n\n if len(block.dsk) != 1:\n # ... with a single item...\n return dsk\n\n if list(block.dsk.values())[0][0] != operator.getitem:\n # ... where this value is __getitem__...\n return dsk\n\n if any(block.output == x[0] for x in keys if isinstance(x, tuple)):\n # ... 
but bail on the optimization if the getitem is what's requested\n # These keys are structured like [('getitem-', 0), ...]\n # so we check for the first item of the tuple.\n # See https://github.com/dask/dask/issues/5893\n return dsk\n\n block_columns = block.indices[1][0]\n if isinstance(block_columns, str):\n block_columns = [block_columns]\n\n columns |= set(block_columns)\n update_blocks[dep] = block\n\n old = layers[k]\n\n if columns and columns < set(old.meta.columns):\n columns = list(columns)\n meta = old.meta[columns]\n name = \"read-parquet-\" + tokenize(old.name, columns)\n assert len(update_blocks)\n\n for block_key, block in update_blocks.items():\n # (('read-parquet-old', (.,)), ( ... )) ->\n # (('read-parquet-new', (.,)), ( ... ))\n new_indices = ((name, block.indices[0][1]), block.indices[1])\n numblocks = {name: block.numblocks[old.name]}\n new_block = Blockwise(\n block.output,\n block.output_indices,\n block.dsk,\n new_indices,\n numblocks,\n block.concatenate,\n block.new_axes,\n )\n layers[block_key] = new_block\n dependencies[block_key] = {name}\n dependencies[name] = dependencies.pop(k)\n\n else:\n # Things like df[df.A == 'a'], where the argument to\n # getitem is not a column name\n name = old.name\n meta = old.meta\n columns = list(meta.columns)\n\n new = BlockwiseParquet(\n name, old.engine, old.fs, meta, columns, old.index, old.parts, old.kwargs\n )\n layers[name] = new\n if name != old.name:\n del layers[old.name]\n\n new_hlg = HighLevelGraph(layers, dependencies)\n return new_hlg", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py__Determine_new_partitio_math": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py__Determine_new_partitio_math", "embedding": null, "metadata": {"file_path": "dask/dataframe/partitionquantiles.py", "file_name": "partitionquantiles.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 71, "span_ids": ["imports", "docstring"], "tokens": 943}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"Determine new partition divisions using approximate percentiles.\n\nWe use a custom algorithm to calculate approximate, evenly-distributed\npercentiles of arbitrarily-ordered data for any dtype in a distributed\nfashion with one pass over the data. This is used to determine new\npartition divisions when changing the index of a dask.dataframe. We claim\nno statistical guarantees, but we use a variety of heuristics to try to\nprovide reliable, robust results that are \"good enough\" and can scale to\nlarge number of partitions.\n\nOur approach is similar to standard approaches such as t- and q-digest,\nGK, and sampling-based algorithms, which consist of three parts:\n\n1. **Summarize:** create summaries of subsets of data\n2. **Merge:** combine summaries to make a new summary\n3. 
**Compress:** periodically compress a summary into a smaller summary\n\nWe summarize the data in each partition by calculating several percentiles.\nThe value at each percentile is given a weight proportional to the length\nof the partition and the differences between the current percentile and\nthe adjacent percentiles. Merging summaries is simply a ``merge_sorted``\nof the values and their weights, which we do with a reduction tree.\n\nPercentiles are a good choice for our case, because we are given a numpy\narray of the partition's data, and computing percentiles is a relatively cheap\noperation. Moreover, percentiles are, by definition, much less\nsusceptible to the underlying distribution of the data, so the weights\ngiven to each value--even across partitions--should be comparable.\n\nLet us describe this to a child of five. We are given many small cubes\n(of equal size) with numbers on them. Split these into many piles. This\nis like the original data. Let's sort and stack the cubes from one of the\npiles. Next, we are given a bunch of unlabeled blocks of different sizes,\nand most are much larger than the original cubes. Stack these blocks\nuntil they're the same height as our first stack. Let's write a number on\neach block of the new stack. To do this, choose the number of the cube in\nthe first stack that is located in the middle of an unlabeled block. We\nare finished with this stack once all blocks have a number written on them.\nRepeat this for all the piles of cubes. Finished already? Great! Now\ntake all the stacks of the larger blocks you wrote on and throw them into\na single pile. We'll be sorting these blocks next, which may be easier if\nyou carefully move the blocks over and organize... ah, nevermind--too late.\nOkay, sort and stack all the blocks from that amazing, disorganized pile\nyou just made. This will be very tall, so we had better stack it sideways\non the floor like so. This will also make it easier for us to split the\nstack into groups of approximately equal size, which is our final task...\n\nThis, in a nutshell, is the algorithm we deploy. The main difference\nis that we don't always assign a block the number at its median (ours\nfluctuates around the median). The numbers at the edges of the final\ngroups are what we use as divisions for repartitioning. We also need\nthe overall min and max, so we take the 0th and 100th percentile of\neach partition, and another sample near each edge so we don't give\ndisproportionate weights to extreme values.\n\nChoosing appropriate percentiles to take in each partition is where things\nget interesting. The data is arbitrarily ordered, which means it may be\nsorted, random, or follow some pathological distribution--who knows. We\nhope all partitions are of similar length, but we ought to expect some\nvariation in lengths. The number of partitions may also be changing\nsignificantly, which could affect the optimal choice of percentiles. For\nimproved robustness, we use both evenly-distributed and random percentiles.\nIf the number of partitions isn't changing, then the total number of\npercentiles across all partitions scales as ``npartitions**1.5``. 
Although\nwe only have a simple compression operation (step 3 above) that combines\nweights of equal values, a more sophisticated one could be added if needed,\nsuch as for extremely large ``npartitions`` or if we find we need to\nincrease the sample size for each partition.\n\n\"\"\"\nimport math", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_np_sample_percentiles.return.qs": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_np_sample_percentiles.return.qs", "embedding": null, "metadata": {"file_path": "dask/dataframe/partitionquantiles.py", "file_name": "partitionquantiles.py", "file_type": "text/x-python", "category": "implementation", "start_line": 72, "end_line": 157, "span_ids": ["sample_percentiles", "imports"], "tokens": 890}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\nimport pandas as pd\nfrom pandas.api.types import is_datetime64tz_dtype\n\nfrom tlz import merge, merge_sorted, take\n\nfrom ..utils import random_state_data\nfrom ..base import tokenize\nfrom .core import Series\nfrom .utils import is_categorical_dtype\n\n\ndef sample_percentiles(num_old, num_new, chunk_length, upsample=1.0, random_state=None):\n \"\"\"Construct percentiles for a chunk for repartitioning.\n\n Adapt the number of total percentiles calculated based on the number\n of current and new partitions. Returned percentiles include equally\n spaced percentiles between [0, 100], and random percentiles. See\n detailed discussion below.\n\n Parameters\n ----------\n num_old: int\n Number of partitions of the current object\n num_new: int\n Number of partitions of the new object\n chunk_length: int\n Number of rows of the partition\n upsample : float\n Multiplicative factor to increase the number of samples\n\n Returns\n -------\n qs : numpy.ndarray of sorted percentiles between 0, 100\n\n Constructing ordered (i.e., not hashed) partitions is hard. Calculating\n approximate percentiles for generic objects in an out-of-core fashion is\n also hard. Fortunately, partition boundaries don't need to be perfect\n in order for partitioning to be effective, so we strive for a \"good enough\"\n method that can scale to many partitions and is reasonably well-behaved for\n a wide variety of scenarios.\n\n Two similar approaches come to mind: (1) take a subsample of every\n partition, then find the best new partitions for the combined subsamples;\n and (2) calculate equally-spaced percentiles on every partition (a\n relatively cheap operation), then merge the results. We do both, but\n instead of random samples, we use random percentiles.\n\n If the number of partitions isn't changing, then the ratio of fixed\n percentiles to random percentiles is 2 to 1. 
If repartitioning goes from\n a very high number of partitions to a very low number of partitions, then\n we use more random percentiles, because a stochastic approach will be more\n stable to potential correlations in the data that may cause a few equally-\n spaced partitions to under-sample the data.\n\n The more partitions there are, then the more total percentiles will get\n calculated across all partitions. Squaring the number of partitions\n approximately doubles the number of total percentiles calculated, so\n num_total_percentiles ~ sqrt(num_partitions). We assume each partition\n is approximately the same length. This should provide adequate resolution\n and allow the number of partitions to scale.\n\n For numeric data, one could instead use T-Digest for floats and Q-Digest\n for ints to calculate approximate percentiles. Our current method works\n for any dtype.\n \"\"\"\n # *waves hands*\n random_percentage = 1 / (1 + (4 * num_new / num_old) ** 0.5)\n num_percentiles = upsample * num_new * (num_old + 22) ** 0.55 / num_old\n num_fixed = int(num_percentiles * (1 - random_percentage)) + 2\n num_random = int(num_percentiles * random_percentage) + 2\n\n if num_fixed + num_random + 5 >= chunk_length:\n return np.linspace(0, 100, chunk_length + 1)\n\n if not isinstance(random_state, np.random.RandomState):\n random_state = np.random.RandomState(random_state)\n\n q_fixed = np.linspace(0, 100, num_fixed)\n q_random = random_state.rand(num_random) * 100\n q_edges = [60 / (num_fixed - 1), 100 - 60 / (num_fixed - 1)]\n qs = np.concatenate([q_fixed, q_random, q_edges, [0, 100]])\n qs.sort()\n # Make the divisions between percentiles a little more even\n qs = 0.5 * (qs[:-1] + qs[1:])\n return qs", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_tree_width_tree_width.if_to_binary_or_num_group.else_.return.num_groups": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_tree_width_tree_width.if_to_binary_or_num_group.else_.return.num_groups", "embedding": null, "metadata": {"file_path": "dask/dataframe/partitionquantiles.py", "file_name": "partitionquantiles.py", "file_type": "text/x-python", "category": "implementation", "start_line": 160, "end_line": 175, "span_ids": ["tree_width"], "tokens": 126}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def tree_width(N, to_binary=False):\n \"\"\"Generate tree width suitable for ``merge_sorted`` given N inputs\n\n The larger N is, the more tasks are reduced in a single task.\n\n In theory, this is designed so all tasks are of comparable effort.\n \"\"\"\n if N < 32:\n group_size = 2\n else:\n group_size = int(math.log(N))\n num_groups = N // group_size\n if to_binary or num_groups < 16:\n return 2 ** int(math.log(N / group_size, 2))\n else:\n return num_groups", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": 
"\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_tree_groups_tree_groups.return.rv": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_tree_groups_tree_groups.return.rv", "embedding": null, "metadata": {"file_path": "dask/dataframe/partitionquantiles.py", "file_name": "partitionquantiles.py", "file_type": "text/x-python", "category": "implementation", "start_line": 178, "end_line": 197, "span_ids": ["tree_groups"], "tokens": 155}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def tree_groups(N, num_groups):\n \"\"\"Split an integer N into evenly sized and spaced groups.\n\n >>> tree_groups(16, 6)\n [3, 2, 3, 3, 2, 3]\n \"\"\"\n # Bresenham, you so smooth!\n group_size = N // num_groups\n dx = num_groups\n dy = N - group_size * num_groups\n D = 2 * dy - dx\n rv = []\n for _ in range(num_groups):\n if D < 0:\n rv.append(group_size)\n else:\n rv.append(group_size + 1)\n D -= 2 * dx\n D += 2 * dy\n return rv", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_create_merge_tree_create_merge_tree.return.rv": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_create_merge_tree_create_merge_tree.return.rv", "embedding": null, "metadata": {"file_path": "dask/dataframe/partitionquantiles.py", "file_name": "partitionquantiles.py", "file_type": "text/x-python", "category": "implementation", "start_line": 200, "end_line": 235, "span_ids": ["create_merge_tree"], "tokens": 314}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def create_merge_tree(func, keys, token):\n \"\"\"Create a task tree that merges all the keys with a reduction function.\n\n Parameters\n ----------\n func: callable\n Reduction function that accepts a single list of values to reduce.\n keys: iterable\n Keys to reduce from the source dask graph.\n token: object\n Included in each key of the returned dict.\n\n This creates a k-ary tree where k depends on the current level and is\n greater the further away a node is from the root node. This reduces the\n total number of nodes (thereby reducing scheduler overhead), but still\n has beneficial properties of trees.\n\n For reasonable numbers of keys, N < 1e5, the total number of nodes in the\n tree is roughly ``N**0.78``. 
For 1e5 < N < 2e5, it is roughly ``N**0.8``.\n \"\"\"\n level = 0\n prev_width = len(keys)\n prev_keys = iter(keys)\n rv = {}\n while prev_width > 1:\n width = tree_width(prev_width)\n groups = tree_groups(prev_width, width)\n keys = [(token, level, i) for i in range(width)]\n\n for num, key in zip(groups, keys):\n rv[key] = (func, list(take(num, prev_keys)))\n\n prev_width = width\n prev_keys = iter(keys)\n level += 1\n return rv", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_percentiles_to_weights_percentiles_to_weights.return.vals_tolist_weights_to": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_percentiles_to_weights_percentiles_to_weights.return.vals_tolist_weights_to", "embedding": null, "metadata": {"file_path": "dask/dataframe/partitionquantiles.py", "file_name": "partitionquantiles.py", "file_type": "text/x-python", "category": "implementation", "start_line": 238, "end_line": 263, "span_ids": ["percentiles_to_weights"], "tokens": 310}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def percentiles_to_weights(qs, vals, length):\n \"\"\"Weigh percentile values by length and the difference between percentiles\n\n >>> percentiles = np.array([0, 25, 50, 90, 100])\n >>> values = np.array([2, 3, 5, 8, 13])\n >>> length = 10\n >>> percentiles_to_weights(percentiles, values, length)\n ([2, 3, 5, 8, 13], [125.0, 250.0, 325.0, 250.0, 50.0])\n\n The weight of the first element, ``2``, is determined by the difference\n between the first and second percentiles, and then scaled by length:\n\n >>> 0.5 * length * (percentiles[1] - percentiles[0])\n 125.0\n\n The second weight uses the difference of percentiles on both sides, so\n it will be twice the first weight if the percentiles are equally spaced:\n\n >>> 0.5 * length * (percentiles[2] - percentiles[0])\n 250.0\n \"\"\"\n if length == 0:\n return ()\n diff = np.ediff1d(qs, 0.0, 0.0)\n weights = 0.5 * length * (diff[1:] + diff[:-1])\n return vals.tolist(), weights.tolist()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_merge_and_compress_summaries_merge_and_compress_summaries.return.vals_weights": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_merge_and_compress_summaries_merge_and_compress_summaries.return.vals_weights", "embedding": null, "metadata": {"file_path": "dask/dataframe/partitionquantiles.py", "file_name": "partitionquantiles.py", "file_type": "text/x-python", "category": "implementation", "start_line": 266, "end_line": 293, "span_ids": ["merge_and_compress_summaries"], "tokens": 219}, "excluded_embed_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def merge_and_compress_summaries(vals_and_weights):\n \"\"\"Merge and sort percentile summaries that are already sorted.\n\n Each item is a tuple like ``(vals, weights)`` where vals and weights\n are lists. We sort both by vals.\n\n Equal values will be combined, their weights summed together.\n \"\"\"\n vals_and_weights = [x for x in vals_and_weights if x]\n if not vals_and_weights:\n return ()\n it = merge_sorted(*[zip(x, y) for x, y in vals_and_weights])\n vals = []\n weights = []\n vals_append = vals.append\n weights_append = weights.append\n val, weight = prev_val, prev_weight = next(it)\n for val, weight in it:\n if val == prev_val:\n prev_weight += weight\n else:\n vals_append(prev_val)\n weights_append(prev_weight)\n prev_val, prev_weight = val, weight\n if val == prev_val:\n vals_append(prev_val)\n weights_append(prev_weight)\n return vals, weights", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_process_val_weights_process_val_weights.return.rv": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_process_val_weights_process_val_weights.return.rv", "embedding": null, "metadata": {"file_path": "dask/dataframe/partitionquantiles.py", "file_name": "partitionquantiles.py", "file_type": "text/x-python", "category": "implementation", "start_line": 296, "end_line": 383, "span_ids": ["process_val_weights"], "tokens": 888}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def process_val_weights(vals_and_weights, npartitions, dtype_info):\n \"\"\"Calculate final approximate percentiles given weighted vals\n\n ``vals_and_weights`` is assumed to be sorted. We take a cumulative\n sum of the weights, which makes them percentile-like (their scale is\n [0, N] instead of [0, 100]). Next we find the divisions to create\n partitions of approximately equal size.\n\n It is possible for adjacent values of the result to be the same. Since\n these determine the divisions of the new partitions, some partitions\n may be empty. This can happen if we under-sample the data, or if there\n aren't enough unique values in the column. Increasing ``upsample``\n keyword argument in ``df.set_index`` may help.\n \"\"\"\n dtype, info = dtype_info\n\n if not vals_and_weights:\n try:\n return np.array(None, dtype=dtype)\n except Exception:\n # dtype does not support None value so allow it to change\n return np.array(None, dtype=np.float_)\n\n vals, weights = vals_and_weights\n vals = np.array(vals)\n weights = np.array(weights)\n\n # We want to create exactly `npartition` number of groups of `vals` that\n # are approximately the same weight and non-empty if possible. 
We use a\n # simple approach (more accurate algorithms exist):\n # 1. Remove all the values with weights larger than the relative\n # percentile width from consideration (these are `jumbo`s)\n # 2. Calculate percentiles with \"interpolation=left\" of percentile-like\n # weights of the remaining values. These are guaranteed to be unique.\n # 3. Concatenate the values from (1) and (2), sort, and return.\n #\n # We assume that all values are unique, which happens in the previous\n # step `merge_and_compress_summaries`.\n\n if len(vals) == npartitions + 1:\n rv = vals\n elif len(vals) < npartitions + 1:\n # The data is under-sampled\n if np.issubdtype(vals.dtype, np.number) and not is_categorical_dtype(dtype):\n # Interpolate extra divisions\n q_weights = np.cumsum(weights)\n q_target = np.linspace(q_weights[0], q_weights[-1], npartitions + 1)\n rv = np.interp(q_target, q_weights, vals)\n else:\n # Distribute the empty partitions\n duplicated_index = np.linspace(\n 0, len(vals) - 1, npartitions - len(vals) + 1, dtype=int\n )\n duplicated_vals = vals[duplicated_index]\n rv = np.concatenate([vals, duplicated_vals])\n rv.sort()\n else:\n target_weight = weights.sum() / npartitions\n jumbo_mask = weights >= target_weight\n jumbo_vals = vals[jumbo_mask]\n\n trimmed_vals = vals[~jumbo_mask]\n trimmed_weights = weights[~jumbo_mask]\n trimmed_npartitions = npartitions - len(jumbo_vals)\n\n # percentile-like, but scaled by weights\n q_weights = np.cumsum(trimmed_weights)\n q_target = np.linspace(0, q_weights[-1], trimmed_npartitions + 1)\n\n left = np.searchsorted(q_weights, q_target, side=\"left\")\n right = np.searchsorted(q_weights, q_target, side=\"right\") - 1\n # stay inbounds\n np.maximum(right, 0, right)\n lower = np.minimum(left, right)\n trimmed = trimmed_vals[lower]\n\n rv = np.concatenate([trimmed, jumbo_vals])\n rv.sort()\n\n if is_categorical_dtype(dtype):\n rv = pd.Categorical.from_codes(rv, info[0], info[1])\n elif is_datetime64tz_dtype(dtype):\n rv = pd.DatetimeIndex(rv).tz_localize(dtype.tz)\n elif \"datetime64\" in str(dtype):\n rv = pd.DatetimeIndex(rv, dtype=dtype)\n elif rv.dtype != dtype:\n rv = rv.astype(dtype)\n return rv", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_percentiles_summary_dtype_info.return.df_dtype_info": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_percentiles_summary_dtype_info.return.df_dtype_info", "embedding": null, "metadata": {"file_path": "dask/dataframe/partitionquantiles.py", "file_name": "partitionquantiles.py", "file_type": "text/x-python", "category": "implementation", "start_line": 386, "end_line": 428, "span_ids": ["percentiles_summary", "dtype_info"], "tokens": 324}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def percentiles_summary(df, num_old, num_new, upsample, state):\n \"\"\"Summarize data using percentiles and derived weights.\n\n These summaries can be merged, compressed, and converted back 
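The under-sampled numeric branch of `process_val_weights` is the subtle one: the cumulative weights turn the known values into points on a percentile-like axis, and `np.interp` then manufactures the missing divisions along that axis. An illustrative run (the vals, weights, and npartitions below are made-up numbers, not from the source):

    import numpy as np

    vals = np.array([10.0, 20.0, 40.0])
    weights = np.array([1.0, 2.0, 1.0])
    npartitions = 4      # need npartitions + 1 = 5 divisions, but only 3 vals

    q_weights = np.cumsum(weights)   # [1., 3., 4.]  -- percentile-like axis
    q_target = np.linspace(q_weights[0], q_weights[-1], npartitions + 1)
    divisions = np.interp(q_target, q_weights, vals)

    print(divisions)   # [10.   13.75 17.5  25.   40.  ]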
into\n approximate percentiles.\n\n Parameters\n ----------\n df: pandas.Series\n Data to summarize\n num_old: int\n Number of partitions of the current object\n num_new: int\n Number of partitions of the new object\n upsample: float\n Scale factor to increase the number of percentiles calculated in\n each partition. Use to improve accuracy.\n \"\"\"\n from dask.array.percentile import _percentile\n\n length = len(df)\n if length == 0:\n return ()\n random_state = np.random.RandomState(state)\n qs = sample_percentiles(num_old, num_new, length, upsample, random_state)\n data = df.values\n interpolation = \"linear\"\n if is_categorical_dtype(data):\n data = data.codes\n interpolation = \"nearest\"\n vals, n = _percentile(data, qs, interpolation=interpolation)\n if interpolation == \"linear\" and np.issubdtype(data.dtype, np.integer):\n vals = np.round(vals).astype(data.dtype)\n vals_and_weights = percentiles_to_weights(qs, vals, length)\n return vals_and_weights\n\n\ndef dtype_info(df):\n info = None\n if is_categorical_dtype(df):\n data = df.values\n info = (data.categories, data.ordered)\n return df.dtype, info", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_partition_quantiles_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_partition_quantiles_", "embedding": null, "metadata": {"file_path": "dask/dataframe/partitionquantiles.py", "file_name": "partitionquantiles.py", "file_type": "text/x-python", "category": "implementation", "start_line": 431, "end_line": 484, "span_ids": ["partition_quantiles"], "tokens": 517}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def partition_quantiles(df, npartitions, upsample=1.0, random_state=None):\n \"\"\"Approximate quantiles of Series used for repartitioning\"\"\"\n assert isinstance(df, Series)\n # currently, only Series has quantile method\n # Index.quantile(list-like) must be pd.Series, not pd.Index\n return_type = Series\n\n qs = np.linspace(0, 1, npartitions + 1)\n token = tokenize(df, qs, upsample)\n if random_state is None:\n random_state = int(token, 16) % np.iinfo(np.int32).max\n state_data = random_state_data(df.npartitions, random_state)\n\n df_keys = df.__dask_keys__()\n\n name0 = \"re-quantiles-0-\" + token\n dtype_dsk = {(name0, 0): (dtype_info, df_keys[0])}\n\n name1 = \"re-quantiles-1-\" + token\n val_dsk = {\n (name1, i): (\n percentiles_summary,\n key,\n df.npartitions,\n npartitions,\n upsample,\n state,\n )\n for i, (state, key) in enumerate(zip(state_data, df_keys))\n }\n\n name2 = \"re-quantiles-2-\" + token\n merge_dsk = create_merge_tree(merge_and_compress_summaries, sorted(val_dsk), name2)\n if not merge_dsk:\n # Compress the data even if we only have one partition\n merge_dsk = {(name2, 0, 0): (merge_and_compress_summaries, [list(val_dsk)[0]])}\n\n merged_key = max(merge_dsk)\n\n name3 = \"re-quantiles-3-\" + token\n last_dsk = {\n (name3, 0): (\n pd.Series, # TODO: Use 
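Per partition, the summary produced by `percentiles_summary` is just "percentile values plus weights". A rough single-partition rendition, with plain `np.percentile` standing in for the randomized qs from `sample_percentiles` and dask's `_percentile` helper (both stand-ins are assumptions; the real qs are random and upsampled):

    import numpy as np
    import pandas as pd

    s = pd.Series([5, 1, 9, 3, 7, 2, 8, 4, 6, 0])
    qs = np.array([0.0, 25.0, 50.0, 75.0, 100.0])  # percent scale, as in the source

    vals = np.percentile(s.values, qs, interpolation="linear")
    if np.issubdtype(s.values.dtype, np.integer):
        # mirror the source: round linear-interpolated results back to int
        vals = np.round(vals).astype(s.values.dtype)
    print(vals)   # [0 2 4 7 9]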
`type(df._meta)` when cudf adds `tolist()`\n (process_val_weights, merged_key, npartitions, (name0, 0)),\n qs,\n None,\n df.name,\n )\n }\n\n dsk = merge(df.dask, dtype_dsk, val_dsk, merge_dsk, last_dsk)\n new_divisions = [0.0, 1.0]\n return return_type(dsk, name3, df._meta, new_divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/reshape.py_np_get_dummies._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/reshape.py_np_get_dummies._", "embedding": null, "metadata": {"file_path": "dask/dataframe/reshape.py", "file_name": "reshape.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 106, "span_ids": ["imports", "get_dummies"], "tokens": 821}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\nimport pandas as pd\n\nfrom .core import Series, DataFrame, map_partitions, apply_concat_apply\nfrom . import methods\nfrom .utils import is_categorical_dtype, is_scalar, has_known_categories\nfrom ..utils import M\nimport sys\nfrom pandas.api.types import is_list_like\n\n###############################################################\n# Dummies\n###############################################################\n\n\ndef get_dummies(\n data,\n prefix=None,\n prefix_sep=\"_\",\n dummy_na=False,\n columns=None,\n sparse=False,\n drop_first=False,\n dtype=np.uint8,\n **kwargs\n):\n \"\"\"\n Convert categorical variable into dummy/indicator variables.\n\n Data must have category dtype to infer result's ``columns``.\n\n Parameters\n ----------\n data : Series, or DataFrame\n For Series, the dtype must be categorical.\n For DataFrame, at least one column must be categorical.\n prefix : string, list of strings, or dict of strings, default None\n String to append DataFrame column names.\n Pass a list with length equal to the number of columns\n when calling get_dummies on a DataFrame. Alternatively, `prefix`\n can be a dictionary mapping column names to prefixes.\n prefix_sep : string, default '_'\n If appending prefix, separator/delimiter to use. Or pass a\n list or dictionary as with `prefix.`\n dummy_na : bool, default False\n Add a column to indicate NaNs, if False NaNs are ignored.\n columns : list-like, default None\n Column names in the DataFrame to be encoded.\n If `columns` is None then all the columns with\n `category` dtype will be converted.\n sparse : bool, default False\n Whether the dummy columns should be sparse or not. Returns\n SparseDataFrame if `data` is a Series or if all columns are included.\n Otherwise returns a DataFrame with some SparseBlocks.\n\n .. versionadded:: 0.18.2\n\n drop_first : bool, default False\n Whether to get k-1 dummies out of k categorical levels by removing the\n first level.\n\n dtype : dtype, default np.uint8\n Data type for new columns. Only a single dtype is allowed.\n Only valid if pandas is 0.23.0 or newer.\n\n .. 
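`partition_quantiles` wires the helpers above into four named layers: re-quantiles-0 (dtype info), re-quantiles-1 (per-partition summaries), re-quantiles-2 (the merge tree), and re-quantiles-3 (finalization into a Series). An illustrative two-partition miniature with plain functions standing in for the real ones (all key names and callables below are stand-ins, not dask's actual keys):

    import dask

    def summarize(part):
        return sorted(part)

    def merge_pair(summaries):
        return sorted(x for s in summaries for x in s)

    def finalize(vals):
        # crude "divisions": first, middle, and last value
        return [vals[0], vals[len(vals) // 2], vals[-1]]

    dsk = {
        ("parts", 0): [3, 1, 2],
        ("parts", 1): [9, 7, 8],
        ("rq-1", 0): (summarize, ("parts", 0)),
        ("rq-1", 1): (summarize, ("parts", 1)),
        ("rq-2", 0, 0): (merge_pair, [("rq-1", 0), ("rq-1", 1)]),
        ("rq-3", 0): (finalize, ("rq-2", 0, 0)),
    }
    print(dask.get(dsk, ("rq-3", 0)))   # [1, 7, 9]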
versionadded:: 0.18.2\n\n Returns\n -------\n dummies : DataFrame\n\n Examples\n --------\n Dask's version only works with Categorical data, as this is the only way to\n know the output shape without computing all the data.\n\n >>> import pandas as pd\n >>> import dask.dataframe as dd\n >>> s = dd.from_pandas(pd.Series(list('abca')), npartitions=2)\n >>> dd.get_dummies(s)\n Traceback (most recent call last):\n ...\n NotImplementedError: `get_dummies` with non-categorical dtypes is not supported...\n\n With categorical data:\n\n >>> s = dd.from_pandas(pd.Series(list('abca'), dtype='category'), npartitions=2)\n >>> dd.get_dummies(s) # doctest: +NORMALIZE_WHITESPACE\n Dask DataFrame Structure:\n a b c\n npartitions=2\n 0 uint8 uint8 uint8\n 2 ... ... ...\n 3 ... ... ...\n Dask Name: get_dummies, 4 tasks\n >>> dd.get_dummies(s).compute() # doctest: +ELLIPSIS\n a b c\n 0 1 0 0\n 1 0 1 0\n 2 0 0 1\n 3 1 0 0\n\n See Also\n --------\n pandas.get_dummies\n \"\"\"\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/reshape.py_get_dummies.if_isinstance_data_pd_S_get_dummies.return.map_partitions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/reshape.py_get_dummies.if_isinstance_data_pd_S_get_dummies.return.map_partitions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/reshape.py", "file_name": "reshape.py", "file_type": "text/x-python", "category": "implementation", "start_line": 107, "end_line": 178, "span_ids": ["get_dummies"], "tokens": 535}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_dummies(\n data,\n prefix=None,\n prefix_sep=\"_\",\n dummy_na=False,\n columns=None,\n sparse=False,\n drop_first=False,\n dtype=np.uint8,\n **kwargs\n):\n if isinstance(data, (pd.Series, pd.DataFrame)):\n return pd.get_dummies(\n data,\n prefix=prefix,\n prefix_sep=prefix_sep,\n dummy_na=dummy_na,\n columns=columns,\n sparse=sparse,\n drop_first=drop_first,\n dtype=dtype,\n **kwargs\n )\n\n not_cat_msg = (\n \"`get_dummies` with non-categorical dtypes is not \"\n \"supported. Please use `df.categorize()` beforehand to \"\n \"convert to categorical dtype.\"\n )\n\n unknown_cat_msg = (\n \"`get_dummies` with unknown categories is not \"\n \"supported. 
Please use `column.cat.as_known()` or \"\n \"`df.categorize()` beforehand to ensure known \"\n \"categories\"\n )\n\n if isinstance(data, Series):\n if not is_categorical_dtype(data):\n raise NotImplementedError(not_cat_msg)\n if not has_known_categories(data):\n raise NotImplementedError(unknown_cat_msg)\n elif isinstance(data, DataFrame):\n if columns is None:\n if (data.dtypes == \"object\").any():\n raise NotImplementedError(not_cat_msg)\n columns = data._meta.select_dtypes(include=[\"category\"]).columns\n else:\n if not all(is_categorical_dtype(data[c]) for c in columns):\n raise NotImplementedError(not_cat_msg)\n\n if not all(has_known_categories(data[c]) for c in columns):\n raise NotImplementedError(unknown_cat_msg)\n\n # We explicitly create `meta` on `data._meta` (the empty version) to\n # work around https://github.com/pandas-dev/pandas/issues/21993\n package_name = data._meta.__class__.__module__.split(\".\")[0]\n dummies = sys.modules[package_name].get_dummies\n meta = dummies(\n data._meta,\n prefix=prefix,\n prefix_sep=prefix_sep,\n dummy_na=dummy_na,\n columns=columns,\n sparse=sparse,\n drop_first=drop_first,\n dtype=dtype,\n **kwargs\n )\n\n return map_partitions(\n dummies,\n data,\n prefix=prefix,\n prefix_sep=prefix_sep,\n dummy_na=dummy_na,\n columns=columns,\n sparse=sparse,\n drop_first=drop_first,\n meta=meta,\n dtype=dtype,\n **kwargs\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/reshape.py_None_3_pivot_table.if_aggfunc_sum_.else_.raise_ValueError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/reshape.py_None_3_pivot_table.if_aggfunc_sum_.else_.raise_ValueError", "embedding": null, "metadata": {"file_path": "dask/dataframe/reshape.py", "file_name": "reshape.py", "file_type": "text/x-python", "category": "implementation", "start_line": 181, "end_line": 278, "span_ids": ["get_dummies", "pivot_table"], "tokens": 676}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "###############################################################\n# Pivot table\n###############################################################\n\n\ndef pivot_table(df, index=None, columns=None, values=None, aggfunc=\"mean\"):\n \"\"\"\n Create a spreadsheet-style pivot table as a DataFrame. 
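The upshot of the checks in `get_dummies` for users: convert to categorical first, with known categories, so the output columns are determined without computing any data; the meta is then built on the empty `_meta` and the real work is a plain `map_partitions`. A usage sketch matching the docstring example:

    import pandas as pd
    import dask.dataframe as dd

    s = dd.from_pandas(pd.Series(list("abca"), dtype="category"), npartitions=2)
    dummies = dd.get_dummies(s)   # columns a, b, c known up front
    print(dummies.compute())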
Target ``columns``\n must have category dtype to infer result's ``columns``.\n ``index``, ``columns``, and ``aggfunc`` must be all scalar.\n ``values`` can be scalar or list-like.\n\n Parameters\n ----------\n df : DataFrame\n index : scalar\n column to be index\n columns : scalar\n column to be columns\n values : scalar or list(scalar)\n column(s) to aggregate\n aggfunc : {'mean', 'sum', 'count'}, default 'mean'\n\n Returns\n -------\n table : DataFrame\n\n See Also\n --------\n pandas.DataFrame.pivot_table\n \"\"\"\n\n if not is_scalar(index) or index is None:\n raise ValueError(\"'index' must be the name of an existing column\")\n if not is_scalar(columns) or columns is None:\n raise ValueError(\"'columns' must be the name of an existing column\")\n if not is_categorical_dtype(df[columns]):\n raise ValueError(\"'columns' must be category dtype\")\n if not has_known_categories(df[columns]):\n raise ValueError(\n \"'columns' must have known categories. Please use \"\n \"`df[columns].cat.as_known()` beforehand to ensure \"\n \"known categories\"\n )\n if not (\n is_list_like(values)\n and all([is_scalar(v) for v in values])\n or is_scalar(values)\n ):\n raise ValueError(\"'values' must refer to an existing column or columns\")\n if not is_scalar(aggfunc) or aggfunc not in (\"mean\", \"sum\", \"count\"):\n raise ValueError(\"aggfunc must be either 'mean', 'sum' or 'count'\")\n\n # _emulate can't work for empty data\n # the result must have CategoricalIndex columns\n\n columns_contents = pd.CategoricalIndex(df[columns].cat.categories, name=columns)\n if is_scalar(values):\n new_columns = columns_contents\n else:\n new_columns = pd.MultiIndex.from_product(\n (sorted(values), columns_contents), names=[None, columns]\n )\n\n meta = pd.DataFrame(\n columns=new_columns, dtype=np.float64, index=pd.Index(df._meta[index])\n )\n\n kwargs = {\"index\": index, \"columns\": columns, \"values\": values}\n\n if aggfunc in [\"sum\", \"mean\"]:\n pv_sum = apply_concat_apply(\n [df],\n chunk=methods.pivot_sum,\n aggregate=methods.pivot_agg,\n meta=meta,\n token=\"pivot_table_sum\",\n chunk_kwargs=kwargs,\n )\n\n if aggfunc in [\"count\", \"mean\"]:\n pv_count = apply_concat_apply(\n [df],\n chunk=methods.pivot_count,\n aggregate=methods.pivot_agg,\n meta=meta,\n token=\"pivot_table_count\",\n chunk_kwargs=kwargs,\n )\n\n if aggfunc == \"sum\":\n return pv_sum\n elif aggfunc == \"count\":\n return pv_count\n elif aggfunc == \"mean\":\n return pv_sum / pv_count\n else:\n raise ValueError", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/reshape.py_None_6_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/reshape.py_None_6_", "embedding": null, "metadata": {"file_path": "dask/dataframe/reshape.py", "file_name": "reshape.py", "file_type": "text/x-python", "category": "implementation", "start_line": 281, "end_line": 339, "span_ids": ["pivot_table", "melt"], "tokens": 380}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, 
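Note how `pivot_table`'s "mean" never exists as its own reduction: it is a pivoted sum divided elementwise by a pivoted count, each produced by an `apply_concat_apply` tree. The identity is easy to check in plain pandas:

    import pandas as pd

    df = pd.DataFrame(
        {"k": ["a", "a", "b"], "c": ["x", "y", "x"], "v": [1.0, 2.0, 3.0]}
    )
    pv_sum = df.pivot_table(index="k", columns="c", values="v", aggfunc="sum")
    pv_count = df.pivot_table(index="k", columns="c", values="v", aggfunc="count")

    assert pv_sum.div(pv_count).equals(
        df.pivot_table(index="k", columns="c", values="v", aggfunc="mean")
    )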
"text": "###############################################################\n# Melt\n###############################################################\n\n\ndef melt(\n frame,\n id_vars=None,\n value_vars=None,\n var_name=None,\n value_name=\"value\",\n col_level=None,\n):\n \"\"\"\n Unpivots a DataFrame from wide format to long format, optionally leaving identifier variables set.\n\n This function is useful to massage a DataFrame into a format where one or more columns are identifier variables\n (``id_vars``), while all other columns, considered measured variables (``value_vars``), are \"unpivoted\" to the row\n axis, leaving just two non-identifier columns, 'variable' and 'value'.\n\n Parameters\n ----------\n frame : DataFrame\n id_vars : tuple, list, or ndarray, optional\n Column(s) to use as identifier variables.\n value_vars : tuple, list, or ndarray, optional\n Column(s) to unpivot. If not specified, uses all columns that\n are not set as `id_vars`.\n var_name : scalar\n Name to use for the 'variable' column. If None it uses\n ``frame.columns.name`` or 'variable'.\n value_name : scalar, default 'value'\n Name to use for the 'value' column.\n col_level : int or string, optional\n If columns are a MultiIndex then use this level to melt.\n\n Returns\n -------\n DataFrame\n Unpivoted DataFrame.\n\n See Also\n --------\n pandas.DataFrame.melt\n \"\"\"\n\n from dask.dataframe.core import no_default\n\n return frame.map_partitions(\n M.melt,\n meta=no_default,\n id_vars=id_vars,\n value_vars=value_vars,\n var_name=var_name,\n value_name=value_name,\n col_level=col_level,\n token=\"melt\",\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_datetime_overlap_chunk.return.out_iloc_before_after_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_datetime_overlap_chunk.return.out_iloc_before_after_", "embedding": null, "metadata": {"file_path": "dask/dataframe/rolling.py", "file_name": "rolling.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 54, "span_ids": ["imports", "overlap_chunk"], "tokens": 368}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import datetime\nimport inspect\n\nimport pandas as pd\nfrom pandas.core.window import Rolling as pd_Rolling\nfrom numbers import Integral\n\nfrom ..base import tokenize\nfrom ..utils import M, funcname, derived_from, has_keyword\nfrom ..highlevelgraph import HighLevelGraph\nfrom ._compat import PANDAS_VERSION\nfrom .core import _emulate\nfrom .utils import make_meta\nfrom . import methods\n\n\ndef overlap_chunk(\n func, prev_part, current_part, next_part, before, after, args, kwargs\n):\n\n msg = (\n \"Partition size is less than overlapping \"\n \"window size. 
Try using ``df.repartition`` \"\n \"to increase the partition size.\"\n )\n\n if prev_part is not None and isinstance(before, Integral):\n if prev_part.shape[0] != before:\n raise NotImplementedError(msg)\n\n if next_part is not None and isinstance(after, Integral):\n if next_part.shape[0] != after:\n raise NotImplementedError(msg)\n\n parts = [p for p in (prev_part, current_part, next_part) if p is not None]\n combined = methods.concat(parts)\n out = func(combined, *args, **kwargs)\n if prev_part is None:\n before = None\n if isinstance(before, datetime.timedelta):\n before = len(prev_part)\n\n expansion = None\n if combined.shape[0] != 0:\n expansion = out.shape[0] // combined.shape[0]\n if before and expansion:\n before *= expansion\n if next_part is None:\n return out.iloc[before:]\n if isinstance(after, datetime.timedelta):\n after = len(next_part)\n if after and expansion:\n after *= expansion\n return out.iloc[before:-after]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_map_overlap_map_overlap.timedelta_partition_message._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_map_overlap_map_overlap.timedelta_partition_message._", "embedding": null, "metadata": {"file_path": "dask/dataframe/rolling.py", "file_name": "rolling.py", "file_type": "text/x-python", "category": "implementation", "start_line": 57, "end_line": 117, "span_ids": ["map_overlap"], "tokens": 483}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def map_overlap(func, df, before, after, *args, **kwargs):\n \"\"\"Apply a function to each partition, sharing rows with adjacent partitions.\n\n Parameters\n ----------\n func : function\n Function applied to each partition.\n df : dd.DataFrame, dd.Series\n before : int or timedelta\n The rows to prepend to partition ``i`` from the end of\n partition ``i - 1``.\n after : int or timedelta\n The rows to append to partition ``i`` from the beginning\n of partition ``i + 1``.\n args, kwargs :\n Arguments and keywords to pass to the function. 
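The slicing at the end of `overlap_chunk` is the crux: the borrowed rows exist only so `func` sees correct boundary context, and must be cut back off afterwards, scaled by the integer expansion factor when `func` changes the row count. A minimal pandas sketch of the happy path (integer before/after, no expansion), with `rolling(2).sum()` standing in for `func`:

    import pandas as pd

    prev_part = pd.Series([1, 2])     # tail of partition i-1  (before = 2)
    current = pd.Series([3, 4, 5])
    next_part = pd.Series([6])        # head of partition i+1  (after = 1)

    combined = pd.concat([prev_part, current, next_part])
    out = combined.rolling(2).sum()
    before, after = len(prev_part), len(next_part)

    print(out.iloc[before:-after].tolist())  # [5.0, 7.0, 9.0] -- rows owned by i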
The partition will\n be the first argument, and these will be passed *after*.\n\n See Also\n --------\n dd.DataFrame.map_overlap\n \"\"\"\n if isinstance(before, datetime.timedelta) or isinstance(after, datetime.timedelta):\n if not df.index._meta_nonempty.is_all_dates:\n raise TypeError(\n \"Must have a `DatetimeIndex` when using string offset \"\n \"for `before` and `after`\"\n )\n else:\n if not (\n isinstance(before, Integral)\n and before >= 0\n and isinstance(after, Integral)\n and after >= 0\n ):\n raise ValueError(\"before and after must be positive integers\")\n\n if \"token\" in kwargs:\n func_name = kwargs.pop(\"token\")\n token = tokenize(df, before, after, *args, **kwargs)\n else:\n func_name = \"overlap-\" + funcname(func)\n token = tokenize(func, df, before, after, *args, **kwargs)\n\n if \"meta\" in kwargs:\n meta = kwargs.pop(\"meta\")\n else:\n meta = _emulate(func, df, *args, **kwargs)\n meta = make_meta(meta, index=df._meta.index)\n\n name = \"{0}-{1}\".format(func_name, token)\n name_a = \"overlap-prepend-\" + tokenize(df, before)\n name_b = \"overlap-append-\" + tokenize(df, after)\n df_name = df._name\n\n dsk = {}\n\n timedelta_partition_message = (\n \"Partition size is less than specified window. \"\n \"Try using ``df.repartition`` to increase the partition size\"\n )\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_map_overlap.if_before_and_isinstance__map_overlap.return.df__constructor_graph_na": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_map_overlap.if_before_and_isinstance__map_overlap.return.df__constructor_graph_na", "embedding": null, "metadata": {"file_path": "dask/dataframe/rolling.py", "file_name": "rolling.py", "file_type": "text/x-python", "category": "implementation", "start_line": 119, "end_line": 211, "span_ids": ["map_overlap"], "tokens": 778}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def map_overlap(func, df, before, after, *args, **kwargs):\n # ... other code\n\n if before and isinstance(before, Integral):\n\n prevs = [None]\n for i in range(df.npartitions - 1):\n key = (name_a, i)\n dsk[key] = (M.tail, (df_name, i), before)\n prevs.append(key)\n\n elif isinstance(before, datetime.timedelta):\n # Assumes monotonic (increasing?) 
index\n divs = pd.Series(df.divisions)\n deltas = divs.diff().iloc[1:-1]\n\n # In the first case window-size is larger than at least one partition, thus it is\n # necessary to calculate how many partitions must be used for each rolling task.\n # Otherwise, these calculations can be skipped (faster)\n\n if (before > deltas).any():\n pt_z = divs[0]\n prevs = [None]\n for i in range(df.npartitions - 1):\n # Select all indexes of relevant partitions between the current partition and\n # the partition with the highest division outside the rolling window (before)\n pt_i = divs[i + 1]\n\n # lower-bound the search to the first division\n lb = max(pt_i - before, pt_z)\n\n first, j = divs[i], i\n while first > lb and j > 0:\n first = first - deltas[j]\n j = j - 1\n\n key = (name_a, i)\n dsk[key] = (\n _tail_timedelta,\n [(df_name, k) for k in range(j, i + 1)],\n (df_name, i + 1),\n before,\n )\n prevs.append(key)\n\n else:\n prevs = [None]\n for i in range(df.npartitions - 1):\n key = (name_a, i)\n dsk[key] = (\n _tail_timedelta,\n [(df_name, i)],\n (df_name, i + 1),\n before,\n )\n prevs.append(key)\n else:\n prevs = [None] * df.npartitions\n\n if after and isinstance(after, Integral):\n nexts = []\n for i in range(1, df.npartitions):\n key = (name_b, i)\n dsk[key] = (M.head, (df_name, i), after)\n nexts.append(key)\n nexts.append(None)\n elif isinstance(after, datetime.timedelta):\n # TODO: Do we have a use-case for this? Pandas doesn't allow negative rolling windows\n deltas = pd.Series(df.divisions).diff().iloc[1:-1]\n if (after > deltas).any():\n raise ValueError(timedelta_partition_message)\n\n nexts = []\n for i in range(1, df.npartitions):\n key = (name_b, i)\n dsk[key] = (_head_timedelta, (df_name, i - 0), (df_name, i), after)\n nexts.append(key)\n nexts.append(None)\n else:\n nexts = [None] * df.npartitions\n\n for i, (prev, current, next) in enumerate(zip(prevs, df.__dask_keys__(), nexts)):\n dsk[(name, i)] = (\n overlap_chunk,\n func,\n prev,\n current,\n next,\n before,\n after,\n args,\n kwargs,\n )\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[df])\n return df._constructor(graph, name, meta, df.divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py__head_timedelta_pandas_rolling_method.return.getattr_rolling_name_a": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py__head_timedelta_pandas_rolling_method.return.getattr_rolling_name_a", "embedding": null, "metadata": {"file_path": "dask/dataframe/rolling.py", "file_name": "rolling.py", "file_type": "text/x-python", "category": "implementation", "start_line": 214, "end_line": 253, "span_ids": ["_head_timedelta", "_tail_timedelta", "pandas_rolling_method"], "tokens": 244}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _head_timedelta(current, next_, after):\n \"\"\"Return rows of ``next_`` whose index is before the last\n observation in ``current`` + ``after``.\n\n Parameters\n 
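Laid flat, the integer-overlap wiring in `map_overlap` is: one tail task per left neighbour, one head task per right neighbour, offset so that `prevs` starts with None and `nexts` ends with None. A toy eager rendition for three partitions with before=2, after=1:

    import pandas as pd

    parts = [pd.Series([1, 2, 3]), pd.Series([4, 5, 6]), pd.Series([7, 8, 9])]
    before, after = 2, 1

    prevs = [None] + [p.tail(before) for p in parts[:-1]]
    nexts = [p.head(after) for p in parts[1:]] + [None]

    for i, (prev, cur, nxt) in enumerate(zip(prevs, parts, nexts)):
        combined = pd.concat([x for x in (prev, cur, nxt) if x is not None])
        print(i, combined.tolist())
    # 0 [1, 2, 3, 4]
    # 1 [2, 3, 4, 5, 6, 7]
    # 2 [5, 6, 7, 8, 9]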
----------\n current : DataFrame\n next_ : DataFrame\n after : timedelta\n\n Returns\n -------\n overlapped : DataFrame\n \"\"\"\n return next_[next_.index < (current.index.max() + after)]\n\n\ndef _tail_timedelta(prevs, current, before):\n \"\"\"Return the concatenated rows of each dataframe in ``prevs`` whose\n index is after the first observation in ``current`` - ``before``.\n\n Parameters\n ----------\n current : DataFrame\n prevs : list of DataFrame objects\n before : timedelta\n\n Returns\n -------\n overlapped : DataFrame\n \"\"\"\n selected = methods.concat(\n [prev[prev.index > (current.index.min() - before)] for prev in prevs]\n )\n return selected\n\n\ndef pandas_rolling_method(df, rolling_kwargs, name, *args, **kwargs):\n rolling = df.rolling(**rolling_kwargs)\n return getattr(rolling, name)(*args, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_Rolling_Rolling._has_single_partition.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_Rolling_Rolling._has_single_partition.return._", "embedding": null, "metadata": {"file_path": "dask/dataframe/rolling.py", "file_name": "rolling.py", "file_type": "text/x-python", "category": "implementation", "start_line": 256, "end_line": 298, "span_ids": ["Rolling._has_single_partition", "Rolling.__init__", "Rolling", "Rolling._rolling_kwargs"], "tokens": 340}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Rolling(object):\n \"\"\"Provides rolling window calculations.\"\"\"\n\n def __init__(\n self, obj, window=None, min_periods=None, center=False, win_type=None, axis=0\n ):\n self.obj = obj # dataframe or series\n self.window = window\n self.min_periods = min_periods\n self.center = center\n self.axis = axis\n self.win_type = win_type\n # Allow pandas to raise if appropriate\n pd_roll = obj._meta.rolling(**self._rolling_kwargs())\n # Using .rolling(window='2s'), pandas will convert the\n # offset str to a window in nanoseconds. 
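For timedelta overlaps there is no fixed row count, so `_tail_timedelta` selects by index arithmetic instead. A small check of the same predicate on illustrative minute-resolution data:

    import pandas as pd

    idx = pd.date_range("2020-01-01", periods=6, freq="1min")
    prev = pd.Series(range(4), index=idx[:4])        # minutes 0..3
    current = pd.Series(range(4, 6), index=idx[4:])  # minutes 4..5
    before = pd.Timedelta("2min")

    overlap = prev[prev.index > (current.index.min() - before)]
    print(overlap)  # only the minute-3 row: strictly within 2min of minute 4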
But pandas doesn't\n # accept the integer window with win_type='freq', so we store\n # that information here.\n # See https://github.com/pandas-dev/pandas/issues/15969\n self._window = pd_roll.window\n self._win_type = pd_roll.win_type\n self._min_periods = pd_roll.min_periods\n\n def _rolling_kwargs(self):\n return {\n \"window\": self.window,\n \"min_periods\": self.min_periods,\n \"center\": self.center,\n \"win_type\": self.win_type,\n \"axis\": self.axis,\n }\n\n @property\n def _has_single_partition(self):\n \"\"\"\n Indicator for whether the object has a single partition (True)\n or multiple (False).\n \"\"\"\n return (\n self.axis in (1, \"columns\")\n or (isinstance(self.window, Integral) and self.window <= 1)\n or self.obj.npartitions == 1\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_Rolling._call_method_Rolling._call_method.return.map_overlap_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_Rolling._call_method_Rolling._call_method.return.map_overlap_", "embedding": null, "metadata": {"file_path": "dask/dataframe/rolling.py", "file_name": "rolling.py", "file_type": "text/x-python", "category": "implementation", "start_line": 300, "end_line": 338, "span_ids": ["Rolling._call_method"], "tokens": 246}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Rolling(object):\n\n def _call_method(self, method_name, *args, **kwargs):\n rolling_kwargs = self._rolling_kwargs()\n meta = pandas_rolling_method(\n self.obj._meta_nonempty, rolling_kwargs, method_name, *args, **kwargs\n )\n\n if self._has_single_partition:\n # There's no overlap just use map_partitions\n return self.obj.map_partitions(\n pandas_rolling_method,\n rolling_kwargs,\n method_name,\n *args,\n token=method_name,\n meta=meta,\n **kwargs,\n )\n # Convert window to overlap\n if self.center:\n before = self.window // 2\n after = self.window - before - 1\n elif self._win_type == \"freq\":\n before = pd.Timedelta(self.window)\n after = 0\n else:\n before = self.window - 1\n after = 0\n return map_overlap(\n pandas_rolling_method,\n self.obj,\n before,\n after,\n rolling_kwargs,\n method_name,\n *args,\n token=method_name,\n meta=meta,\n **kwargs,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_Rolling.count_Rolling.quantile.return.self__call_method_quanti": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_Rolling.count_Rolling.quantile.return.self__call_method_quanti", "embedding": null, "metadata": {"file_path": "dask/dataframe/rolling.py", "file_name": "rolling.py", "file_type": "text/x-python", "category": "implementation", "start_line": 340, 
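The window-to-overlap conversion in `Rolling._call_method` is worth spelling out, since it determines exactly how many rows cross each partition boundary:

    # centered window of 5 rows
    window = 5
    before = window // 2            # 2 rows from the previous partition
    after = window - before - 1     # 2 rows from the next partition

    # uncentered: every row looks only backwards,
    # so nothing is needed from the next partition
    before, after = window - 1, 0   # 4 rows before, 0 after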
"end_line": 386, "span_ids": ["Rolling.min", "Rolling.quantile", "Rolling.count", "Rolling.sum", "Rolling.median", "Rolling.skew", "Rolling.cov", "Rolling.var", "Rolling.max", "Rolling.mean", "Rolling.kurt", "Rolling.std"], "tokens": 311}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Rolling(object):\n\n @derived_from(pd_Rolling)\n def count(self):\n return self._call_method(\"count\")\n\n @derived_from(pd_Rolling)\n def cov(self):\n return self._call_method(\"cov\")\n\n @derived_from(pd_Rolling)\n def sum(self):\n return self._call_method(\"sum\")\n\n @derived_from(pd_Rolling)\n def mean(self):\n return self._call_method(\"mean\")\n\n @derived_from(pd_Rolling)\n def median(self):\n return self._call_method(\"median\")\n\n @derived_from(pd_Rolling)\n def min(self):\n return self._call_method(\"min\")\n\n @derived_from(pd_Rolling)\n def max(self):\n return self._call_method(\"max\")\n\n @derived_from(pd_Rolling)\n def std(self, ddof=1):\n return self._call_method(\"std\", ddof=1)\n\n @derived_from(pd_Rolling)\n def var(self, ddof=1):\n return self._call_method(\"var\", ddof=1)\n\n @derived_from(pd_Rolling)\n def skew(self):\n return self._call_method(\"skew\")\n\n @derived_from(pd_Rolling)\n def kurt(self):\n return self._call_method(\"kurt\")\n\n @derived_from(pd_Rolling)\n def quantile(self, quantile):\n return self._call_method(\"quantile\", quantile)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_Rolling.apply_Rolling.apply.return.self__call_method_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_Rolling.apply_Rolling.apply.return.self__call_method_", "embedding": null, "metadata": {"file_path": "dask/dataframe/rolling.py", "file_name": "rolling.py", "file_type": "text/x-python", "category": "implementation", "start_line": 388, "end_line": 415, "span_ids": ["Rolling.apply"], "tokens": 222}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Rolling(object):\n\n @derived_from(pd_Rolling)\n def apply(\n self,\n func,\n raw=None,\n engine=\"cython\",\n engine_kwargs=None,\n args=None,\n kwargs=None,\n ):\n compat_kwargs = {}\n kwargs = kwargs or {}\n args = args or ()\n meta = self.obj._meta.rolling(0)\n if has_keyword(meta.apply, \"engine\"):\n # PANDAS_GT_100\n compat_kwargs = dict(engine=engine, engine_kwargs=engine_kwargs)\n elif engine != \"cython\" or engine_kwargs is not None:\n raise NotImplementedError(\n f\"Specifying the engine requires pandas>=1.0.0. 
Version '{PANDAS_VERSION}' installed.\"\n )\n if raw is None:\n # PANDAS_GT_100: The default changed from None to False\n raw = inspect.signature(meta.apply).parameters[\"raw\"]\n\n return self._call_method(\n \"apply\", func, raw=raw, args=args, kwargs=kwargs, **compat_kwargs\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_Rolling.aggregate_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_Rolling.aggregate_", "embedding": null, "metadata": {"file_path": "dask/dataframe/rolling.py", "file_name": "rolling.py", "file_type": "text/x-python", "category": "implementation", "start_line": 417, "end_line": 446, "span_ids": ["Rolling:3", "Rolling.aggregate", "Rolling.__repr__"], "tokens": 223}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Rolling(object):\n\n @derived_from(pd_Rolling)\n def aggregate(self, func, args=(), kwargs={}, **kwds):\n return self._call_method(\"agg\", func, args=args, kwargs=kwargs, **kwds)\n\n agg = aggregate\n\n def __repr__(self):\n def order(item):\n k, v = item\n _order = {\n \"window\": 0,\n \"min_periods\": 1,\n \"center\": 2,\n \"win_type\": 3,\n \"axis\": 4,\n }\n return _order[k]\n\n rolling_kwargs = self._rolling_kwargs()\n # pandas translates the '2S' offset to nanoseconds\n rolling_kwargs[\"window\"] = self._window\n rolling_kwargs[\"win_type\"] = self._win_type\n return \"Rolling [{}]\".format(\n \",\".join(\n \"{}={}\".format(k, v)\n for k, v in sorted(rolling_kwargs.items(), key=order)\n if v is not None\n )\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_contextlib_logger.logging_getLogger___name_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_contextlib_logger.logging_getLogger___name_", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 26, "span_ids": ["imports"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import contextlib\nfrom collections import defaultdict\nimport logging\nimport math\nimport shutil\nfrom operator import getitem\nimport uuid\nimport tempfile\n\nimport tlz as toolz\nimport numpy as np\nimport pandas as pd\n\nfrom .core import DataFrame, Series, _Frame, _concat, map_partitions, new_dd_object\n\nfrom .. 
import base, config\nfrom ..core import keys_in_tasks\nfrom ..base import tokenize, compute, compute_as_if_collection, is_dask_collection\nfrom ..delayed import delayed\nfrom ..highlevelgraph import HighLevelGraph, Layer\nfrom ..sizeof import sizeof\nfrom ..utils import digit, insert, M\nfrom .utils import hash_object_dispatch, group_split_dispatch\nfrom . import methods\n\nlogger = logging.getLogger(__name__)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_set_index_set_index.return.set_partition_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_set_index_set_index.return.set_partition_", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 447, "end_line": 543, "span_ids": ["set_index"], "tokens": 833}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def set_index(\n df,\n index,\n npartitions=None,\n shuffle=None,\n compute=False,\n drop=True,\n upsample=1.0,\n divisions=None,\n partition_size=128e6,\n **kwargs,\n):\n \"\"\" See _Frame.set_index for docstring \"\"\"\n if isinstance(index, Series) and index._name == df.index._name:\n return df\n if isinstance(index, (DataFrame, tuple, list)):\n # Accept [\"a\"], but not [[\"a\"]]\n if (\n isinstance(index, list)\n and len(index) == 1\n and not isinstance(index[0], list) # if index = [[\"a\"]], leave it that way\n ):\n index = index[0]\n else:\n raise NotImplementedError(\n \"Dask dataframe does not yet support multi-indexes.\\n\"\n \"You tried to index with this index: %s\\n\"\n \"Indexes must be single columns only.\" % str(index)\n )\n\n if npartitions == \"auto\":\n repartition = True\n npartitions = max(100, df.npartitions)\n else:\n if npartitions is None:\n npartitions = df.npartitions\n repartition = False\n\n if not isinstance(index, Series):\n index2 = df[index]\n else:\n index2 = index\n\n if divisions is None:\n if repartition:\n index2, df = base.optimize(index2, df)\n parts = df.to_delayed(optimize_graph=False)\n sizes = [delayed(sizeof)(part) for part in parts]\n else:\n (index2,) = base.optimize(index2)\n sizes = []\n\n divisions = index2._repartition_quantiles(npartitions, upsample=upsample)\n iparts = index2.to_delayed(optimize_graph=False)\n mins = [ipart.min() for ipart in iparts]\n maxes = [ipart.max() for ipart in iparts]\n sizes, mins, maxes = base.optimize(sizes, mins, maxes)\n divisions, sizes, mins, maxes = base.compute(\n divisions, sizes, mins, maxes, optimize_graph=False\n )\n divisions = methods.tolist(divisions)\n\n empty_dataframe_detected = pd.isnull(divisions).all()\n if repartition or empty_dataframe_detected:\n total = sum(sizes)\n npartitions = max(math.ceil(total / partition_size), 1)\n npartitions = min(npartitions, df.npartitions)\n n = len(divisions)\n try:\n divisions = np.interp(\n x=np.linspace(0, n - 1, npartitions + 1),\n xp=np.linspace(0, n - 1, n),\n 
fp=divisions,\n ).tolist()\n except (TypeError, ValueError): # str type\n indexes = np.linspace(0, n - 1, npartitions + 1).astype(int)\n divisions = [divisions[i] for i in indexes]\n\n mins = remove_nans(mins)\n maxes = remove_nans(maxes)\n if pd.api.types.is_categorical_dtype(index2.dtype):\n dtype = index2.dtype\n mins = pd.Categorical(mins, dtype=dtype).codes.tolist()\n maxes = pd.Categorical(maxes, dtype=dtype).codes.tolist()\n\n if (\n mins == sorted(mins)\n and maxes == sorted(maxes)\n and all(mx < mn for mx, mn in zip(maxes[:-1], mins[1:]))\n ):\n divisions = mins + [maxes[-1]]\n result = set_sorted_index(df, index, drop=drop, divisions=divisions)\n return result.map_partitions(M.sort_index)\n\n return set_partition(\n df, index, divisions, shuffle=shuffle, drop=drop, compute=compute, **kwargs\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_remove_nans_remove_nans.return.divisions": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_remove_nans_remove_nans.return.divisions", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 126, "end_line": 152, "span_ids": ["remove_nans"], "tokens": 213}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def remove_nans(divisions):\n \"\"\"Remove nans from divisions\n\n These sometime pop up when we call min/max on an empty partition\n\n Examples\n --------\n >>> remove_nans((np.nan, 1, 2))\n [1, 1, 2]\n >>> remove_nans((1, np.nan, 2))\n [1, 2, 2]\n >>> remove_nans((1, 2, np.nan))\n [1, 2, 2]\n \"\"\"\n divisions = list(divisions)\n\n for i in range(len(divisions) - 2, -1, -1):\n if pd.isnull(divisions[i]):\n divisions[i] = divisions[i + 1]\n\n for i in range(len(divisions) - 1, -1, -1):\n if not pd.isnull(divisions[i]):\n for j in range(i + 1, len(divisions)):\n divisions[j] = divisions[i]\n break\n\n return divisions", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_set_partition_set_partition.return.df4_map_partitions_M_sort": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_set_partition_set_partition.return.df4_map_partitions_M_sort", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 155, "end_line": 243, "span_ids": ["set_partition"], "tokens": 649}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
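The tail of `set_index` hides a useful fast path: if the per-partition [min, max] ranges of the prospective index are already ordered and pairwise disjoint, the divisions can be read off directly and the shuffle skipped entirely. The check, isolated with illustrative numbers:

    mins = [0, 10, 20]     # per-partition minima of the prospective index
    maxes = [9, 19, 29]    # per-partition maxima

    already_sorted = (
        mins == sorted(mins)
        and maxes == sorted(maxes)
        and all(mx < mn for mx, mn in zip(maxes[:-1], mins[1:]))
    )
    divisions = mins + [maxes[-1]] if already_sorted else None
    print(already_sorted, divisions)   # True [0, 10, 20, 29]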
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def set_partition(\n df, index, divisions, max_branch=32, drop=True, shuffle=None, compute=None\n):\n \"\"\"Group DataFrame by index\n\n Sets a new index and partitions data along that index according to\n divisions. Divisions are often found by computing approximate quantiles.\n The function ``set_index`` will do both of these steps.\n\n Parameters\n ----------\n df: DataFrame/Series\n Data that we want to re-partition\n index: string or Series\n Column to become the new index\n divisions: list\n Values to form new divisions between partitions\n drop: bool, default True\n Whether to delete columns to be used as the new index\n shuffle: str (optional)\n Either 'disk' for an on-disk shuffle or 'tasks' to use the task\n scheduling framework. Use 'disk' if you are on a single machine\n and 'tasks' if you are on a distributed cluster.\n max_branch: int (optional)\n If using the task-based shuffle, the amount of splitting each\n partition undergoes. Increase this for fewer copies but more\n scheduler overhead.\n\n See Also\n --------\n set_index\n shuffle\n partd\n \"\"\"\n meta = df._meta._constructor_sliced([0])\n if isinstance(divisions, tuple):\n # pd.isna considers tuples to be scalars. Convert to a list.\n divisions = list(divisions)\n\n if np.isscalar(index):\n dtype = df[index].dtype\n else:\n dtype = index.dtype\n\n if pd.isna(divisions).any() and pd.api.types.is_integer_dtype(dtype):\n # Can't construct a Series[int64] when any / all of the divisions are NaN.\n divisions = df._meta._constructor_sliced(divisions)\n else:\n divisions = df._meta._constructor_sliced(divisions, dtype=dtype)\n\n if np.isscalar(index):\n partitions = df[index].map_partitions(\n set_partitions_pre, divisions=divisions, meta=meta\n )\n df2 = df.assign(_partitions=partitions)\n else:\n partitions = index.map_partitions(\n set_partitions_pre, divisions=divisions, meta=meta\n )\n df2 = df.assign(_partitions=partitions, _index=index)\n\n df3 = rearrange_by_column(\n df2,\n \"_partitions\",\n max_branch=max_branch,\n npartitions=len(divisions) - 1,\n shuffle=shuffle,\n compute=compute,\n ignore_index=True,\n )\n\n if np.isscalar(index):\n df4 = df3.map_partitions(\n set_index_post_scalar,\n index_name=index,\n drop=drop,\n column_dtype=df.columns.dtype,\n )\n else:\n df4 = df3.map_partitions(\n set_index_post_series,\n index_name=index.name,\n drop=drop,\n column_dtype=df.columns.dtype,\n )\n\n df4.divisions = methods.tolist(divisions)\n\n return df4.map_partitions(M.sort_index)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_shuffle_shuffle.return.df3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_shuffle_shuffle.return.df3", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 246, "end_line": 313, "span_ids": ["shuffle"], "tokens": 459}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", 
"end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def shuffle(\n df,\n index,\n shuffle=None,\n npartitions=None,\n max_branch=32,\n ignore_index=False,\n compute=None,\n):\n \"\"\"Group DataFrame by index\n\n Hash grouping of elements. After this operation all elements that have\n the same index will be in the same partition. Note that this requires\n full dataset read, serialization and shuffle. This is expensive. If\n possible you should avoid shuffles.\n\n This does not preserve a meaningful index/partitioning scheme. This is not\n deterministic if done in parallel.\n\n See Also\n --------\n set_index\n set_partition\n shuffle_disk\n \"\"\"\n list_like = pd.api.types.is_list_like(index) and not is_dask_collection(index)\n if shuffle == \"tasks\" and (isinstance(index, str) or list_like):\n # Avoid creating the \"_partitions\" column if possible.\n # We currently do this if the user is passing in\n # specific column names (and shuffle == \"tasks\").\n if isinstance(index, str):\n index = [index]\n else:\n index = list(index)\n nset = set(index)\n if nset.intersection(set(df.columns)) == nset:\n return rearrange_by_column(\n df,\n index,\n npartitions=npartitions,\n max_branch=max_branch,\n shuffle=shuffle,\n ignore_index=ignore_index,\n compute=compute,\n )\n\n if not isinstance(index, _Frame):\n index = df._select_columns_or_index(index)\n\n partitions = index.map_partitions(\n partitioning_index,\n npartitions=npartitions or df.npartitions,\n meta=df._meta._constructor_sliced([0]),\n transform_divisions=False,\n )\n df2 = df.assign(_partitions=partitions)\n df2._meta.index.name = df._meta.index.name\n df3 = rearrange_by_column(\n df2,\n \"_partitions\",\n npartitions=npartitions,\n max_branch=max_branch,\n shuffle=shuffle,\n compute=compute,\n ignore_index=ignore_index,\n )\n del df3[\"_partitions\"]\n return df3", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_rearrange_by_divisions_rearrange_by_divisions.return.df3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_rearrange_by_divisions_rearrange_by_divisions.return.df3", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 316, "end_line": 335, "span_ids": ["rearrange_by_divisions"], "tokens": 164}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def rearrange_by_divisions(df, column, divisions, max_branch=None, shuffle=None):\n \"\"\" Shuffle dataframe so that column separates along divisions \"\"\"\n divisions = df._meta._constructor_sliced(divisions)\n meta = df._meta._constructor_sliced([0])\n # Assign target output partitions to every row\n partitions = df[column].map_partitions(\n set_partitions_pre, divisions=divisions, meta=meta\n )\n df2 = 
df.assign(_partitions=partitions)\n\n # Perform shuffle\n df3 = rearrange_by_column(\n df2,\n \"_partitions\",\n max_branch=max_branch,\n npartitions=len(divisions) - 1,\n shuffle=shuffle,\n )\n del df3[\"_partitions\"]\n return df3", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_rearrange_by_column_rearrange_by_column.if_shuffle_disk_.else_.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_rearrange_by_column_rearrange_by_column.if_shuffle_disk_.else_.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 338, "end_line": 358, "span_ids": ["rearrange_by_column"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def rearrange_by_column(\n df,\n col,\n npartitions=None,\n max_branch=None,\n shuffle=None,\n compute=None,\n ignore_index=False,\n):\n shuffle = shuffle or config.get(\"shuffle\", None) or \"disk\"\n if shuffle == \"disk\":\n return rearrange_by_column_disk(df, col, npartitions, compute=compute)\n elif shuffle == \"tasks\":\n df2 = rearrange_by_column_tasks(\n df, col, max_branch, npartitions, ignore_index=ignore_index\n )\n if ignore_index:\n df2._meta = df2._meta.reset_index(drop=True)\n return df2\n else:\n raise NotImplementedError(\"Unknown shuffle method %s\" % shuffle)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_maybe_buffered_partd_maybe_buffered_partd.__reduce__.if_self_tempdir_.else_.return._maybe_buffered_partd_F": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_maybe_buffered_partd_maybe_buffered_partd.__reduce__.if_self_tempdir_.else_.return._maybe_buffered_partd_F", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 361, "end_line": 375, "span_ids": ["maybe_buffered_partd.__reduce__", "maybe_buffered_partd", "maybe_buffered_partd.__init__"], "tokens": 128}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class maybe_buffered_partd(object):\n \"\"\"\n If serialized, will return non-buffered partd. 
Otherwise returns a buffered partd\n \"\"\"\n\n def __init__(self, buffer=True, tempdir=None):\n self.tempdir = tempdir or config.get(\"temporary_directory\", None)\n self.buffer = buffer\n self.compression = config.get(\"dataframe.shuffle-compression\", None)\n\n def __reduce__(self):\n if self.tempdir:\n return (maybe_buffered_partd, (False, self.tempdir))\n else:\n return (maybe_buffered_partd, (False,))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_maybe_buffered_partd.__call___maybe_buffered_partd.__call__.if_self_buffer_.else_.return.partd_PandasBlocks_file_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_maybe_buffered_partd.__call___maybe_buffered_partd.__call__.if_self_buffer_.else_.return.partd_PandasBlocks_file_", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 377, "end_line": 403, "span_ids": ["maybe_buffered_partd.__call__"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class maybe_buffered_partd(object):\n\n def __call__(self, *args, **kwargs):\n import partd\n\n path = tempfile.mkdtemp(suffix=\".partd\", dir=self.tempdir)\n\n try:\n partd_compression = (\n getattr(partd.compressed, self.compression)\n if self.compression\n else None\n )\n except AttributeError as e:\n raise ImportError(\n \"Not able to import and load {0} as compression algorithm. \"\n \"Please check if the library is installed and supported by Partd.\".format(\n self.compression\n )\n ) from e\n file = partd.File(path)\n partd.file.cleanup_files.append(path)\n # Envelope partd file with compression, if set and available\n if partd_compression:\n file = partd_compression(file)\n if self.buffer:\n return partd.PandasBlocks(partd.Buffer(partd.Dict(), file))\n else:\n return partd.PandasBlocks(file)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py___partitioning_index.return.hash_object_dispatch_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py___partitioning_index.return.hash_object_dispatch_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 720, "end_line": 742, "span_ids": ["rearrange_by_column_tasks", "partitioning_index"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type",
"file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "########################################################\n# Various convenience functions to be run by the above #\n########################################################\n\n\ndef partitioning_index(df, npartitions):\n \"\"\"\n Computes a deterministic index mapping each record to a partition.\n\n Identical rows are mapped to the same partition.\n\n Parameters\n ----------\n df : DataFrame/Series/Index\n npartitions : int\n The number of partitions to group into.\n\n Returns\n -------\n partitions : ndarray\n An array of int64 values mapping each record to a partition.\n \"\"\"\n return hash_object_dispatch(df, index=False) % int(npartitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_barrier_set_partitions_pre.return.partitions": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_barrier_set_partitions_pre.return.partitions", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 745, "end_line": 787, "span_ids": ["collect", "set_partitions_pre", "barrier", "cleanup_partd_files"], "tokens": 242}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def barrier(args):\n list(args)\n return 0\n\n\ndef cleanup_partd_files(p, keys):\n \"\"\"\n Cleanup the files in a partd.File dataset.\n\n Parameters\n ----------\n p : partd.Interface\n File or Encode wrapping a file should be OK.\n keys: List\n Just for scheduling purposes, not actually used.\n \"\"\"\n import partd\n\n if isinstance(p, partd.Encode):\n maybe_file = p.partd\n else:\n maybe_file\n\n if isinstance(maybe_file, partd.File):\n path = maybe_file.path\n else:\n path = None\n\n if path:\n shutil.rmtree(path, ignore_errors=True)\n\n\ndef collect(p, part, meta, barrier_token):\n \"\"\" Collect partitions from partd, yield dataframes \"\"\"\n with ensure_cleanup_on_exception(p):\n res = p.get(part)\n return res if len(res) > 0 else meta\n\n\ndef set_partitions_pre(s, divisions):\n partitions = divisions.searchsorted(s, side=\"right\") - 1\n partitions[(s >= divisions.iloc[-1]).values] = len(divisions) - 2\n return partitions", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_shuffle_group_2_shuffle_group_get.if_i_in_g_.else_.return.head": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_shuffle_group_2_shuffle_group_get.if_i_in_g_.else_.return.head", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", 
"file_type": "text/x-python", "category": "implementation", "start_line": 790, "end_line": 814, "span_ids": ["shuffle_group_get", "shuffle_group_2"], "tokens": 176}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def shuffle_group_2(df, cols, ignore_index, nparts):\n if not len(df):\n return {}, df\n\n if isinstance(cols, str):\n cols = [cols]\n\n if cols and cols[0] == \"_partitions\":\n ind = df[cols[0]].astype(np.int32)\n else:\n ind = (\n hash_object_dispatch(df[cols] if cols else df, index=False) % int(nparts)\n ).astype(np.int32)\n\n n = ind.max() + 1\n result2 = group_split_dispatch(df, ind.values.view(), n, ignore_index=ignore_index)\n return result2, df.iloc[:0]\n\n\ndef shuffle_group_get(g_head, i):\n g, head = g_head\n if i in g:\n return g[i]\n else:\n return head", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_shuffle_group_shuffle_group.return.group_split_dispatch_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_shuffle_group_shuffle_group.return.group_split_dispatch_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 817, "end_line": 863, "span_ids": ["shuffle_group"], "tokens": 403}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def shuffle_group(df, cols, stage, k, npartitions, ignore_index, nfinal):\n \"\"\"Splits dataframe into groups\n\n The group is determined by their final partition, and which stage we are in\n in the shuffle\n\n Parameters\n ----------\n df: DataFrame\n cols: str or list\n Column name(s) on which to split the dataframe. If ``cols`` is not\n \"_partitions\", hashing will be used to determine target partition\n stage: int\n We shuffle dataframes with many partitions we in a few stages to avoid\n a quadratic number of tasks. 
This number corresponds to which stage\n we're in, starting from zero up to some small integer\n k: int\n Desired number of splits from this dataframe\n npartition: int\n Total number of output partitions for the full dataframe\n nfinal: int\n Total number of output partitions after repartitioning\n\n Returns\n -------\n out: Dict[int, DataFrame]\n A dictionary mapping integers in {0..k} to dataframes such that the\n hash values of ``df[col]`` are well partitioned.\n \"\"\"\n if isinstance(cols, str):\n cols = [cols]\n\n if cols and cols[0] == \"_partitions\":\n ind = df[cols[0]]\n else:\n ind = hash_object_dispatch(df[cols] if cols else df, index=False)\n if nfinal and nfinal != npartitions:\n ind = ind % int(nfinal)\n\n c = ind.values\n typ = np.min_scalar_type(npartitions * 2)\n\n c = np.mod(c, npartitions).astype(typ, copy=False)\n np.floor_divide(c, k ** stage, out=c)\n np.mod(c, k, out=c)\n\n return group_split_dispatch(df, c, k, ignore_index=ignore_index)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_ensure_cleanup_on_exception_ensure_cleanup_on_exception.try_.except_Exception_.raise": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_ensure_cleanup_on_exception_ensure_cleanup_on_exception.try_.except_Exception_.raise", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 866, "end_line": 883, "span_ids": ["ensure_cleanup_on_exception"], "tokens": 140}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@contextlib.contextmanager\ndef ensure_cleanup_on_exception(p):\n \"\"\"Ensure a partd.File is cleaned up.\n\n We have several tasks referring to a `partd.File` instance. We want to\n ensure that the file is cleaned up if and only if there's an exception\n in the tasks using the `partd.File`.\n \"\"\"\n try:\n yield\n except Exception:\n # the function (e.g. 
shuffle_group_3) had an internal exception.\n # We'll cleanup our temporary files and re-raise.\n try:\n p.drop()\n except Exception:\n logger.exception(\"ignoring exception in ensure_cleanup_on_exception\")\n raise", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_shuffle_group_3_get_overlap.return.df_loc_index_if_index_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_shuffle_group_3_get_overlap.return.df_loc_index_if_index_", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 886, "end_line": 911, "span_ids": ["shuffle_group_3", "get_overlap", "set_index_post_scalar", "set_index_post_series", "drop_overlap"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def shuffle_group_3(df, col, npartitions, p):\n with ensure_cleanup_on_exception(p):\n g = df.groupby(col)\n d = {i: g.get_group(i) for i in g.groups}\n p.append(d, fsync=True)\n\n\ndef set_index_post_scalar(df, index_name, drop, column_dtype):\n df2 = df.drop(\"_partitions\", axis=1).set_index(index_name, drop=drop)\n df2.columns = df2.columns.astype(column_dtype)\n return df2\n\n\ndef set_index_post_series(df, index_name, drop, column_dtype):\n df2 = df.drop(\"_partitions\", axis=1).set_index(\"_index\", drop=True)\n df2.index.name = index_name\n df2.columns = df2.columns.astype(column_dtype)\n return df2\n\n\ndef drop_overlap(df, index):\n return df.drop(index) if index in df.index else df\n\n\ndef get_overlap(df, index):\n return df.loc[[index]] if index in df.index else df._constructor()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_fix_overlap_fix_overlap.return.new_dd_object_graph_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_fix_overlap_fix_overlap.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 914, "end_line": 926, "span_ids": ["fix_overlap"], "tokens": 194}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def fix_overlap(ddf, overlap):\n \"\"\" Ensures that the upper bound on each partition of ddf is exclusive \"\"\"\n name = \"fix-overlap-\" + tokenize(ddf, overlap)\n n = 
len(ddf.divisions) - 1\n dsk = {(name, i): (ddf._name, i) for i in range(n)}\n\n for i in overlap:\n frames = [(get_overlap, (ddf._name, i - 1), ddf.divisions[i]), (ddf._name, i)]\n dsk[(name, i)] = (methods.concat, frames)\n dsk[(name, i - 1)] = (drop_overlap, dsk[(name, i - 1)], ddf.divisions[i])\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[ddf])\n return new_dd_object(graph, name, ddf._meta, ddf.divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_compute_and_set_divisions_compute_and_set_divisions.return.fix_overlap_df_overlap_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_compute_and_set_divisions_compute_and_set_divisions.return.fix_overlap_df_overlap_", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 929, "end_line": 946, "span_ids": ["compute_and_set_divisions"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def compute_and_set_divisions(df, **kwargs):\n mins = df.index.map_partitions(M.min, meta=df.index)\n maxes = df.index.map_partitions(M.max, meta=df.index)\n mins, maxes = compute(mins, maxes, **kwargs)\n\n if (\n sorted(mins) != list(mins)\n or sorted(maxes) != list(maxes)\n or any(a > b for a, b in zip(mins, maxes))\n ):\n raise ValueError(\n \"Partitions must be sorted ascending with the index\", mins, maxes\n )\n\n df.divisions = tuple(mins) + (list(maxes)[-1],)\n\n overlap = [i for i in range(1, len(mins)) if mins[i] >= maxes[i - 1]]\n return fix_overlap(df, overlap) if overlap else df", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_set_sorted_index_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_set_sorted_index_", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 949, "end_line": 973, "span_ids": ["set_sorted_index"], "tokens": 235}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def set_sorted_index(df, index, drop=True, divisions=None, **kwargs):\n if not isinstance(index, Series):\n meta = df._meta.set_index(index, drop=drop)\n else:\n meta = df._meta.set_index(index._meta, drop=drop)\n\n result = map_partitions(M.set_index, df, 
index, drop=drop, meta=meta)\n\n if not divisions:\n return compute_and_set_divisions(result, **kwargs)\n elif len(divisions) != len(df.divisions):\n msg = (\n \"When doing `df.set_index(col, sorted=True, divisions=...)`, \"\n \"divisions indicates known splits in the index column. In this \"\n \"case divisions must be the same length as the existing \"\n \"divisions in `df`\\n\\n\"\n \"If the intent is to repartition into new divisions after \"\n \"setting the index, you probably want:\\n\\n\"\n \"`df.set_index(col, sorted=True).repartition(divisions=divisions)`\"\n )\n raise ValueError(msg)\n\n result.divisions = tuple(divisions)\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_contextlib_MyAccessor.method.return.self_item": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_contextlib_MyAccessor.method.return.self_item", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_accessors.py", "file_name": "test_accessors.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 35, "span_ids": ["MyAccessor", "imports", "MyAccessor.method", "MyAccessor.prop", "MyAccessor.__init__", "ensure_removed"], "tokens": 155}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import contextlib\n\nimport pytest\n\n\npd = pytest.importorskip(\"pandas\")\nimport dask.dataframe as dd\nfrom dask.dataframe.utils import assert_eq\n\n\n@contextlib.contextmanager\ndef ensure_removed(obj, attr):\n \"\"\"Ensure that an attribute added to 'obj' during the test is\n removed when we're done\"\"\"\n try:\n yield\n finally:\n try:\n delattr(obj, attr)\n except AttributeError:\n pass\n obj._accessors.discard(attr)\n\n\nclass MyAccessor:\n def __init__(self, obj):\n self.obj = obj\n self.item = \"item\"\n\n @property\n def prop(self):\n return self.item\n\n def method(self):\n return self.item", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_register_test_accessor_works.with_ensure_removed_dd_Se.assert_b_mine_method_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_register_test_accessor_works.with_ensure_removed_dd_Se.assert_b_mine_method_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_accessors.py", "file_name": "test_accessors.py", "file_type": "text/x-python", "category": "test", "start_line": 38, "end_line": 66, "span_ids": ["test_register", "test_accessor_works"], "tokens": 217}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", 
"start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"obj, registrar\",\n [\n (dd.Series, dd.extensions.register_series_accessor),\n (dd.DataFrame, dd.extensions.register_dataframe_accessor),\n (dd.Index, dd.extensions.register_index_accessor),\n ],\n)\ndef test_register(obj, registrar):\n with ensure_removed(obj, \"mine\"):\n before = set(dir(obj))\n registrar(\"mine\")(MyAccessor)\n instance = dd.from_pandas(obj._partition_type([], dtype=float), 2)\n assert instance.mine.prop == \"item\"\n after = set(dir(obj))\n assert (before ^ after) == {\"mine\"}\n assert \"mine\" in obj._accessors\n\n\ndef test_accessor_works():\n with ensure_removed(dd.Series, \"mine\"):\n dd.extensions.register_series_accessor(\"mine\")(MyAccessor)\n\n a = pd.Series([1, 2])\n b = dd.from_pandas(a, 2)\n assert b.mine.obj is b\n\n assert b.mine.prop == \"item\"\n assert b.mine.method() == \"item\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_df_ddf_df_ddf.return.df_ddf": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_df_ddf_df_ddf.return.df_ddf", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_accessors.py", "file_name": "test_accessors.py", "file_type": "text/x-python", "category": "test", "start_line": 69, "end_line": 90, "span_ids": ["df_ddf"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.fixture\ndef df_ddf():\n import numpy as np\n\n df = pd.DataFrame(\n {\n \"str_col\": [\"abc\", \"bcd\", \"cdef\", \"DEFG\"],\n \"int_col\": [1, 2, 3, 4],\n \"dt_col\": np.array(\n [int(1e9), int(1.1e9), int(1.2e9), None], dtype=\"M8[ns]\"\n ),\n },\n index=[\"E\", \"f\", \"g\", \"h\"],\n )\n\n if dd._compat.PANDAS_GT_100:\n df[\"string_col\"] = df[\"str_col\"].astype(\"string\")\n df.loc[\"E\", \"string_col\"] = pd.NA\n\n ddf = dd.from_pandas(df, 2)\n\n return df, ddf", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_dt_accessor_test_dt_accessor_not_available.assert_dt_accessor_in_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_dt_accessor_test_dt_accessor_not_available.assert_dt_accessor_in_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_accessors.py", "file_name": "test_accessors.py", "file_type": "text/x-python", "category": "test", "start_line": 93, "end_line": 120, "span_ids": ["test_dt_accessor_not_available", "test_dt_accessor"], "tokens": 244}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dt_accessor(df_ddf):\n df, ddf = df_ddf\n\n assert \"date\" in dir(ddf.dt_col.dt)\n\n # pandas loses Series.name via datetime accessor\n # see https://github.com/pydata/pandas/issues/10712\n assert_eq(ddf.dt_col.dt.date, df.dt_col.dt.date, check_names=False)\n\n # to_pydatetime returns a numpy array in pandas, but a Series in dask\n assert_eq(\n ddf.dt_col.dt.to_pydatetime(),\n pd.Series(df.dt_col.dt.to_pydatetime(), index=df.index, dtype=object),\n )\n\n assert set(ddf.dt_col.dt.date.dask) == set(ddf.dt_col.dt.date.dask)\n assert set(ddf.dt_col.dt.to_pydatetime().dask) == set(\n ddf.dt_col.dt.to_pydatetime().dask\n )\n\n\ndef test_dt_accessor_not_available(df_ddf):\n df, ddf = df_ddf\n\n # Not available on invalid dtypes\n with pytest.raises(AttributeError) as exc:\n ddf.str_col.dt\n assert \".dt accessor\" in str(exc.value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_str_accessor_test_str_accessor.for_regex_in_True_False.assert_set_ddf_str_col_st": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_str_accessor_test_str_accessor.for_regex_in_True_False.assert_set_ddf_str_col_st", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_accessors.py", "file_name": "test_accessors.py", "file_type": "text/x-python", "category": "test", "start_line": 123, "end_line": 180, "span_ids": ["test_str_accessor"], "tokens": 596}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_str_accessor(df_ddf):\n df, ddf = df_ddf\n\n # implemented methods are present in tab completion\n assert \"upper\" in dir(ddf.str_col.str)\n if dd._compat.PANDAS_GT_100:\n assert \"upper\" in dir(ddf.string_col.str)\n assert \"upper\" in dir(ddf.index.str)\n\n # not implemented methods don't show up\n assert \"get_dummies\" not in dir(ddf.str_col.str)\n assert not hasattr(ddf.str_col.str, \"get_dummies\")\n\n # Test simple method on both series and index\n assert_eq(ddf.str_col.str.upper(), df.str_col.str.upper())\n assert set(ddf.str_col.str.upper().dask) == set(ddf.str_col.str.upper().dask)\n\n if dd._compat.PANDAS_GT_100:\n assert_eq(ddf.string_col.str.upper(), df.string_col.str.upper())\n assert set(ddf.string_col.str.upper().dask) == set(\n ddf.string_col.str.upper().dask\n )\n\n assert_eq(ddf.index.str.upper(), df.index.str.upper())\n assert set(ddf.index.str.upper().dask) == set(ddf.index.str.upper().dask)\n\n # make sure to pass through args & kwargs\n assert_eq(ddf.str_col.str.contains(\"a\"), df.str_col.str.contains(\"a\"))\n if dd._compat.PANDAS_GT_100:\n assert_eq(ddf.string_col.str.contains(\"a\"), 
df.string_col.str.contains(\"a\"))\n assert set(ddf.str_col.str.contains(\"a\").dask) == set(\n ddf.str_col.str.contains(\"a\").dask\n )\n\n assert_eq(\n ddf.str_col.str.contains(\"d\", case=False),\n df.str_col.str.contains(\"d\", case=False),\n )\n assert set(ddf.str_col.str.contains(\"d\", case=False).dask) == set(\n ddf.str_col.str.contains(\"d\", case=False).dask\n )\n\n for na in [True, False]:\n assert_eq(\n ddf.str_col.str.contains(\"a\", na=na), df.str_col.str.contains(\"a\", na=na)\n )\n assert set(ddf.str_col.str.contains(\"a\", na=na).dask) == set(\n ddf.str_col.str.contains(\"a\", na=na).dask\n )\n\n for regex in [True, False]:\n assert_eq(\n ddf.str_col.str.contains(\"a\", regex=regex),\n df.str_col.str.contains(\"a\", regex=regex),\n )\n assert set(ddf.str_col.str.contains(\"a\", regex=regex).dask) == set(\n ddf.str_col.str.contains(\"a\", regex=regex).dask\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_str_accessor_not_available_test_str_accessor_extractall.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_str_accessor_not_available_test_str_accessor_extractall.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_accessors.py", "file_name": "test_accessors.py", "file_type": "text/x-python", "category": "test", "start_line": 183, "end_line": 204, "span_ids": ["test_str_accessor_getitem", "test_str_accessor_not_available", "test_str_accessor_extractall"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_str_accessor_not_available(df_ddf):\n df, ddf = df_ddf\n\n # Not available on invalid dtypes\n with pytest.raises(AttributeError) as exc:\n ddf.int_col.str\n assert \".str accessor\" in str(exc.value)\n\n assert \"str\" not in dir(ddf.int_col)\n\n\ndef test_str_accessor_getitem(df_ddf):\n df, ddf = df_ddf\n assert_eq(ddf.str_col.str[:2], df.str_col.str[:2])\n assert_eq(ddf.str_col.str[1], df.str_col.str[1])\n\n\ndef test_str_accessor_extractall(df_ddf):\n df, ddf = df_ddf\n assert_eq(\n ddf.str_col.str.extractall(\"(.*)b(.*)\"), df.str_col.str.extractall(\"(.*)b(.*)\")\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_str_accessor_cat_test_str_accessor_noexpand.assert_ds_str_split_n_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_str_accessor_cat_test_str_accessor_noexpand.assert_ds_str_split_n_1_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_accessors.py", "file_name": "test_accessors.py", "file_type": "text/x-python", "category": "test", 
"start_line": 207, "end_line": 232, "span_ids": ["test_str_accessor_cat", "test_str_accessor_noexpand"], "tokens": 282}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_str_accessor_cat(df_ddf):\n df, ddf = df_ddf\n sol = df.str_col.str.cat(df.str_col.str.upper(), sep=\":\")\n assert_eq(ddf.str_col.str.cat(ddf.str_col.str.upper(), sep=\":\"), sol)\n assert_eq(ddf.str_col.str.cat(df.str_col.str.upper(), sep=\":\"), sol)\n assert_eq(\n ddf.str_col.str.cat([ddf.str_col.str.upper(), df.str_col.str.lower()], sep=\":\"),\n df.str_col.str.cat([df.str_col.str.upper(), df.str_col.str.lower()], sep=\":\"),\n )\n\n for o in [\"foo\", [\"foo\"]]:\n with pytest.raises(TypeError):\n ddf.str_col.str.cat(o)\n\n with pytest.raises(NotImplementedError):\n ddf.str_col.str.cat(sep=\":\")\n\n\ndef test_str_accessor_noexpand():\n s = pd.Series([\"a b c d\", \"aa bb cc dd\", \"aaa bbb ccc dddd\"], name=\"foo\")\n ds = dd.from_pandas(s, npartitions=2)\n\n for n in [1, 2, 3]:\n assert_eq(s.str.split(n=n, expand=False), ds.str.split(n=n, expand=False))\n\n assert ds.str.split(n=1, expand=False).name == \"foo\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_str_accessor_expand_test_str_accessor_expand.None_2.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_str_accessor_expand_test_str_accessor_expand.None_2.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_accessors.py", "file_name": "test_accessors.py", "file_type": "text/x-python", "category": "test", "start_line": 235, "end_line": 254, "span_ids": ["test_str_accessor_expand"], "tokens": 205}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_str_accessor_expand():\n s = pd.Series([\"a b c d\", \"aa bb cc dd\", \"aaa bbb ccc dddd\"])\n ds = dd.from_pandas(s, npartitions=2)\n\n for n in [1, 2, 3]:\n assert_eq(s.str.split(n=n, expand=True), ds.str.split(n=n, expand=True))\n\n with pytest.raises(NotImplementedError) as info:\n ds.str.split(expand=True)\n\n assert \"n=\" in str(info.value)\n\n s = pd.Series([\"a,bcd,zz,f\", \"aabb,ccdd,z,kk\", \"aaabbb,cccdddd,l,pp\"])\n ds = dd.from_pandas(s, npartitions=2)\n\n for n in [1, 2, 3]:\n assert_eq(\n s.str.split(pat=\",\", n=n, expand=True),\n ds.str.split(pat=\",\", n=n, expand=True),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_str_accessor_expand_more_columns_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_str_accessor_expand_more_columns_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_accessors.py", "file_name": "test_accessors.py", "file_type": "text/x-python", "category": "test", "start_line": 257, "end_line": 275, "span_ids": ["test_str_accessor_expand_more_columns", "test_string_nullable_types"], "tokens": 209}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(reason=\"Need to pad columns\")\ndef test_str_accessor_expand_more_columns():\n s = pd.Series([\"a b c d\", \"aa\", \"aaa bbb ccc dddd\"])\n ds = dd.from_pandas(s, npartitions=2)\n\n assert_eq(s.str.split(n=3, expand=True), ds.str.split(n=3, expand=True))\n\n s = pd.Series([\"a b c\", \"aa bb cc\", \"aaa bbb ccc\"])\n ds = dd.from_pandas(s, npartitions=2)\n\n ds.str.split(n=10, expand=True).compute()\n\n\n@pytest.mark.skipif(not dd._compat.PANDAS_GT_100, reason=\"No StringDtype\")\ndef test_string_nullable_types(df_ddf):\n df, ddf = df_ddf\n assert_eq(ddf.string_col.str.count(\"A\"), df.string_col.str.count(\"A\"))\n assert_eq(ddf.string_col.str.isalpha(), df.string_col.str.isalpha())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_from_datetime_import_date_test_arithmetics._Arithmetics": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_from_datetime_import_date_test_arithmetics._Arithmetics", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 44, "span_ids": ["imports", "test_arithmetics"], "tokens": 643}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from datetime import datetime\nimport warnings\n\nimport pytest\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import is_datetime64_ns_dtype\n\nimport dask.dataframe as dd\nfrom dask.dataframe._compat import PANDAS_GT_100, PANDAS_VERSION\nfrom dask.dataframe.utils import (\n assert_eq,\n assert_dask_graph,\n make_meta,\n HAS_INT_NA,\n PANDAS_GT_0250,\n)\n\n\n@pytest.mark.slow\ndef test_arithmetics():\n dsk = {\n (\"x\", 0): pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]}, index=[0, 1, 3]),\n (\"x\", 1): pd.DataFrame({\"a\": [4, 5, 6], \"b\": [3, 2, 1]}, index=[5, 6, 8]),\n (\"x\", 2): 
pd.DataFrame({\"a\": [7, 8, 9], \"b\": [0, 0, 0]}, index=[9, 9, 9]),\n }\n meta = make_meta({\"a\": \"i8\", \"b\": \"i8\"}, index=pd.Index([], \"i8\"))\n ddf1 = dd.DataFrame(dsk, \"x\", meta, [0, 4, 9, 9])\n pdf1 = ddf1.compute()\n\n pdf2 = pd.DataFrame({\"a\": [1, 2, 3, 4, 5, 6, 7, 8], \"b\": [5, 6, 7, 8, 1, 2, 3, 4]})\n pdf3 = pd.DataFrame({\"a\": [5, 6, 7, 8, 4, 3, 2, 1], \"b\": [2, 4, 5, 3, 4, 2, 1, 0]})\n ddf2 = dd.from_pandas(pdf2, 3)\n ddf3 = dd.from_pandas(pdf3, 2)\n\n dsk4 = {\n (\"y\", 0): pd.DataFrame({\"a\": [3, 2, 1], \"b\": [7, 8, 9]}, index=[0, 1, 3]),\n (\"y\", 1): pd.DataFrame({\"a\": [5, 2, 8], \"b\": [4, 2, 3]}, index=[5, 6, 8]),\n (\"y\", 2): pd.DataFrame({\"a\": [1, 4, 10], \"b\": [1, 0, 5]}, index=[9, 9, 9]),\n }\n ddf4 = dd.DataFrame(dsk4, \"y\", meta, [0, 4, 9, 9])\n pdf4 = ddf4.compute()\n\n # Arithmetics\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_arithmetics.cases_test_arithmetics.ddf8.dd_from_pandas_pdf8_4_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_arithmetics.cases_test_arithmetics.ddf8.dd_from_pandas_pdf8_4_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 45, "end_line": 86, "span_ids": ["test_arithmetics"], "tokens": 728}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.slow\ndef test_arithmetics():\n # ... other code\n cases = [\n (ddf1, ddf1, pdf1, pdf1),\n (ddf1, ddf1.repartition([0, 1, 3, 6, 9]), pdf1, pdf1),\n (ddf2, ddf3, pdf2, pdf3),\n (ddf2.repartition([0, 3, 6, 7]), ddf3.repartition([0, 7]), pdf2, pdf3),\n (ddf2.repartition([0, 7]), ddf3.repartition([0, 2, 4, 5, 7]), pdf2, pdf3),\n (ddf1, ddf4, pdf1, pdf4),\n (ddf1, ddf4.repartition([0, 9]), pdf1, pdf4),\n (ddf1.repartition([0, 3, 9]), ddf4.repartition([0, 5, 9]), pdf1, pdf4),\n # dask + pandas\n (ddf1, pdf4, pdf1, pdf4),\n (ddf2, pdf3, pdf2, pdf3),\n ]\n\n for (l, r, el, er) in cases:\n check_series_arithmetics(l.a, r.b, el.a, er.b)\n check_frame_arithmetics(l, r, el, er)\n\n # different index, pandas raises ValueError in comparison ops\n\n pdf5 = pd.DataFrame(\n {\"a\": [3, 2, 1, 5, 2, 8, 1, 4, 10], \"b\": [7, 8, 9, 4, 2, 3, 1, 0, 5]},\n index=[0, 1, 3, 5, 6, 8, 9, 9, 9],\n )\n ddf5 = dd.from_pandas(pdf5, 2)\n\n pdf6 = pd.DataFrame(\n {\"a\": [3, 2, 1, 5, 2, 8, 1, 4, 10], \"b\": [7, 8, 9, 5, 7, 8, 4, 2, 5]},\n index=[0, 1, 2, 3, 4, 5, 6, 7, 9],\n )\n ddf6 = dd.from_pandas(pdf6, 4)\n\n pdf7 = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5, 6, 7, 8], \"b\": [5, 6, 7, 8, 1, 2, 3, 4]},\n index=list(\"aaabcdeh\"),\n )\n pdf8 = pd.DataFrame(\n {\"a\": [5, 6, 7, 8, 4, 3, 2, 1], \"b\": [2, 4, 5, 3, 4, 2, 1, 0]},\n index=list(\"abcdefgh\"),\n )\n ddf7 = dd.from_pandas(pdf7, 3)\n ddf8 = dd.from_pandas(pdf8, 4)\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_arithmetics.pdf9_test_arithmetics.None_1.check_frame_arithmetics_l": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_arithmetics.pdf9_test_arithmetics.None_1.check_frame_arithmetics_l", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 88, "end_line": 130, "span_ids": ["test_arithmetics"], "tokens": 587}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.slow\ndef test_arithmetics():\n # ... other code\n\n pdf9 = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 4, 5, 6, 7, 8],\n \"b\": [5, 6, 7, 8, 1, 2, 3, 4],\n \"c\": [5, 6, 7, 8, 1, 2, 3, 4],\n },\n index=list(\"aaabcdeh\"),\n )\n pdf10 = pd.DataFrame(\n {\n \"b\": [5, 6, 7, 8, 4, 3, 2, 1],\n \"c\": [2, 4, 5, 3, 4, 2, 1, 0],\n \"d\": [2, 4, 5, 3, 4, 2, 1, 0],\n },\n index=list(\"abcdefgh\"),\n )\n ddf9 = dd.from_pandas(pdf9, 3)\n ddf10 = dd.from_pandas(pdf10, 4)\n\n # Arithmetics with different index\n cases = [\n (ddf5, ddf6, pdf5, pdf6),\n (ddf5.repartition([0, 9]), ddf6, pdf5, pdf6),\n (ddf5.repartition([0, 5, 9]), ddf6.repartition([0, 7, 9]), pdf5, pdf6),\n (ddf7, ddf8, pdf7, pdf8),\n (ddf7.repartition([\"a\", \"c\", \"h\"]), ddf8.repartition([\"a\", \"h\"]), pdf7, pdf8),\n (\n ddf7.repartition([\"a\", \"b\", \"e\", \"h\"]),\n ddf8.repartition([\"a\", \"e\", \"h\"]),\n pdf7,\n pdf8,\n ),\n (ddf9, ddf10, pdf9, pdf10),\n (ddf9.repartition([\"a\", \"c\", \"h\"]), ddf10.repartition([\"a\", \"h\"]), pdf9, pdf10),\n # dask + pandas\n (ddf5, pdf6, pdf5, pdf6),\n (ddf7, pdf8, pdf7, pdf8),\n (ddf9, pdf10, pdf9, pdf10),\n ]\n\n for (l, r, el, er) in cases:\n check_series_arithmetics(l.a, r.b, el.a, er.b, allow_comparison_ops=False)\n check_frame_arithmetics(l, r, el, er, allow_comparison_ops=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_deterministic_arithmetic_names_test_deterministic_arithmetic_names.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_deterministic_arithmetic_names_test_deterministic_arithmetic_names.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 133, "end_line": 139, "span_ids": ["test_deterministic_arithmetic_names"], "tokens": 152}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_deterministic_arithmetic_names():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [5, 6, 7, 8]})\n a = dd.from_pandas(df, npartitions=2)\n\n assert sorted((a.x + a.y ** 2).dask) == sorted((a.x + a.y ** 2).dask)\n assert sorted((a.x + a.y ** 2).dask) != sorted((a.x + a.y ** 3).dask)\n assert sorted((a.x + a.y ** 2).dask) != sorted((a.x - a.y ** 2).dask)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_arithmetics_different_index_test_arithmetics_different_index.ddf6.dd_from_pandas_pdf6_2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_arithmetics_different_index_test_arithmetics_different_index.ddf6.dd_from_pandas_pdf6_2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 142, "end_line": 172, "span_ids": ["test_arithmetics_different_index"], "tokens": 507}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.slow\ndef test_arithmetics_different_index():\n # index are different, but overwraps\n pdf1 = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5], \"b\": [3, 5, 2, 5, 7]}, index=[1, 2, 3, 4, 5]\n )\n ddf1 = dd.from_pandas(pdf1, 2)\n pdf2 = pd.DataFrame(\n {\"a\": [3, 2, 6, 7, 8], \"b\": [9, 4, 2, 6, 2]}, index=[3, 4, 5, 6, 7]\n )\n ddf2 = dd.from_pandas(pdf2, 2)\n\n # index are not overwrapped\n pdf3 = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5], \"b\": [3, 5, 2, 5, 7]}, index=[1, 2, 3, 4, 5]\n )\n ddf3 = dd.from_pandas(pdf3, 2)\n pdf4 = pd.DataFrame(\n {\"a\": [3, 2, 6, 7, 8], \"b\": [9, 4, 2, 6, 2]}, index=[10, 11, 12, 13, 14]\n )\n ddf4 = dd.from_pandas(pdf4, 2)\n\n # index is included in another\n pdf5 = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5], \"b\": [3, 5, 2, 5, 7]}, index=[1, 3, 5, 7, 9]\n )\n ddf5 = dd.from_pandas(pdf5, 2)\n pdf6 = pd.DataFrame(\n {\"a\": [3, 2, 6, 7, 8], \"b\": [9, 4, 2, 6, 2]}, index=[2, 3, 4, 5, 6]\n )\n ddf6 = dd.from_pandas(pdf6, 2)\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_arithmetics_different_index.cases_test_arithmetics_different_index.ddf8.dd_from_pandas_pdf8_2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_arithmetics_different_index.cases_test_arithmetics_different_index.ddf8.dd_from_pandas_pdf8_2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 174, "end_line": 214, "span_ids": ["test_arithmetics_different_index"], "tokens": 718}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.slow\ndef test_arithmetics_different_index():\n # ... other code\n\n cases = [\n (ddf1, ddf2, pdf1, pdf2),\n (ddf2, ddf1, pdf2, pdf1),\n (ddf1.repartition([1, 3, 5]), ddf2.repartition([3, 4, 7]), pdf1, pdf2),\n (ddf2.repartition([3, 4, 5, 7]), ddf1.repartition([1, 2, 4, 5]), pdf2, pdf1),\n (ddf3, ddf4, pdf3, pdf4),\n (ddf4, ddf3, pdf4, pdf3),\n (\n ddf3.repartition([1, 2, 3, 4, 5]),\n ddf4.repartition([10, 11, 12, 13, 14]),\n pdf3,\n pdf4,\n ),\n (ddf4.repartition([10, 14]), ddf3.repartition([1, 3, 4, 5]), pdf4, pdf3),\n (ddf5, ddf6, pdf5, pdf6),\n (ddf6, ddf5, pdf6, pdf5),\n (ddf5.repartition([1, 7, 8, 9]), ddf6.repartition([2, 3, 4, 6]), pdf5, pdf6),\n (ddf6.repartition([2, 6]), ddf5.repartition([1, 3, 7, 9]), pdf6, pdf5),\n # dask + pandas\n (ddf1, pdf2, pdf1, pdf2),\n (ddf2, pdf1, pdf2, pdf1),\n (ddf3, pdf4, pdf3, pdf4),\n (ddf4, pdf3, pdf4, pdf3),\n (ddf5, pdf6, pdf5, pdf6),\n (ddf6, pdf5, pdf6, pdf5),\n ]\n\n for (l, r, el, er) in cases:\n check_series_arithmetics(l.a, r.b, el.a, er.b, allow_comparison_ops=False)\n check_frame_arithmetics(l, r, el, er, allow_comparison_ops=False)\n\n pdf7 = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5, 6, 7, 8], \"b\": [5, 6, 7, 8, 1, 2, 3, 4]},\n index=[0, 2, 4, 8, 9, 10, 11, 13],\n )\n pdf8 = pd.DataFrame(\n {\"a\": [5, 6, 7, 8, 4, 3, 2, 1], \"b\": [2, 4, 5, 3, 4, 2, 1, 0]},\n index=[1, 3, 4, 8, 9, 11, 12, 13],\n )\n ddf7 = dd.from_pandas(pdf7, 3)\n ddf8 = dd.from_pandas(pdf8, 2)\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_arithmetics_different_index.pdf9_test_arithmetics_different_index.None_1.check_frame_arithmetics_l": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_arithmetics_different_index.pdf9_test_arithmetics_different_index.None_1.check_frame_arithmetics_l", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 216, "end_line": 262, "span_ids": ["test_arithmetics_different_index"], "tokens": 618}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.slow\ndef test_arithmetics_different_index():\n # ... other code\n\n pdf9 = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5, 6, 7, 8], \"b\": [5, 6, 7, 8, 1, 2, 3, 4]},\n index=[0, 2, 4, 8, 9, 10, 11, 13],\n )\n pdf10 = pd.DataFrame(\n {\"a\": [5, 6, 7, 8, 4, 3, 2, 1], \"b\": [2, 4, 5, 3, 4, 2, 1, 0]},\n index=[0, 3, 4, 8, 9, 11, 12, 13],\n )\n ddf9 = dd.from_pandas(pdf9, 3)\n ddf10 = dd.from_pandas(pdf10, 2)\n\n cases = [\n (ddf7, ddf8, pdf7, pdf8),\n (ddf8, ddf7, pdf8, pdf7),\n # (ddf7.repartition([0, 13]),\n # ddf8.repartition([0, 4, 11, 14], force=True),\n # pdf7, pdf8),\n (\n ddf8.repartition([-5, 10, 15], force=True),\n ddf7.repartition([-1, 4, 11, 14], force=True),\n pdf8,\n pdf7,\n ),\n (\n ddf7.repartition([0, 8, 12, 13]),\n ddf8.repartition([0, 2, 8, 12, 13], force=True),\n pdf7,\n pdf8,\n ),\n (\n ddf8.repartition([-5, 0, 10, 20], force=True),\n ddf7.repartition([-1, 4, 11, 13], force=True),\n pdf8,\n pdf7,\n ),\n (ddf9, ddf10, pdf9, pdf10),\n (ddf10, ddf9, pdf10, pdf9),\n # dask + pandas\n (ddf7, pdf8, pdf7, pdf8),\n (ddf8, pdf7, pdf8, pdf7),\n (ddf9, pdf10, pdf9, pdf10),\n (ddf10, pdf9, pdf10, pdf9),\n ]\n\n for (l, r, el, er) in cases:\n check_series_arithmetics(l.a, r.b, el.a, er.b, allow_comparison_ops=False)\n check_frame_arithmetics(l, r, el, er, allow_comparison_ops=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_check_series_arithmetics_check_series_arithmetics.None_1.assert_eq_l_r_el": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_check_series_arithmetics_check_series_arithmetics.None_1.assert_eq_l_r_el", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 265, "end_line": 347, "span_ids": 
["check_series_arithmetics"], "tokens": 880}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def check_series_arithmetics(l, r, el, er, allow_comparison_ops=True):\n assert isinstance(l, dd.Series)\n assert isinstance(r, (dd.Series, pd.Series))\n assert isinstance(el, pd.Series)\n assert isinstance(er, pd.Series)\n\n # l, r may be repartitioned, test whether repartition keeps original data\n assert_eq(l, el)\n assert_eq(r, er)\n\n assert_eq(l + r, el + er)\n assert_eq(l * r, el * er)\n assert_eq(l - r, el - er)\n assert_eq(l / r, el / er)\n assert_eq(l // r, el // er)\n assert_eq(l ** r, el ** er)\n assert_eq(l % r, el % er)\n\n if allow_comparison_ops:\n # comparison is allowed if data have same index\n assert_eq(l & r, el & er)\n assert_eq(l | r, el | er)\n assert_eq(l ^ r, el ^ er)\n assert_eq(l > r, el > er)\n assert_eq(l < r, el < er)\n assert_eq(l >= r, el >= er)\n assert_eq(l <= r, el <= er)\n assert_eq(l == r, el == er)\n assert_eq(l != r, el != er)\n assert_eq(l.lt(r), el.lt(er))\n assert_eq(l.gt(r), el.gt(er))\n assert_eq(l.le(r), el.le(er))\n assert_eq(l.ge(r), el.ge(er))\n assert_eq(l.ne(r), el.ne(er))\n assert_eq(l.eq(r), el.eq(er))\n\n assert_eq(l + 2, el + 2)\n assert_eq(l * 2, el * 2)\n assert_eq(l - 2, el - 2)\n assert_eq(l / 2, el / 2)\n assert_eq(l & True, el & True)\n assert_eq(l | True, el | True)\n assert_eq(l ^ True, el ^ True)\n assert_eq(l // 2, el // 2)\n assert_eq(l ** 2, el ** 2)\n assert_eq(l % 2, el % 2)\n assert_eq(l > 2, el > 2)\n assert_eq(l < 2, el < 2)\n assert_eq(l >= 2, el >= 2)\n assert_eq(l <= 2, el <= 2)\n assert_eq(l == 2, el == 2)\n assert_eq(l != 2, el != 2)\n\n assert_eq(2 + r, 2 + er)\n assert_eq(2 * r, 2 * er)\n assert_eq(2 - r, 2 - er)\n assert_eq(2 / r, 2 / er)\n assert_eq(True & r, True & er)\n assert_eq(True | r, True | er)\n assert_eq(True ^ r, True ^ er)\n assert_eq(2 // r, 2 // er)\n assert_eq(2 ** r, 2 ** er)\n assert_eq(2 % r, 2 % er)\n assert_eq(2 > r, 2 > er)\n assert_eq(2 < r, 2 < er)\n assert_eq(2 >= r, 2 >= er)\n assert_eq(2 <= r, 2 <= er)\n assert_eq(2 == r, 2 == er)\n assert_eq(2 != r, 2 != er)\n\n assert_eq(l.lt(2), el.lt(2))\n assert_eq(l.gt(2), el.gt(2))\n assert_eq(l.le(2), el.le(2))\n assert_eq(l.ge(2), el.ge(2))\n assert_eq(l.ne(2), el.ne(2))\n assert_eq(l.eq(2), el.eq(2))\n\n assert_eq(-l, -el)\n assert_eq(abs(l), abs(el))\n\n if allow_comparison_ops:\n # comparison is allowed if data have same index\n assert_eq(~(l == r), ~(el == er))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_check_frame_arithmetics_check_frame_arithmetics.None_1.assert_eq_l_r_el": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_check_frame_arithmetics_check_frame_arithmetics.None_1.assert_eq_l_r_el", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", 
"category": "test", "start_line": 350, "end_line": 431, "span_ids": ["check_frame_arithmetics"], "tokens": 880}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def check_frame_arithmetics(l, r, el, er, allow_comparison_ops=True):\n assert isinstance(l, dd.DataFrame)\n assert isinstance(r, (dd.DataFrame, pd.DataFrame))\n assert isinstance(el, pd.DataFrame)\n assert isinstance(er, pd.DataFrame)\n # l, r may be repartitioned, test whether repartition keeps original data\n assert_eq(l, el)\n assert_eq(r, er)\n\n assert_eq(l + r, el + er)\n assert_eq(l * r, el * er)\n assert_eq(l - r, el - er)\n assert_eq(l / r, el / er)\n assert_eq(l // r, el // er)\n assert_eq(l ** r, el ** er)\n assert_eq(l % r, el % er)\n\n if allow_comparison_ops:\n # comparison is allowed if data have same index\n assert_eq(l & r, el & er)\n assert_eq(l | r, el | er)\n assert_eq(l ^ r, el ^ er)\n assert_eq(l > r, el > er)\n assert_eq(l < r, el < er)\n assert_eq(l >= r, el >= er)\n assert_eq(l <= r, el <= er)\n assert_eq(l == r, el == er)\n assert_eq(l != r, el != er)\n assert_eq(l.lt(r), el.lt(er))\n assert_eq(l.gt(r), el.gt(er))\n assert_eq(l.le(r), el.le(er))\n assert_eq(l.ge(r), el.ge(er))\n assert_eq(l.ne(r), el.ne(er))\n assert_eq(l.eq(r), el.eq(er))\n\n assert_eq(l + 2, el + 2)\n assert_eq(l * 2, el * 2)\n assert_eq(l - 2, el - 2)\n assert_eq(l / 2, el / 2)\n assert_eq(l & True, el & True)\n assert_eq(l | True, el | True)\n assert_eq(l ^ True, el ^ True)\n assert_eq(l // 2, el // 2)\n assert_eq(l ** 2, el ** 2)\n assert_eq(l % 2, el % 2)\n assert_eq(l > 2, el > 2)\n assert_eq(l < 2, el < 2)\n assert_eq(l >= 2, el >= 2)\n assert_eq(l <= 2, el <= 2)\n assert_eq(l == 2, el == 2)\n assert_eq(l != 2, el != 2)\n\n assert_eq(2 + l, 2 + el)\n assert_eq(2 * l, 2 * el)\n assert_eq(2 - l, 2 - el)\n assert_eq(2 / l, 2 / el)\n assert_eq(True & l, True & el)\n assert_eq(True | l, True | el)\n assert_eq(True ^ l, True ^ el)\n assert_eq(2 // l, 2 // el)\n assert_eq(2 ** l, 2 ** el)\n assert_eq(2 % l, 2 % el)\n assert_eq(2 > l, 2 > el)\n assert_eq(2 < l, 2 < el)\n assert_eq(2 >= l, 2 >= el)\n assert_eq(2 <= l, 2 <= el)\n assert_eq(2 == l, 2 == el)\n assert_eq(2 != l, 2 != el)\n\n assert_eq(l.lt(2), el.lt(2))\n assert_eq(l.gt(2), el.gt(2))\n assert_eq(l.le(2), el.le(2))\n assert_eq(l.ge(2), el.ge(2))\n assert_eq(l.ne(2), el.ne(2))\n assert_eq(l.eq(2), el.eq(2))\n\n assert_eq(-l, -el)\n assert_eq(abs(l), abs(el))\n\n if allow_comparison_ops:\n # comparison is allowed if data have same index\n assert_eq(~(l == r), ~(el == er))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_scalar_arithmetics_test_scalar_arithmetics.assert_eq_l_r_el": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_scalar_arithmetics_test_scalar_arithmetics.assert_eq_l_r_el", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": 
"test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 434, "end_line": 501, "span_ids": ["test_scalar_arithmetics"], "tokens": 717}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_scalar_arithmetics():\n el = np.int64(10)\n er = np.int64(4)\n l = dd.core.Scalar({(\"l\", 0): el}, \"l\", \"i8\")\n r = dd.core.Scalar({(\"r\", 0): er}, \"r\", \"i8\")\n\n assert isinstance(l, dd.core.Scalar)\n assert isinstance(r, dd.core.Scalar)\n\n assert_eq(l, el)\n assert_eq(r, er)\n\n assert_eq(l + r, el + er)\n assert_eq(l * r, el * er)\n assert_eq(l - r, el - er)\n assert_eq(l / r, el / er)\n assert_eq(l // r, el // er)\n assert_eq(l ** r, el ** er)\n assert_eq(l % r, el % er)\n\n assert_eq(l & r, el & er)\n assert_eq(l | r, el | er)\n assert_eq(l ^ r, el ^ er)\n assert_eq(l > r, el > er)\n assert_eq(l < r, el < er)\n assert_eq(l >= r, el >= er)\n assert_eq(l <= r, el <= er)\n assert_eq(l == r, el == er)\n assert_eq(l != r, el != er)\n\n assert_eq(l + 2, el + 2)\n assert_eq(l * 2, el * 2)\n assert_eq(l - 2, el - 2)\n assert_eq(l / 2, el / 2)\n assert_eq(l & True, el & True)\n assert_eq(l | True, el | True)\n assert_eq(l ^ True, el ^ True)\n assert_eq(l // 2, el // 2)\n assert_eq(l ** 2, el ** 2)\n assert_eq(l % 2, el % 2)\n assert_eq(l > 2, el > 2)\n assert_eq(l < 2, el < 2)\n assert_eq(l >= 2, el >= 2)\n assert_eq(l <= 2, el <= 2)\n assert_eq(l == 2, el == 2)\n assert_eq(l != 2, el != 2)\n\n assert_eq(2 + r, 2 + er)\n assert_eq(2 * r, 2 * er)\n assert_eq(2 - r, 2 - er)\n assert_eq(2 / r, 2 / er)\n assert_eq(True & r, True & er)\n assert_eq(True | r, True | er)\n assert_eq(True ^ r, True ^ er)\n assert_eq(2 // r, 2 // er)\n assert_eq(2 ** r, 2 ** er)\n assert_eq(2 % r, 2 % er)\n assert_eq(2 > r, 2 > er)\n assert_eq(2 < r, 2 < er)\n assert_eq(2 >= r, 2 >= er)\n assert_eq(2 <= r, 2 <= er)\n assert_eq(2 == r, 2 == er)\n assert_eq(2 != r, 2 != er)\n\n assert_eq(-l, -el)\n assert_eq(abs(l), abs(el))\n\n assert_eq(~(l == r), ~(el == er))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_scalar_arithmetics_with_dask_instances_test_scalar_arithmetics_with_dask_instances.None_7": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_scalar_arithmetics_with_dask_instances_test_scalar_arithmetics_with_dask_instances.None_7", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 504, "end_line": 548, "span_ids": ["test_scalar_arithmetics_with_dask_instances"], "tokens": 428}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_scalar_arithmetics_with_dask_instances():\n s = dd.core.Scalar({(\"s\", 0): 10}, \"s\", \"i8\")\n e = 10\n\n pds = pd.Series([1, 2, 3, 4, 5, 6, 7])\n dds = dd.from_pandas(pds, 2)\n\n pdf = pd.DataFrame({\"a\": [1, 2, 3, 4, 5, 6, 7], \"b\": [7, 6, 5, 4, 3, 2, 1]})\n ddf = dd.from_pandas(pdf, 2)\n\n # pandas Series\n result = pds + s # this result pd.Series (automatically computed)\n assert isinstance(result, pd.Series)\n assert_eq(result, pds + e)\n\n result = s + pds # this result dd.Series\n assert isinstance(result, dd.Series)\n assert_eq(result, pds + e)\n\n # dask Series\n result = dds + s # this result dd.Series\n assert isinstance(result, dd.Series)\n assert_eq(result, pds + e)\n\n result = s + dds # this result dd.Series\n assert isinstance(result, dd.Series)\n assert_eq(result, pds + e)\n\n # pandas DataFrame\n result = pdf + s # this result pd.DataFrame (automatically computed)\n assert isinstance(result, pd.DataFrame)\n assert_eq(result, pdf + e)\n\n result = s + pdf # this result dd.DataFrame\n assert isinstance(result, dd.DataFrame)\n assert_eq(result, pdf + e)\n\n # dask DataFrame\n result = ddf + s # this result dd.DataFrame\n assert isinstance(result, dd.DataFrame)\n assert_eq(result, pdf + e)\n\n result = s + ddf # this result dd.DataFrame\n assert isinstance(result, dd.DataFrame)\n assert_eq(result, pdf + e)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_frame_series_arithmetic_methods_test_frame_series_arithmetic_methods.s.dd_core_Scalar_s_0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_frame_series_arithmetic_methods_test_frame_series_arithmetic_methods.s.dd_core_Scalar_s_0_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 551, "end_line": 578, "span_ids": ["test_frame_series_arithmetic_methods"], "tokens": 270}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(\n PANDAS_VERSION == \"1.0.2\",\n reason=\"https://github.com/pandas-dev/pandas/issues/32685\",\n)\ndef test_frame_series_arithmetic_methods():\n pdf1 = pd.DataFrame(\n {\n \"A\": np.arange(10),\n \"B\": [np.nan, 1, 2, 3, 4] * 2,\n \"C\": [np.nan] * 10,\n \"D\": np.arange(10),\n },\n index=list(\"abcdefghij\"),\n columns=list(\"ABCD\"),\n )\n pdf2 = pd.DataFrame(\n np.random.randn(10, 4), index=list(\"abcdefghjk\"), columns=list(\"ABCX\")\n )\n ps1 = pdf1.A\n ps2 = pdf2.A\n ps3 = pd.Series(np.random.randn(10), index=list(\"ABCDXabcde\"))\n\n ddf1 = dd.from_pandas(pdf1, 2)\n ddf2 = dd.from_pandas(pdf2, 2)\n ds1 = ddf1.A\n ds2 = ddf2.A\n\n s = dd.core.Scalar({(\"s\", 0): 4}, \"s\", \"i8\")\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_frame_series_arithmetic_methods.for_l_r_el_er_in__test_frame_series_arithmetic_methods.for_l_r_el_er_in_.None_1.assert_eq_l_rmod_r_fill_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_frame_series_arithmetic_methods.for_l_r_el_er_in__test_frame_series_arithmetic_methods.for_l_r_el_er_in_.None_1.assert_eq_l_rmod_r_fill_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 580, "end_line": 622, "span_ids": ["test_frame_series_arithmetic_methods"], "tokens": 755}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(\n PANDAS_VERSION == \"1.0.2\",\n reason=\"https://github.com/pandas-dev/pandas/issues/32685\",\n)\ndef test_frame_series_arithmetic_methods():\n # ... other code\n\n for l, r, el, er in [\n (ddf1, ddf2, pdf1, pdf2),\n (ds1, ds2, ps1, ps2),\n (ddf1.repartition([\"a\", \"f\", \"j\"]), ddf2, pdf1, pdf2),\n (ds1.repartition([\"a\", \"b\", \"f\", \"j\"]), ds2, ps1, ps2),\n (ddf1, ddf2.repartition([\"a\", \"k\"]), pdf1, pdf2),\n (ds1, ds2.repartition([\"a\", \"b\", \"d\", \"h\", \"k\"]), ps1, ps2),\n (ddf1, 3, pdf1, 3),\n (ds1, 3, ps1, 3),\n (ddf1, s, pdf1, 4),\n (ds1, s, ps1, 4),\n ]:\n # l, r may be repartitioned, test whether repartition keeps original data\n assert_eq(l, el)\n assert_eq(r, er)\n\n assert_eq(l.add(r, fill_value=0), el.add(er, fill_value=0))\n assert_eq(l.sub(r, fill_value=0), el.sub(er, fill_value=0))\n assert_eq(l.mul(r, fill_value=0), el.mul(er, fill_value=0))\n with warnings.catch_warnings():\n # pandas-26793\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n assert_eq(l.div(r, fill_value=0), el.div(er, fill_value=0))\n assert_eq(l.divide(r, fill_value=0), el.divide(er, fill_value=0))\n assert_eq(l.truediv(r, fill_value=0), el.truediv(er, fill_value=0))\n assert_eq(l.floordiv(r, fill_value=1), el.floordiv(er, fill_value=1))\n assert_eq(l.pow(r, fill_value=0), el.pow(er, fill_value=0))\n assert_eq(l.mod(r, fill_value=0), el.mod(er, fill_value=0))\n\n assert_eq(l.radd(r, fill_value=0), el.radd(er, fill_value=0))\n assert_eq(l.rsub(r, fill_value=0), el.rsub(er, fill_value=0))\n assert_eq(l.rmul(r, fill_value=0), el.rmul(er, fill_value=0))\n with warnings.catch_warnings():\n # pandas-26793\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n\n assert_eq(l.rdiv(r, fill_value=0), el.rdiv(er, fill_value=0))\n assert_eq(l.rtruediv(r, fill_value=0), el.rtruediv(er, fill_value=0))\n if not PANDAS_GT_0250:\n # https://github.com/pandas-dev/pandas/issues/27464\n assert_eq(l.rfloordiv(r, fill_value=1), el.rfloordiv(er, fill_value=1))\n assert_eq(l.rpow(r, fill_value=0), el.rpow(er, fill_value=0))\n assert_eq(l.rmod(r, fill_value=0), el.rmod(er, fill_value=0))\n # 
... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_frame_series_arithmetic_methods.for_l_r_el_er_in_ddf_test_frame_series_arithmetic_methods.for_l_r_el_er_in_ddf.pytest_raises_ValueError_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_frame_series_arithmetic_methods.for_l_r_el_er_in_ddf_test_frame_series_arithmetic_methods.for_l_r_el_er_in_ddf.pytest_raises_ValueError_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 624, "end_line": 651, "span_ids": ["test_frame_series_arithmetic_methods"], "tokens": 536}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(\n PANDAS_VERSION == \"1.0.2\",\n reason=\"https://github.com/pandas-dev/pandas/issues/32685\",\n)\ndef test_frame_series_arithmetic_methods():\n # ... other code\n\n for l, r, el, er in [(ddf1, ds2, pdf1, ps2), (ddf1, ddf2.X, pdf1, pdf2.X)]:\n assert_eq(l, el)\n assert_eq(r, er)\n\n # must specify axis=0 to add Series to each column\n # axis=1 is not supported (add to each row)\n assert_eq(l.add(r, axis=0), el.add(er, axis=0))\n assert_eq(l.sub(r, axis=0), el.sub(er, axis=0))\n assert_eq(l.mul(r, axis=0), el.mul(er, axis=0))\n assert_eq(l.div(r, axis=0), el.div(er, axis=0))\n assert_eq(l.divide(r, axis=0), el.divide(er, axis=0))\n assert_eq(l.truediv(r, axis=0), el.truediv(er, axis=0))\n assert_eq(l.floordiv(r, axis=0), el.floordiv(er, axis=0))\n assert_eq(l.mod(r, axis=0), el.mod(er, axis=0))\n assert_eq(l.pow(r, axis=0), el.pow(er, axis=0))\n\n assert_eq(l.radd(r, axis=0), el.radd(er, axis=0))\n assert_eq(l.rsub(r, axis=0), el.rsub(er, axis=0))\n assert_eq(l.rmul(r, axis=0), el.rmul(er, axis=0))\n assert_eq(l.rdiv(r, axis=0), el.rdiv(er, axis=0))\n assert_eq(l.rtruediv(r, axis=0), el.rtruediv(er, axis=0))\n if not PANDAS_GT_0250:\n # https://github.com/pandas-dev/pandas/issues/27464\n assert_eq(l.rfloordiv(r, axis=0), el.rfloordiv(er, axis=0))\n assert_eq(l.rmod(r, axis=0), el.rmod(er, axis=0))\n assert_eq(l.rpow(r, axis=0), el.rpow(er, axis=0))\n\n pytest.raises(ValueError, lambda: l.add(r, axis=1))\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_frame_series_arithmetic_methods.None_2_test_frame_series_arithmetic_methods.None_2.for_axis_in_0_1_index.assert_eq_l_rmul_r_axis_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_frame_series_arithmetic_methods.None_2_test_frame_series_arithmetic_methods.None_2.for_axis_in_0_1_index.assert_eq_l_rmul_r_axis_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 653, "end_line": 680, "span_ids": ["test_frame_series_arithmetic_methods"], "tokens": 533}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(\n PANDAS_VERSION == \"1.0.2\",\n reason=\"https://github.com/pandas-dev/pandas/issues/32685\",\n)\ndef test_frame_series_arithmetic_methods():\n # ... other code\n\n for l, r, el, er in [(ddf1, pdf2, pdf1, pdf2), (ddf1, ps3, pdf1, ps3)]:\n assert_eq(l, el)\n assert_eq(r, er)\n\n for axis in [0, 1, \"index\", \"columns\"]:\n assert_eq(l.add(r, axis=axis), el.add(er, axis=axis))\n assert_eq(l.sub(r, axis=axis), el.sub(er, axis=axis))\n assert_eq(l.mul(r, axis=axis), el.mul(er, axis=axis))\n assert_eq(l.div(r, axis=axis), el.div(er, axis=axis))\n assert_eq(l.divide(r, axis=axis), el.divide(er, axis=axis))\n assert_eq(l.truediv(r, axis=axis), el.truediv(er, axis=axis))\n with warnings.catch_warnings():\n # https://github.com/pandas-dev/pandas/issues/26793\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n assert_eq(l.floordiv(r, axis=axis), el.floordiv(er, axis=axis))\n assert_eq(l.mod(r, axis=axis), el.mod(er, axis=axis))\n assert_eq(l.pow(r, axis=axis), el.pow(er, axis=axis))\n assert_eq(l.rdiv(r, axis=axis), el.rdiv(er, axis=axis))\n assert_eq(l.rtruediv(r, axis=axis), el.rtruediv(er, axis=axis))\n if not PANDAS_GT_0250:\n # https://github.com/pandas-dev/pandas/issues/27464\n assert_eq(l.rfloordiv(r, axis=axis), el.rfloordiv(er, axis=axis))\n assert_eq(l.rpow(r, axis=axis), el.rpow(er, axis=axis))\n assert_eq(l.rmod(r, axis=axis), el.rmod(er, axis=axis))\n\n assert_eq(l.radd(r, axis=axis), el.radd(er, axis=axis))\n assert_eq(l.rsub(r, axis=axis), el.rsub(er, axis=axis))\n assert_eq(l.rmul(r, axis=axis), el.rmul(er, axis=axis))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_test_reductions.boolds.dd_from_pandas_bools_2_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_test_reductions.boolds.dd_from_pandas_bools_2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 683, "end_line": 714, "span_ids": ["test_reductions"], "tokens": 436}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"split_every\", [False, 2])\ndef test_reductions(split_every):\n dsk = {\n (\"x\", 0): pd.DataFrame(\n {\"a\": [1, 2, 3], \"b\": [4, 5, 6], \"c\": [True, True, False]}, index=[0, 1, 3]\n ),\n (\"x\", 1): pd.DataFrame(\n {\"a\": [4, 5, 6], \"b\": [3, 2, 1], \"c\": [False, False, False]},\n index=[5, 6, 8],\n ),\n (\"x\", 2): pd.DataFrame(\n {\n \"a\": [13094304034, 3489385935, 100006774],\n \"b\": [0, 0, 0],\n \"c\": [True, True, True],\n },\n index=[9, 9, 9],\n ),\n }\n meta = make_meta({\"a\": \"i8\", \"b\": \"i8\", \"c\": \"bool\"}, index=pd.Index([], \"i8\"))\n ddf1 = dd.DataFrame(dsk, \"x\", meta, [0, 4, 9, 9])\n pdf1 = ddf1.compute()\n\n nans1 = pd.Series([1] + [np.nan] * 4 + [2] + [np.nan] * 3)\n nands1 = dd.from_pandas(nans1, 2)\n nans2 = pd.Series([1] + [np.nan] * 8)\n nands2 = dd.from_pandas(nans2, 2)\n nans3 = pd.Series([np.nan] * 9)\n nands3 = dd.from_pandas(nans3, 2)\n\n bools = pd.Series([True, False, True, False, True], dtype=bool)\n boolds = dd.from_pandas(bools, 2)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions.for_dds_pds_in__test_reductions.for_dds_pds_in_.None_17": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions.for_dds_pds_in__test_reductions.for_dds_pds_in_.None_17", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 716, "end_line": 779, "span_ids": ["test_reductions"], "tokens": 812}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"split_every\", [False, 2])\ndef test_reductions(split_every):\n # ... 
other code\n\n for dds, pds in [\n (ddf1.a, pdf1.a),\n (ddf1.b, pdf1.b),\n (ddf1.c, pdf1.c),\n (ddf1[\"a\"], pdf1[\"a\"]),\n (ddf1[\"b\"], pdf1[\"b\"]),\n (nands1, nans1),\n (nands2, nans2),\n (nands3, nans3),\n (boolds, bools),\n ]:\n assert isinstance(dds, dd.Series)\n assert isinstance(pds, pd.Series)\n\n assert_eq(dds.sum(split_every=split_every), pds.sum())\n assert_eq(dds.prod(split_every=split_every), pds.prod())\n assert_eq(dds.min(split_every=split_every), pds.min())\n assert_eq(dds.max(split_every=split_every), pds.max())\n assert_eq(dds.count(split_every=split_every), pds.count())\n\n with pytest.warns(None):\n # runtime warnings; https://github.com/dask/dask/issues/2381\n assert_eq(dds.std(split_every=split_every), pds.std())\n with pytest.warns(None):\n # runtime warnings; https://github.com/dask/dask/issues/2381\n assert_eq(dds.var(split_every=split_every), pds.var())\n with pytest.warns(None):\n # runtime warnings; https://github.com/dask/dask/issues/2381\n assert_eq(dds.sem(split_every=split_every), pds.sem())\n\n with warnings.catch_warnings():\n # dask.dataframe should probably filter this, to match pandas, but\n # it seems quite difficult.\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n assert_eq(dds.std(ddof=0, split_every=split_every), pds.std(ddof=0))\n assert_eq(dds.var(ddof=0, split_every=split_every), pds.var(ddof=0))\n assert_eq(dds.sem(ddof=0, split_every=split_every), pds.sem(ddof=0))\n assert_eq(dds.mean(split_every=split_every), pds.mean())\n assert_eq(dds.nunique(split_every=split_every), pds.nunique())\n\n assert_eq(dds.sum(skipna=False, split_every=split_every), pds.sum(skipna=False))\n assert_eq(\n dds.prod(skipna=False, split_every=split_every), pds.prod(skipna=False)\n )\n assert_eq(dds.min(skipna=False, split_every=split_every), pds.min(skipna=False))\n assert_eq(dds.max(skipna=False, split_every=split_every), pds.max(skipna=False))\n assert_eq(dds.std(skipna=False, split_every=split_every), pds.std(skipna=False))\n assert_eq(dds.var(skipna=False, split_every=split_every), pds.var(skipna=False))\n assert_eq(dds.sem(skipna=False, split_every=split_every), pds.sem(skipna=False))\n assert_eq(\n dds.std(skipna=False, ddof=0, split_every=split_every),\n pds.std(skipna=False, ddof=0),\n )\n assert_eq(\n dds.var(skipna=False, ddof=0, split_every=split_every),\n pds.var(skipna=False, ddof=0),\n )\n assert_eq(\n dds.sem(skipna=False, ddof=0, split_every=split_every),\n pds.sem(skipna=False, ddof=0),\n )\n assert_eq(\n dds.mean(skipna=False, split_every=split_every), pds.mean(skipna=False)\n )\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions.assert_dask_graph_ddf1_b__test_reductions.assert_eq_ddf1_index_coun": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions.assert_dask_graph_ddf1_b__test_reductions.assert_eq_ddf1_index_coun", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 781, "end_line": 799, "span_ids": ["test_reductions"], "tokens": 384}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"split_every\", [False, 2])\ndef test_reductions(split_every):\n # ... other code\n\n assert_dask_graph(ddf1.b.sum(split_every=split_every), \"series-sum\")\n assert_dask_graph(ddf1.b.prod(split_every=split_every), \"series-prod\")\n assert_dask_graph(ddf1.b.min(split_every=split_every), \"series-min\")\n assert_dask_graph(ddf1.b.max(split_every=split_every), \"series-max\")\n assert_dask_graph(ddf1.b.count(split_every=split_every), \"series-count\")\n assert_dask_graph(ddf1.b.std(split_every=split_every), \"series-std\")\n assert_dask_graph(ddf1.b.var(split_every=split_every), \"series-var\")\n assert_dask_graph(ddf1.b.sem(split_every=split_every), \"series-sem\")\n assert_dask_graph(ddf1.b.std(ddof=0, split_every=split_every), \"series-std\")\n assert_dask_graph(ddf1.b.var(ddof=0, split_every=split_every), \"series-var\")\n assert_dask_graph(ddf1.b.sem(ddof=0, split_every=split_every), \"series-sem\")\n assert_dask_graph(ddf1.b.mean(split_every=split_every), \"series-mean\")\n # nunique is performed using drop-duplicates\n assert_dask_graph(ddf1.b.nunique(split_every=split_every), \"drop-duplicates\")\n\n # testing index\n assert_eq(ddf1.index.min(split_every=split_every), pdf1.index.min())\n assert_eq(ddf1.index.max(split_every=split_every), pdf1.index.max())\n assert_eq(ddf1.index.count(split_every=split_every), pd.notnull(pdf1.index).sum())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_timedelta_test_reductions_timedelta.assert_eq_dds_count_split": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_timedelta_test_reductions_timedelta.assert_eq_dds_count_split", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 802, 
"end_line": 810, "span_ids": ["test_reductions_timedelta"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"split_every\", [False, 2])\ndef test_reductions_timedelta(split_every):\n ds = pd.Series(pd.to_timedelta([2, 3, 4, np.nan, 5]))\n dds = dd.from_pandas(ds, 2)\n\n assert_eq(dds.sum(split_every=split_every), ds.sum())\n assert_eq(dds.min(split_every=split_every), ds.min())\n assert_eq(dds.max(split_every=split_every), ds.max())\n assert_eq(dds.count(split_every=split_every), ds.count())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_out_test_reductions_out.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_out_test_reductions_out.None_4", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 813, "end_line": 855, "span_ids": ["test_reductions_out"], "tokens": 464}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"frame,axis,out\",\n [\n (\n pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]}, index=[0, 1, 3]),\n 0,\n pd.Series([], dtype=\"float64\"),\n ),\n (\n pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]}, index=[0, 1, 3]),\n 1,\n pd.Series([], dtype=\"float64\"),\n ),\n (pd.Series([1, 2.5, 6]), None, None),\n ],\n)\n@pytest.mark.parametrize(\"redfunc\", [\"sum\", \"prod\", \"min\", \"max\", \"mean\", \"var\", \"std\"])\ndef test_reductions_out(frame, axis, out, redfunc):\n dsk_in = dd.from_pandas(frame, 3)\n dsk_out = dd.from_pandas(pd.Series([0]), 1).sum()\n\n if out is not None:\n dsk_out = dd.from_pandas(out, 3)\n\n np_redfunc = getattr(np, redfunc)\n pd_redfunc = getattr(frame.__class__, redfunc)\n dsk_redfunc = getattr(dsk_in.__class__, redfunc)\n\n if redfunc in [\"var\", \"std\"]:\n # numpy has default ddof value 0 while\n # dask and pandas have 1, so ddof should be passed\n # explicitly when calling np.var(dask)\n np_redfunc(dsk_in, axis=axis, ddof=1, out=dsk_out)\n else:\n np_redfunc(dsk_in, axis=axis, out=dsk_out)\n\n assert_eq(dsk_out, pd_redfunc(frame, axis=axis))\n\n dsk_redfunc(dsk_in, axis=axis, split_every=False, out=dsk_out)\n assert_eq(dsk_out, pd_redfunc(frame, axis=axis))\n\n dsk_redfunc(dsk_in, axis=axis, split_every=2, out=dsk_out)\n assert_eq(dsk_out, pd_redfunc(frame, axis=axis))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", 
"class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_allany_test_allany.None_19": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_allany_test_allany.None_19", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 858, "end_line": 904, "span_ids": ["test_allany"], "tokens": 537}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"split_every\", [False, 2])\ndef test_allany(split_every):\n df = pd.DataFrame(\n np.random.choice([True, False], size=(100, 4)), columns=[\"A\", \"B\", \"C\", \"D\"]\n )\n df[\"E\"] = list(\"abcde\") * 20\n ddf = dd.from_pandas(df, 10)\n\n assert_eq(ddf.all(split_every=split_every), df.all())\n assert_eq(ddf.all(axis=1, split_every=split_every), df.all(axis=1))\n assert_eq(ddf.all(axis=0, split_every=split_every), df.all(axis=0))\n\n assert_eq(ddf.any(split_every=split_every), df.any())\n assert_eq(ddf.any(axis=1, split_every=split_every), df.any(axis=1))\n assert_eq(ddf.any(axis=0, split_every=split_every), df.any(axis=0))\n\n assert_eq(ddf.A.all(split_every=split_every), df.A.all())\n assert_eq(ddf.A.any(split_every=split_every), df.A.any())\n\n # testing numpy functions with out param\n ddf_out_axis_default = dd.from_pandas(\n pd.Series([False, False, False, False, False], index=[\"A\", \"B\", \"C\", \"D\", \"E\"]),\n 10,\n )\n ddf_out_axis1 = dd.from_pandas(\n pd.Series(np.random.choice([True, False], size=(100,))), 10\n )\n\n # all\n ddf.all(split_every=split_every, out=ddf_out_axis_default)\n assert_eq(ddf_out_axis_default, df.all())\n\n ddf.all(axis=1, split_every=split_every, out=ddf_out_axis1)\n assert_eq(ddf_out_axis1, df.all(axis=1))\n\n ddf.all(split_every=split_every, axis=0, out=ddf_out_axis_default)\n assert_eq(ddf_out_axis_default, df.all(axis=0))\n\n # any\n ddf.any(split_every=split_every, out=ddf_out_axis_default)\n assert_eq(ddf_out_axis_default, df.any())\n\n ddf.any(axis=1, split_every=split_every, out=ddf_out_axis1)\n assert_eq(ddf_out_axis1, df.any(axis=1))\n\n ddf.any(split_every=split_every, axis=0, out=ddf_out_axis_default)\n assert_eq(ddf_out_axis_default, df.any(axis=0))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_deterministic_reduction_names_test_deterministic_reduction_names.assert_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_deterministic_reduction_names_test_deterministic_reduction_names.assert_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": 
"test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 907, "end_line": 947, "span_ids": ["test_deterministic_reduction_names"], "tokens": 359}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"split_every\", [False, 2])\ndef test_deterministic_reduction_names(split_every):\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [5, 6, 7, 8]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n for x in [ddf, ddf.x]:\n assert (\n x.sum(split_every=split_every)._name == x.sum(split_every=split_every)._name\n )\n assert (\n x.prod(split_every=split_every)._name\n == x.prod(split_every=split_every)._name\n )\n assert (\n x.min(split_every=split_every)._name == x.min(split_every=split_every)._name\n )\n assert (\n x.max(split_every=split_every)._name == x.max(split_every=split_every)._name\n )\n assert (\n x.count(split_every=split_every)._name\n == x.count(split_every=split_every)._name\n )\n assert (\n x.std(split_every=split_every)._name == x.std(split_every=split_every)._name\n )\n assert (\n x.var(split_every=split_every)._name == x.var(split_every=split_every)._name\n )\n assert (\n x.sem(split_every=split_every)._name == x.sem(split_every=split_every)._name\n )\n assert (\n x.mean(split_every=split_every)._name\n == x.mean(split_every=split_every)._name\n )\n\n assert (\n ddf.x.nunique(split_every=split_every)._name\n == ddf.x.nunique(split_every=split_every)._name\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reduction_series_invalid_axis_test_reduction_series_invalid_axis.for_axis_in_1_columns_.for_s_in_ddf1_a_pdf1_a_.None_8": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reduction_series_invalid_axis_test_reduction_series_invalid_axis.for_axis_in_1_columns_.for_s_in_ddf1_a_pdf1_a_.None_8", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 950, "end_line": 971, "span_ids": ["test_reduction_series_invalid_axis"], "tokens": 364}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_reduction_series_invalid_axis():\n dsk = {\n (\"x\", 0): pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]}, index=[0, 1, 3]),\n (\"x\", 1): pd.DataFrame({\"a\": [4, 5, 6], \"b\": [3, 2, 1]}, index=[5, 6, 8]),\n (\"x\", 2): pd.DataFrame({\"a\": [7, 8, 9], \"b\": [0, 0, 0]}, index=[9, 9, 9]),\n }\n meta = make_meta({\"a\": \"i8\", \"b\": \"i8\"}, index=pd.Index([], \"i8\"))\n ddf1 = dd.DataFrame(dsk, \"x\", meta, [0, 4, 9, 9])\n pdf1 
= ddf1.compute()\n\n for axis in [1, \"columns\"]:\n for s in [ddf1.a, pdf1.a]: # both must behave the same\n pytest.raises(ValueError, lambda: s.sum(axis=axis))\n pytest.raises(ValueError, lambda: s.prod(axis=axis))\n pytest.raises(ValueError, lambda: s.min(axis=axis))\n pytest.raises(ValueError, lambda: s.max(axis=axis))\n # only count doesn't have axis keyword\n pytest.raises(TypeError, lambda: s.count(axis=axis))\n pytest.raises(ValueError, lambda: s.std(axis=axis))\n pytest.raises(ValueError, lambda: s.var(axis=axis))\n pytest.raises(ValueError, lambda: s.sem(axis=axis))\n pytest.raises(ValueError, lambda: s.mean(axis=axis))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_non_numeric_dtypes_test_reductions_non_numeric_dtypes.None_13": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_non_numeric_dtypes_test_reductions_non_numeric_dtypes.None_13", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 974, "end_line": 1030, "span_ids": ["test_reductions_non_numeric_dtypes"], "tokens": 648}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_reductions_non_numeric_dtypes():\n # test non-numeric blocks\n\n def check_raises(d, p, func):\n pytest.raises((TypeError, ValueError), lambda: getattr(d, func)().compute())\n pytest.raises((TypeError, ValueError), lambda: getattr(p, func)())\n\n pds = pd.Series([\"a\", \"b\", \"c\", \"d\", \"e\"])\n dds = dd.from_pandas(pds, 2)\n assert_eq(dds.sum(), pds.sum())\n check_raises(dds, pds, \"prod\")\n assert_eq(dds.min(), pds.min())\n assert_eq(dds.max(), pds.max())\n assert_eq(dds.count(), pds.count())\n check_raises(dds, pds, \"std\")\n check_raises(dds, pds, \"var\")\n check_raises(dds, pds, \"sem\")\n if not PANDAS_GT_0250:\n # pandas 0.25 added DatetimeIndex.mean. We need to follow.\n check_raises(dds, pds, \"mean\")\n assert_eq(dds.nunique(), pds.nunique())\n\n for pds in [\n pd.Series(pd.Categorical([1, 2, 3, 4, 5], ordered=True)),\n pd.Series(pd.Categorical(list(\"abcde\"), ordered=True)),\n pd.Series(pd.date_range(\"2011-01-01\", freq=\"D\", periods=5)),\n ]:\n dds = dd.from_pandas(pds, 2)\n\n check_raises(dds, pds, \"sum\")\n check_raises(dds, pds, \"prod\")\n assert_eq(dds.min(), pds.min())\n assert_eq(dds.max(), pds.max())\n assert_eq(dds.count(), pds.count())\n check_raises(dds, pds, \"std\")\n check_raises(dds, pds, \"var\")\n check_raises(dds, pds, \"sem\")\n if not (PANDAS_GT_0250 and is_datetime64_ns_dtype(pds.dtype)):\n # pandas 0.25 added DatetimeIndex.mean. 
We need to follow\n check_raises(dds, pds, \"mean\")\n assert_eq(dds.nunique(), pds.nunique())\n\n pds = pd.Series(pd.timedelta_range(\"1 days\", freq=\"D\", periods=5))\n dds = dd.from_pandas(pds, 2)\n assert_eq(dds.sum(), pds.sum())\n assert_eq(dds.min(), pds.min())\n assert_eq(dds.max(), pds.max())\n assert_eq(dds.count(), pds.count())\n\n # ToDo: pandas supports timedelta std, dask returns float64\n # assert_eq(dds.std(), pds.std())\n\n # ToDo: pandas supports timedelta mean, but dask raises:\n # TypeError: unsupported operand type(s) for *: 'float' and 'Timedelta'\n # assert_eq(dds.mean(), pds.mean())\n\n assert_eq(dds.nunique(), pds.nunique())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame_test_reductions_frame.assert_eq_ddf1_mean_split": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame_test_reductions_frame.assert_eq_ddf1_mean_split", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 1033, "end_line": 1055, "span_ids": ["test_reductions_frame"], "tokens": 457}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"split_every\", [False, 2])\ndef test_reductions_frame(split_every):\n dsk = {\n (\"x\", 0): pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]}, index=[0, 1, 3]),\n (\"x\", 1): pd.DataFrame({\"a\": [4, 5, 6], \"b\": [3, 2, 1]}, index=[5, 6, 8]),\n (\"x\", 2): pd.DataFrame({\"a\": [7, 8, 9], \"b\": [0, 0, 0]}, index=[9, 9, 9]),\n }\n meta = make_meta({\"a\": \"i8\", \"b\": \"i8\"}, index=pd.Index([], \"i8\"))\n ddf1 = dd.DataFrame(dsk, \"x\", meta, [0, 4, 9, 9])\n pdf1 = ddf1.compute()\n\n assert_eq(ddf1.sum(split_every=split_every), pdf1.sum())\n assert_eq(ddf1.prod(split_every=split_every), pdf1.prod())\n assert_eq(ddf1.min(split_every=split_every), pdf1.min())\n assert_eq(ddf1.max(split_every=split_every), pdf1.max())\n assert_eq(ddf1.count(split_every=split_every), pdf1.count())\n assert_eq(ddf1.std(split_every=split_every), pdf1.std())\n assert_eq(ddf1.var(split_every=split_every), pdf1.var())\n assert_eq(ddf1.sem(split_every=split_every), pdf1.sem())\n assert_eq(ddf1.std(ddof=0, split_every=split_every), pdf1.std(ddof=0))\n assert_eq(ddf1.var(ddof=0, split_every=split_every), pdf1.var(ddof=0))\n assert_eq(ddf1.sem(ddof=0, split_every=split_every), pdf1.sem(ddof=0))\n assert_eq(ddf1.mean(split_every=split_every), pdf1.mean())\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame.for_axis_in_0_1_index_test_reductions_frame._axis_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame.for_axis_in_0_1_index_test_reductions_frame._axis_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 1057, "end_line": 1107, "span_ids": ["test_reductions_frame"], "tokens": 776}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"split_every\", [False, 2])\ndef test_reductions_frame(split_every):\n # ... other code\n\n for axis in [0, 1, \"index\", \"columns\"]:\n assert_eq(ddf1.sum(axis=axis, split_every=split_every), pdf1.sum(axis=axis))\n assert_eq(ddf1.prod(axis=axis, split_every=split_every), pdf1.prod(axis=axis))\n assert_eq(ddf1.min(axis=axis, split_every=split_every), pdf1.min(axis=axis))\n assert_eq(ddf1.max(axis=axis, split_every=split_every), pdf1.max(axis=axis))\n assert_eq(ddf1.count(axis=axis, split_every=split_every), pdf1.count(axis=axis))\n assert_eq(ddf1.std(axis=axis, split_every=split_every), pdf1.std(axis=axis))\n assert_eq(ddf1.var(axis=axis, split_every=split_every), pdf1.var(axis=axis))\n assert_eq(ddf1.sem(axis=axis, split_every=split_every), pdf1.sem(axis=axis))\n assert_eq(\n ddf1.std(axis=axis, ddof=0, split_every=split_every),\n pdf1.std(axis=axis, ddof=0),\n )\n assert_eq(\n ddf1.var(axis=axis, ddof=0, split_every=split_every),\n pdf1.var(axis=axis, ddof=0),\n )\n assert_eq(\n ddf1.sem(axis=axis, ddof=0, split_every=split_every),\n pdf1.sem(axis=axis, ddof=0),\n )\n assert_eq(ddf1.mean(axis=axis, split_every=split_every), pdf1.mean(axis=axis))\n\n pytest.raises(ValueError, lambda: ddf1.sum(axis=\"incorrect\").compute())\n\n # axis=0\n assert_dask_graph(ddf1.sum(split_every=split_every), \"dataframe-sum\")\n assert_dask_graph(ddf1.prod(split_every=split_every), \"dataframe-prod\")\n assert_dask_graph(ddf1.min(split_every=split_every), \"dataframe-min\")\n assert_dask_graph(ddf1.max(split_every=split_every), \"dataframe-max\")\n assert_dask_graph(ddf1.count(split_every=split_every), \"dataframe-count\")\n\n # std, var, sem, and mean consist of moment_* operations\n assert_dask_graph(ddf1.std(split_every=split_every), \"dataframe-var\")\n assert_dask_graph(ddf1.std(split_every=split_every), \"moment_chunk\")\n assert_dask_graph(ddf1.std(split_every=split_every), \"moment_agg\")\n assert_dask_graph(ddf1.std(split_every=split_every), \"values\")\n\n assert_dask_graph(ddf1.var(split_every=split_every), \"moment_chunk\")\n assert_dask_graph(ddf1.var(split_every=split_every), \"moment_agg\")\n assert_dask_graph(ddf1.var(split_every=split_every), \"values\")\n\n assert_dask_graph(ddf1.sem(split_every=split_every), 
\"dataframe-var\")\n assert_dask_graph(ddf1.sem(split_every=split_every), \"moment_chunk\")\n assert_dask_graph(ddf1.sem(split_every=split_every), \"moment_agg\")\n assert_dask_graph(ddf1.sem(split_every=split_every), \"values\")\n\n assert_dask_graph(ddf1.mean(split_every=split_every), \"dataframe-sum\")\n assert_dask_graph(ddf1.mean(split_every=split_every), \"dataframe-count\")\n\n # axis=1\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame.None_31_test_reductions_frame.None_39": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame.None_31_test_reductions_frame.None_39", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 1108, "end_line": 1116, "span_ids": ["test_reductions_frame"], "tokens": 246}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"split_every\", [False, 2])\ndef test_reductions_frame(split_every):\n # ... other code\n assert_dask_graph(ddf1.sum(axis=1, split_every=split_every), \"dataframe-sum\")\n assert_dask_graph(ddf1.prod(axis=1, split_every=split_every), \"dataframe-prod\")\n assert_dask_graph(ddf1.min(axis=1, split_every=split_every), \"dataframe-min\")\n assert_dask_graph(ddf1.max(axis=1, split_every=split_every), \"dataframe-max\")\n assert_dask_graph(ddf1.count(axis=1, split_every=split_every), \"dataframe-count\")\n assert_dask_graph(ddf1.std(axis=1, split_every=split_every), \"dataframe-std\")\n assert_dask_graph(ddf1.var(axis=1, split_every=split_every), \"dataframe-var\")\n assert_dask_graph(ddf1.sem(axis=1, split_every=split_every), \"dataframe-sem\")\n assert_dask_graph(ddf1.mean(axis=1, split_every=split_every), \"dataframe-mean\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame_dtypes_test_reductions_frame_dtypes.None_3.else_.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame_dtypes_test_reductions_frame_dtypes.None_3.else_.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 1119, "end_line": 1179, "span_ids": ["test_reductions_frame_dtypes"], "tokens": 786}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_reductions_frame_dtypes():\n df = pd.DataFrame(\n {\n \"int\": [1, 2, 3, 4, 5, 6, 7, 8],\n \"float\": [1.0, 2.0, 3.0, 4.0, np.nan, 6.0, 7.0, 8.0],\n \"dt\": [pd.NaT] + [datetime(2011, i, 1) for i in range(1, 8)],\n \"str\": list(\"abcdefgh\"),\n \"timedelta\": pd.to_timedelta([1, 2, 3, 4, 5, 6, 7, np.nan]),\n \"bool\": [True, False] * 4,\n }\n )\n\n if HAS_INT_NA:\n if not PANDAS_GT_0250:\n # Pandas master is returning NA for IntegerNA.sum() when mixed with other dtypes.\n # https://github.com/pandas-dev/pandas/issues/27185\n df[\"intna\"] = pd.array([1, 2, 3, 4, None, 6, 7, 8], dtype=pd.Int64Dtype())\n\n ddf = dd.from_pandas(df, 3)\n\n # TODO: std and mean do not support timedelta dtype\n df_no_timedelta = df.drop(\"timedelta\", axis=1, inplace=False)\n ddf_no_timedelta = dd.from_pandas(df_no_timedelta, 3)\n\n if not PANDAS_GT_100:\n # https://github.com/pandas-dev/pandas/issues/30886\n assert_eq(df.sum(), ddf.sum())\n assert_eq(df_no_timedelta.mean(), ddf_no_timedelta.mean())\n else:\n assert_eq(df.drop(columns=\"dt\").sum(), ddf.drop(columns=\"dt\").sum())\n assert_eq(\n df_no_timedelta.drop(columns=\"dt\").mean(),\n ddf_no_timedelta.drop(columns=\"dt\").mean(),\n )\n\n assert_eq(df.prod(), ddf.prod())\n assert_eq(df.min(), ddf.min())\n assert_eq(df.max(), ddf.max())\n assert_eq(df.count(), ddf.count())\n assert_eq(df_no_timedelta.std(), ddf_no_timedelta.std())\n assert_eq(df_no_timedelta.var(), ddf_no_timedelta.var())\n if PANDAS_GT_0250:\n # https://github.com/pandas-dev/pandas/issues/18880\n assert_eq(\n df.drop(\"timedelta\", axis=1).var(skipna=False),\n ddf.drop(\"timedelta\", axis=1).var(skipna=False),\n )\n else:\n assert_eq(df.var(skipna=False), ddf.var(skipna=False))\n\n assert_eq(df.sem(), ddf.sem())\n assert_eq(df_no_timedelta.std(ddof=0), ddf_no_timedelta.std(ddof=0))\n if PANDAS_GT_0250:\n # https://github.com/pandas-dev/pandas/issues/18880\n df2 = df.drop(\"timedelta\", axis=1)\n ddf2 = ddf.drop(\"timedelta\", axis=1)\n assert_eq(df2.var(ddof=0), ddf2.var(ddof=0))\n assert_eq(df2.var(ddof=0, skipna=False), ddf2.var(ddof=0, skipna=False))\n else:\n assert_eq(df.var(ddof=0), ddf.var(ddof=0))\n assert_eq(df.var(ddof=0, skipna=False), ddf.var(ddof=0, skipna=False))\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame_dtypes.assert_eq_df_sem_ddof_0__test_reductions_frame_dtypes.assert_eq_df_numerics_var": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame_dtypes.assert_eq_df_sem_ddof_0__test_reductions_frame_dtypes.assert_eq_df_numerics_var", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 1180, "end_line": 1198, "span_ids": ["test_reductions_frame_dtypes"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_reductions_frame_dtypes():\n # ... other code\n assert_eq(df.sem(ddof=0), ddf.sem(ddof=0))\n\n assert_eq(df._get_numeric_data(), ddf._get_numeric_data())\n\n numerics = ddf[[\"int\", \"float\"]]\n assert numerics._get_numeric_data().dask == numerics.dask\n\n # test var corner cases\n\n # only timedelta\n df_td = df[[\"timedelta\"]]\n ddf_td = dd.from_pandas(df_td, 3)\n assert_eq(df_td.var(ddof=0), ddf_td.var(ddof=0))\n assert_eq(df_td.var(), ddf_td.var())\n\n # only numercis\n df_numerics = df[[\"int\", \"float\", \"bool\"]]\n ddf_numerics = dd.from_pandas(df_numerics, 3)\n assert_eq(df_numerics.var(), ddf_numerics.var())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame_nan_test_reductions_frame_nan.assert_eq_df_mean_ddf_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame_nan_test_reductions_frame_nan.assert_eq_df_mean_ddf_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 1201, "end_line": 1226, "span_ids": ["test_reductions_frame_nan"], "tokens": 368}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"split_every\", [False, 2])\ndef test_reductions_frame_nan(split_every):\n df = pd.DataFrame(\n {\n \"a\": [1, 2, np.nan, 4, 5, 6, 7, 8],\n \"b\": [1, 2, np.nan, np.nan, np.nan, 5, np.nan, np.nan],\n \"c\": [np.nan] * 8,\n }\n )\n ddf = dd.from_pandas(df, 3)\n 
assert_eq(df.sum(), ddf.sum(split_every=split_every))\n assert_eq(df.prod(), ddf.prod(split_every=split_every))\n assert_eq(df.min(), ddf.min(split_every=split_every))\n assert_eq(df.max(), ddf.max(split_every=split_every))\n assert_eq(df.count(), ddf.count(split_every=split_every))\n with warnings.catch_warnings():\n # dask.dataframe should probably filter this, to match pandas, but\n # it seems quite difficult.\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n assert_eq(df.std(), ddf.std(split_every=split_every))\n assert_eq(df.var(), ddf.var(split_every=split_every))\n assert_eq(df.sem(), ddf.sem(split_every=split_every))\n assert_eq(df.std(ddof=0), ddf.std(ddof=0, split_every=split_every))\n assert_eq(df.var(ddof=0), ddf.var(ddof=0, split_every=split_every))\n assert_eq(df.sem(ddof=0), ddf.sem(ddof=0, split_every=split_every))\n assert_eq(df.mean(), ddf.mean(split_every=split_every))\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame_nan.None_1_test_reductions_frame_nan.None_1.None_21": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame_nan.None_1_test_reductions_frame_nan.None_1.None_21", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 1228, "end_line": 1297, "span_ids": ["test_reductions_frame_nan"], "tokens": 747}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"split_every\", [False, 2])\ndef test_reductions_frame_nan(split_every):\n # ... 
other code\n\n with warnings.catch_warnings(record=True):\n assert_eq(df.sum(skipna=False), ddf.sum(skipna=False, split_every=split_every))\n assert_eq(\n df.prod(skipna=False), ddf.prod(skipna=False, split_every=split_every)\n )\n assert_eq(df.min(skipna=False), ddf.min(skipna=False, split_every=split_every))\n assert_eq(df.max(skipna=False), ddf.max(skipna=False, split_every=split_every))\n assert_eq(df.std(skipna=False), ddf.std(skipna=False, split_every=split_every))\n assert_eq(df.var(skipna=False), ddf.var(skipna=False, split_every=split_every))\n assert_eq(df.sem(skipna=False), ddf.sem(skipna=False, split_every=split_every))\n assert_eq(\n df.std(skipna=False, ddof=0),\n ddf.std(skipna=False, ddof=0, split_every=split_every),\n )\n assert_eq(\n df.var(skipna=False, ddof=0),\n ddf.var(skipna=False, ddof=0, split_every=split_every),\n )\n assert_eq(\n df.sem(skipna=False, ddof=0),\n ddf.sem(skipna=False, ddof=0, split_every=split_every),\n )\n assert_eq(\n df.mean(skipna=False), ddf.mean(skipna=False, split_every=split_every)\n )\n\n assert_eq(\n df.sum(axis=1, skipna=False),\n ddf.sum(axis=1, skipna=False, split_every=split_every),\n )\n assert_eq(\n df.prod(axis=1, skipna=False),\n ddf.prod(axis=1, skipna=False, split_every=split_every),\n )\n assert_eq(\n df.min(axis=1, skipna=False),\n ddf.min(axis=1, skipna=False, split_every=split_every),\n )\n assert_eq(\n df.max(axis=1, skipna=False),\n ddf.max(axis=1, skipna=False, split_every=split_every),\n )\n assert_eq(\n df.std(axis=1, skipna=False),\n ddf.std(axis=1, skipna=False, split_every=split_every),\n )\n assert_eq(\n df.var(axis=1, skipna=False),\n ddf.var(axis=1, skipna=False, split_every=split_every),\n )\n assert_eq(\n df.sem(axis=1, skipna=False),\n ddf.sem(axis=1, skipna=False, split_every=split_every),\n )\n assert_eq(\n df.std(axis=1, skipna=False, ddof=0),\n ddf.std(axis=1, skipna=False, ddof=0, split_every=split_every),\n )\n assert_eq(\n df.var(axis=1, skipna=False, ddof=0),\n ddf.var(axis=1, skipna=False, ddof=0, split_every=split_every),\n )\n assert_eq(\n df.sem(axis=1, skipna=False, ddof=0),\n ddf.sem(axis=1, skipna=False, ddof=0, split_every=split_every),\n )\n assert_eq(\n df.mean(axis=1, skipna=False),\n ddf.mean(axis=1, skipna=False, split_every=split_every),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_series_comparison_nan_test_series_comparison_nan.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_series_comparison_nan_test_series_comparison_nan.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 1300, "end_line": 1313, "span_ids": ["test_series_comparison_nan"], "tokens": 168}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": 
"@pytest.mark.parametrize(\"comparison\", [\"lt\", \"gt\", \"le\", \"ge\", \"ne\", \"eq\"])\ndef test_series_comparison_nan(comparison):\n s = pd.Series([1, 2, 3, 4, 5, 6, 7])\n s_nan = pd.Series([1, -1, 8, np.nan, 5, 6, 2.4])\n ds = dd.from_pandas(s, 3)\n ds_nan = dd.from_pandas(s_nan, 3)\n\n fill_value = 7\n comparison_pd = getattr(s, comparison)\n comparison_dd = getattr(ds, comparison)\n assert_eq(\n comparison_dd(ds_nan, fill_value=fill_value),\n comparison_pd(s_nan, fill_value=fill_value),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_skip_if_no_intna_test_divmod.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_skip_if_no_intna_test_divmod.None_3", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 1316, "end_line": 1341, "span_ids": ["test_divmod", "test_sum_intna", "impl"], "tokens": 235}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "skip_if_no_intna = pytest.mark.skipif(not HAS_INT_NA, reason=\"integer na\")\n\n\n@skip_if_no_intna\ndef test_sum_intna():\n a = pd.Series([1, None, 2], dtype=pd.Int32Dtype())\n b = dd.from_pandas(a, 2)\n assert_eq(a.sum(), b.sum())\n\n\ndef test_divmod():\n df1 = pd.Series(np.random.rand(10))\n df2 = pd.Series(np.random.rand(10))\n\n ddf1 = dd.from_pandas(df1, npartitions=3)\n ddf2 = dd.from_pandas(df2, npartitions=3)\n\n result = divmod(ddf1, 2.0)\n expected = divmod(df1, 2.0)\n assert_eq(result[0], expected[0])\n assert_eq(result[1], expected[1])\n\n result = divmod(ddf1, ddf2)\n expected = divmod(df1, df2)\n assert_eq(result[0], expected[0])\n assert_eq(result[1], expected[1])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_moment_test_empty_df_reductions.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_moment_test_empty_df_reductions.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 1344, "end_line": 1369, "span_ids": ["test_empty_df_reductions", "test_moment"], "tokens": 228}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_moment():\n scipy = pytest.importorskip(\"scipy\")\n from dask.array import stats\n from dask.array.utils import assert_eq\n\n df = pd.Series(list(range(10)))\n ddf = dd.from_pandas(df, npartitions=2)\n\n assert_eq(stats.moment(ddf, 2, 0), scipy.stats.moment(df, 2, 0))\n\n\n@pytest.mark.parametrize(\"func\", [\"sum\", \"count\", \"mean\", \"var\", \"sem\"])\ndef test_empty_df_reductions(func):\n pdf = pd.DataFrame()\n ddf = dd.from_pandas(pdf, npartitions=1)\n\n dsk_func = getattr(ddf.__class__, func)\n pd_func = getattr(pdf.__class__, func)\n\n assert_eq(dsk_func(ddf), pd_func(pdf))\n\n idx = pd.date_range(\"2000\", periods=4)\n pdf = pd.DataFrame(index=idx)\n ddf = dd.from_pandas(pdf, npartitions=1)\n\n assert_eq(dsk_func(ddf), pd_func(pdf))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_series_agg_with_min_count_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_series_agg_with_min_count_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 1372, "end_line": 1383, "span_ids": ["test_series_agg_with_min_count"], "tokens": 111}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"method\", [\"sum\", \"prod\"])\n@pytest.mark.parametrize(\"min_count\", [0, 9])\ndef test_series_agg_with_min_count(method, min_count):\n df = pd.DataFrame([[1]], columns=[\"a\"])\n ddf = dd.from_pandas(df, npartitions=1)\n func = getattr(ddf[\"a\"], method)\n result = func(min_count=min_count).compute()\n if min_count == 0:\n assert result == 1\n else:\n assert result is np.nan", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_boolean.py_pd_test_meta.dd_utils_assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_boolean.py_pd_test_meta.dd_utils_assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_boolean.py", "file_name": "test_boolean.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 25, "span_ids": ["imports", "test_meta"], "tokens": 187}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, 
"text": "import pandas as pd\nimport pytest\n\nimport dask.dataframe as dd\n\n\npytestmark = pytest.mark.skipif(\n not dd._compat.PANDAS_GT_100, reason=\"BooleanArray added in 1.0.0\"\n)\n\n\ndef test_meta():\n values = pd.array([True, False, None], dtype=\"boolean\")\n ds = dd.from_pandas(pd.Series(values), 2)\n assert ds.dtype == pd.BooleanDtype()\n\n dd.utils.assert_eq(ds._meta_nonempty, pd.Series([True, pd.NA], dtype=\"boolean\"))\n\n ddf = dd.from_pandas(pd.DataFrame({\"A\": values}), 2)\n assert ddf.dtypes[\"A\"] == pd.BooleanDtype()\n\n dd.utils.assert_eq(\n ddf._meta_nonempty,\n pd.DataFrame({\"A\": pd.array([True, pd.NA], dtype=\"boolean\")}),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_boolean.py_test_ops_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_boolean.py_test_ops_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_boolean.py", "file_name": "test_boolean.py", "file_type": "text/x-python", "category": "test", "start_line": 28, "end_line": 38, "span_ids": ["test_ops"], "tokens": 139}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_ops():\n s1 = pd.Series(pd.array([True, False, None] * 3, dtype=\"boolean\"))\n s2 = pd.Series(pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype=\"boolean\"))\n\n ds1 = dd.from_pandas(s1, 2)\n ds2 = dd.from_pandas(s2, 2)\n\n dd.utils.assert_eq(ds1 | ds2, s1 | s2)\n dd.utils.assert_eq(ds1 & ds2, s1 & s2)\n dd.utils.assert_eq(ds1 ^ ds2, s1 ^ s2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_operator_frames6._i_set_index_i_y_i_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_operator_frames6._i_set_index_i_y_i_x_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_categorical.py", "file_name": "test_categorical.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 75, "span_ids": ["imports"], "tokens": 529}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import operator\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nimport dask\nimport dask.dataframe as dd\nfrom dask.dataframe._compat import tm\nfrom dask.dataframe import _compat\nfrom dask.dataframe.core import _concat\nfrom dask.dataframe.utils import (\n make_meta,\n assert_eq,\n is_categorical_dtype,\n 
clear_known_categories,\n)\n\n\n# Generate a list of categorical series and indices\ncat_series = []\nfor ordered in [True, False]:\n s = pd.Series(pd.Categorical(list(\"bacbac\"), ordered=ordered))\n ds = dd.from_pandas(s, npartitions=2)\n cat_series.append((s, ds))\ns = pd.Series(range(6), index=pd.Categorical(list(\"bacbac\")))\nds = dd.from_pandas(s, npartitions=2)\ncat_series.append((ds.compute().index, ds.index))\n\n\na = pd.DataFrame(\n {\n \"v\": list(\"abcde\"),\n \"w\": list(\"xxxxx\"),\n \"x\": np.arange(5),\n \"y\": list(\"abcbc\"),\n \"z\": np.arange(5, dtype=\"f8\"),\n }\n)\n\nb = pd.DataFrame(\n {\n \"v\": list(\"fghij\"),\n \"w\": list(\"yyyyy\"),\n \"x\": np.arange(5, 10),\n \"y\": list(\"abbba\"),\n \"z\": np.arange(5, 10, dtype=\"f8\"),\n }\n)\n\nc = pd.DataFrame(\n {\n \"v\": list(\"klmno\"),\n \"w\": list(\"zzzzz\"),\n \"x\": np.arange(10, 15),\n \"y\": list(\"bcbcc\"),\n \"z\": np.arange(10, 15, dtype=\"f8\"),\n }\n)\n\nframes = [a, b, c]\nframes2 = []\nfor df in frames:\n df.w = df.w.astype(\"category\")\n df.y = df.y.astype(\"category\")\n frames2.append(\n df.assign(\n w=df.w.cat.set_categories(list(\"xyz\")),\n y=df.y.cat.set_categories(list(\"abc\")),\n )\n )\nframes3 = [i.set_index(i.y) for i in frames]\nframes4 = [i.set_index(i.y) for i in frames2]\nframes5 = [i.set_index([i.y, i.x]) for i in frames]\nframes6 = [i.set_index([i.y, i.x]) for i in frames2]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_concat_unions_categoricals_test_concat_unions_categoricals.None_8": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_concat_unions_categoricals_test_concat_unions_categoricals.None_8", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_categorical.py", "file_name": "test_categorical.py", "file_type": "text/x-python", "category": "test", "start_line": 78, "end_line": 117, "span_ids": ["test_concat_unions_categoricals"], "tokens": 338}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_concat_unions_categoricals():\n # Categorical DataFrame, regular index\n tm.assert_frame_equal(_concat(frames), pd.concat(frames2))\n\n # Categorical Series, regular index\n tm.assert_series_equal(\n _concat([i.y for i in frames]), pd.concat([i.y for i in frames2])\n )\n\n # Categorical Index\n tm.assert_index_equal(\n _concat([i.index for i in frames3]), pd.concat([i for i in frames4]).index\n )\n\n # Categorical DataFrame, Categorical Index\n tm.assert_frame_equal(_concat(frames3), pd.concat(frames4))\n\n # Non-categorical DataFrame, Categorical Index\n tm.assert_frame_equal(\n _concat([i[[\"x\", \"z\"]] for i in frames3]),\n pd.concat([i[[\"x\", \"z\"]] for i in frames4]),\n )\n\n # Categorical Series, Categorical Index\n tm.assert_series_equal(\n _concat([i.z for i in frames3]), pd.concat([i.z for i in frames4])\n )\n\n # Non-categorical Series, Categorical Index\n tm.assert_series_equal(\n 
_concat([i.x for i in frames3]), pd.concat([i.x for i in frames4])\n )\n\n # MultiIndex with Categorical Index\n tm.assert_index_equal(\n _concat([i.index for i in frames5]), pd.concat([i for i in frames6]).index\n )\n\n # DataFrame, MultiIndex with CategoricalIndex\n tm.assert_frame_equal(_concat(frames5), pd.concat(frames6))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_unknown_categoricals_test_is_categorical_dtype.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_unknown_categoricals_test_is_categorical_dtype.None_3", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_categorical.py", "file_name": "test_categorical.py", "file_type": "text/x-python", "category": "test", "start_line": 120, "end_line": 149, "span_ids": ["test_is_categorical_dtype", "test_unknown_categoricals"], "tokens": 284}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_unknown_categoricals():\n ddf = dd.DataFrame(\n {(\"unknown\", i): df for (i, df) in enumerate(frames)},\n \"unknown\",\n make_meta(\n {\"v\": \"object\", \"w\": \"category\", \"x\": \"i8\", \"y\": \"category\", \"z\": \"f8\"}\n ),\n [None] * 4,\n )\n # Compute\n df = ddf.compute()\n\n assert_eq(ddf.w.value_counts(), df.w.value_counts())\n assert_eq(ddf.w.nunique(), df.w.nunique())\n\n assert_eq(ddf.groupby(ddf.w).sum(), df.groupby(df.w).sum())\n assert_eq(ddf.groupby(ddf.w).y.nunique(), df.groupby(df.w).y.nunique())\n assert_eq(ddf.y.groupby(ddf.w).count(), df.y.groupby(df.w).count())\n\n\ndef test_is_categorical_dtype():\n df = pd.DataFrame({\"cat\": pd.Categorical([1, 2, 3, 4]), \"x\": [1, 2, 3, 4]})\n\n assert is_categorical_dtype(df[\"cat\"])\n assert not is_categorical_dtype(df[\"x\"])\n\n ddf = dd.from_pandas(df, 2)\n\n assert is_categorical_dtype(ddf[\"cat\"])\n assert not is_categorical_dtype(ddf[\"x\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorize_test_categorize.None_2.ddf_categorize_split_ever": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorize_test_categorize.None_2.ddf_categorize_split_ever", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_categorical.py", "file_name": "test_categorical.py", "file_type": "text/x-python", "category": "test", "start_line": 152, "end_line": 212, "span_ids": ["test_categorize"], "tokens": 637}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_categorize():\n # rename y to y_ to avoid pandas future warning about ambiguous\n # levels\n meta = clear_known_categories(frames4[0]).rename(columns={\"y\": \"y_\"})\n ddf = dd.DataFrame(\n {(\"unknown\", i): df for (i, df) in enumerate(frames3)},\n \"unknown\",\n meta,\n [None] * 4,\n ).rename(columns={\"y\": \"y_\"})\n ddf = ddf.assign(w=ddf.w.cat.set_categories([\"x\", \"y\", \"z\"]))\n assert ddf.w.cat.known\n assert not ddf.y_.cat.known\n assert not ddf.index.cat.known\n df = ddf.compute()\n\n for index in [None, True, False]:\n known_index = index is not False\n # By default categorize object and unknown cat columns\n ddf2 = ddf.categorize(index=index)\n assert ddf2.y_.cat.known\n assert ddf2.v.cat.known\n assert ddf2.index.cat.known == known_index\n assert_eq(ddf2, df.astype({\"v\": \"category\"}), check_categorical=False)\n\n # Specifying split_every works\n ddf2 = ddf.categorize(index=index, split_every=2)\n assert ddf2.y_.cat.known\n assert ddf2.v.cat.known\n assert ddf2.index.cat.known == known_index\n assert_eq(ddf2, df.astype({\"v\": \"category\"}), check_categorical=False)\n\n # Specifying one column doesn't affect others\n ddf2 = ddf.categorize(\"v\", index=index)\n assert not ddf2.y_.cat.known\n assert ddf2.v.cat.known\n assert ddf2.index.cat.known == known_index\n assert_eq(ddf2, df.astype({\"v\": \"category\"}), check_categorical=False)\n\n ddf2 = ddf.categorize(\"y_\", index=index)\n assert ddf2.y_.cat.known\n assert ddf2.v.dtype == \"object\"\n assert ddf2.index.cat.known == known_index\n assert_eq(ddf2, df)\n\n ddf_known_index = ddf.categorize(columns=[], index=True)\n assert ddf_known_index.index.cat.known\n assert_eq(ddf_known_index, df)\n\n # Specifying known categorical or no columns is a no-op:\n assert ddf.categorize([\"w\"], index=False) is ddf\n assert ddf.categorize([], index=False) is ddf\n assert ddf_known_index.categorize([\"w\"]) is ddf_known_index\n assert ddf_known_index.categorize([]) is ddf_known_index\n\n # Bad split_every fails\n with pytest.raises(ValueError):\n ddf.categorize(split_every=1)\n\n with pytest.raises(ValueError):\n ddf.categorize(split_every=\"foo\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorical_dtype_test_categorical_dtype.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorical_dtype_test_categorical_dtype.None_5", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_categorical.py", "file_name": "test_categorical.py", "file_type": "text/x-python", "category": "test", "start_line": 215, "end_line": 230, "span_ids": ["test_categorical_dtype"], "tokens": 172}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_categorical_dtype():\n cat_dtype = 
dd.categorical.categorical_dtype(\n meta=a, categories=[\"a\", \"b\", \"c\"], ordered=False\n )\n assert_eq(cat_dtype.categories, pd.Index([\"a\", \"b\", \"c\"]))\n assert_eq(cat_dtype.ordered, False)\n\n cat_dtype = dd.categorical.categorical_dtype(meta=a, categories=[\"a\", \"b\", \"c\"])\n assert_eq(cat_dtype.categories, pd.Index([\"a\", \"b\", \"c\"]))\n assert_eq(cat_dtype.ordered, False)\n\n cat_dtype = dd.categorical.categorical_dtype(\n meta=a, categories=[1, 100, 200], ordered=True\n )\n assert_eq(cat_dtype.categories, pd.Index([1, 100, 200]))\n assert_eq(cat_dtype.ordered, True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorize_index_test_categorize_index.assert_ddf_categorize_i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorize_index_test_categorize_index.assert_ddf_categorize_i", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_categorical.py", "file_name": "test_categorical.py", "file_type": "text/x-python", "category": "test", "start_line": 233, "end_line": 262, "span_ids": ["test_categorize_index"], "tokens": 209}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_categorize_index():\n # Object dtype\n ddf = dd.from_pandas(_compat.makeDataFrame(), npartitions=5)\n df = ddf.compute()\n\n ddf2 = ddf.categorize()\n assert ddf2.index.cat.known\n assert_eq(\n ddf2,\n df.set_index(pd.CategoricalIndex(df.index)),\n check_divisions=False,\n check_categorical=False,\n )\n\n assert ddf.categorize(index=False) is ddf\n\n # Non-object dtype\n ddf = dd.from_pandas(df.set_index(df.A.rename(\"idx\")), npartitions=5)\n df = ddf.compute()\n\n ddf2 = ddf.categorize(index=True)\n assert ddf2.index.cat.known\n assert_eq(\n ddf2,\n df.set_index(pd.CategoricalIndex(df.index)),\n check_divisions=False,\n check_categorical=False,\n )\n\n assert ddf.categorize() is ddf", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorical_set_index_test_categorical_set_index.with_dask_config_set_sche.None_8": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorical_set_index_test_categorical_set_index.with_dask_config_set_sche.None_8", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_categorical.py", "file_name": "test_categorical.py", "file_type": "text/x-python", "category": "test", "start_line": 265, "end_line": 285, "span_ids": ["test_categorical_set_index"], "tokens": 323}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"shuffle\", [\"disk\", \"tasks\"])\ndef test_categorical_set_index(shuffle):\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [\"a\", \"b\", \"b\", \"c\"]})\n df[\"y\"] = pd.Categorical(df[\"y\"], categories=[\"a\", \"b\", \"c\"], ordered=True)\n a = dd.from_pandas(df, npartitions=2)\n\n with dask.config.set(scheduler=\"sync\", shuffle=shuffle):\n b = a.set_index(\"y\", npartitions=a.npartitions)\n d1, d2 = b.get_partition(0), b.get_partition(1)\n assert list(d1.index.compute()) == [\"a\"]\n assert list(sorted(d2.index.compute())) == [\"b\", \"b\", \"c\"]\n\n b = a.set_index(a.y, npartitions=a.npartitions)\n d1, d2 = b.get_partition(0), b.get_partition(1)\n assert list(d1.index.compute()) == [\"a\"]\n assert list(sorted(d2.index.compute())) == [\"b\", \"b\", \"c\"]\n\n b = a.set_index(\"y\", divisions=[\"a\", \"b\", \"c\"], npartitions=a.npartitions)\n d1, d2 = b.get_partition(0), b.get_partition(1)\n assert list(d1.index.compute()) == [\"a\"]\n assert list(sorted(d2.index.compute())) == [\"b\", \"b\", \"c\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorical_set_index_npartitions_vs_ncategories_test_categorical_set_index_npartitions_vs_ncategories._Test_passes_if_this_wor": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorical_set_index_npartitions_vs_ncategories_test_categorical_set_index_npartitions_vs_ncategories._Test_passes_if_this_wor", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_categorical.py", "file_name": "test_categorical.py", "file_type": "text/x-python", "category": "test", "start_line": 288, "end_line": 302, "span_ids": ["test_categorical_set_index_npartitions_vs_ncategories"], "tokens": 190}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"ncategories\", [1, 3, 6])\n@pytest.mark.parametrize(\"npartitions\", [1, 3, 6])\ndef test_categorical_set_index_npartitions_vs_ncategories(npartitions, ncategories):\n \"\"\"https://github.com/dask/dask/issues/5343\"\"\"\n rows_per_category = 10\n n_rows = ncategories * rows_per_category\n\n categories = [\"CAT\" + str(i) for i in range(ncategories)]\n pdf = pd.DataFrame(\n {\"id\": categories * rows_per_category, \"value\": np.random.random(n_rows)}\n )\n ddf = dd.from_pandas(pdf, npartitions=npartitions)\n ddf[\"id\"] = ddf[\"id\"].astype(\"category\").cat.as_ordered()\n ddf = ddf.set_index(\"id\")\n # Test passes if this worked and didn't raise any warnings", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_repartition_on_categoricals_test_repartition_on_categoricals.assert_eq_df_ddf2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_repartition_on_categoricals_test_repartition_on_categoricals.assert_eq_df_ddf2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_categorical.py", "file_name": "test_categorical.py", "file_type": "text/x-python", "category": "test", "start_line": 305, "end_line": 315, "span_ids": ["test_repartition_on_categoricals"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"npartitions\", [1, 4])\ndef test_repartition_on_categoricals(npartitions):\n df = pd.DataFrame({\"x\": range(10), \"y\": list(\"abababcbcb\")})\n ddf = dd.from_pandas(df, npartitions=2)\n ddf[\"y\"] = ddf[\"y\"].astype(\"category\")\n ddf2 = ddf.repartition(npartitions=npartitions)\n\n df = df.copy()\n df[\"y\"] = df[\"y\"].astype(\"category\")\n assert_eq(df, ddf)\n assert_eq(df, ddf2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorical_accessor_presence_test_categorical_accessor_presence.assert_not_hasattr_ddf_in": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorical_accessor_presence_test_categorical_accessor_presence.assert_not_hasattr_ddf_in", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_categorical.py", "file_name": "test_categorical.py", "file_type": "text/x-python", "category": "test", "start_line": 318, "end_line": 331, "span_ids": ["test_categorical_accessor_presence"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_categorical_accessor_presence():\n df = pd.DataFrame({\"x\": list(\"a\" * 5 + \"b\" * 5 + \"c\" * 5), \"y\": range(15)})\n df.x = df.x.astype(\"category\")\n ddf = dd.from_pandas(df, npartitions=2)\n\n assert \"cat\" in dir(ddf.x)\n assert \"cat\" not in dir(ddf.y)\n assert hasattr(ddf.x, \"cat\")\n assert not hasattr(ddf.y, \"cat\")\n\n df2 = df.set_index(df.x)\n ddf2 = dd.from_pandas(df2, npartitions=2, sort=False)\n assert hasattr(ddf2.index, \"categories\")\n assert not hasattr(ddf.index, \"categories\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorize_nan_test_return_type_known_categories.assert_isinstance_ret_typ": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorize_nan_test_return_type_known_categories.assert_isinstance_ret_typ", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_categorical.py", "file_name": "test_categorical.py", "file_type": "text/x-python", "category": "test", "start_line": 334, "end_line": 361, "span_ids": ["assert_array_index_eq", "get_cat", "test_return_type_known_categories", "test_categorize_nan"], "tokens": 229}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_categorize_nan():\n df = dd.from_pandas(\n pd.DataFrame({\"A\": [\"a\", \"b\", \"a\", float(\"nan\")]}), npartitions=2\n )\n with pytest.warns(None) as record:\n df.categorize().compute()\n assert len(record) == 0\n\n\ndef get_cat(x):\n return x if isinstance(x, pd.CategoricalIndex) else x.cat\n\n\ndef assert_array_index_eq(left, right, check_divisions=False):\n \"\"\"left and right are equal, treating index and array as equivalent\"\"\"\n assert_eq(\n left,\n pd.Index(right) if isinstance(right, np.ndarray) else right,\n check_divisions=check_divisions,\n )\n\n\ndef test_return_type_known_categories():\n df = pd.DataFrame({\"A\": [\"a\", \"b\", \"c\"]})\n df[\"A\"] = df[\"A\"].astype(\"category\")\n dask_df = dd.from_pandas(df, 2)\n ret_type = dask_df.A.cat.as_known()\n assert isinstance(ret_type, dd.core.Series)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_TestCategoricalAccessor_TestCategoricalAccessor.test_callable.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_TestCategoricalAccessor_TestCategoricalAccessor.test_callable.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_categorical.py", "file_name": "test_categorical.py", "file_type": "text/x-python", "category": "test", "start_line": 364, "end_line": 412, "span_ids": ["TestCategoricalAccessor", "TestCategoricalAccessor.test_properties", "TestCategoricalAccessor.test_callable"], "tokens": 367}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCategoricalAccessor:\n @pytest.mark.parametrize(\"series\", cat_series)\n @pytest.mark.parametrize(\n \"prop, compare\",\n [\n (\"categories\", assert_array_index_eq),\n (\"ordered\", assert_eq),\n (\"codes\", assert_array_index_eq),\n ],\n )\n def test_properties(self, series, prop, compare):\n s, ds = 
series\n expected = getattr(get_cat(s), prop)\n result = getattr(get_cat(ds), prop)\n compare(result, expected, check_divisions=False)\n\n @pytest.mark.parametrize(\"series\", cat_series)\n @pytest.mark.parametrize(\n \"method, kwargs\",\n [\n (\"add_categories\", dict(new_categories=[\"d\", \"e\"])),\n (\"as_ordered\", {}),\n (\"as_unordered\", {}),\n (\"as_ordered\", {}),\n (\"remove_categories\", dict(removals=[\"a\"])),\n (\"rename_categories\", dict(new_categories=[\"d\", \"e\", \"f\"])),\n (\"reorder_categories\", dict(new_categories=[\"a\", \"b\", \"c\"])),\n (\"set_categories\", dict(new_categories=[\"a\", \"e\", \"b\"])),\n (\"remove_unused_categories\", {}),\n ],\n )\n def test_callable(self, series, method, kwargs):\n op = operator.methodcaller(method, **kwargs)\n\n # Series\n s, ds = series\n expected = op(get_cat(s))\n result = op(get_cat(ds))\n assert_eq(result, expected, check_divisions=False)\n assert_eq(\n get_cat(result._meta).categories,\n get_cat(expected).categories,\n check_divisions=False,\n )\n assert_eq(\n get_cat(result._meta).ordered,\n get_cat(expected).ordered,\n check_divisions=False,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_TestCategoricalAccessor.test_categorical_empty_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_TestCategoricalAccessor.test_categorical_empty_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_categorical.py", "file_name": "test_categorical.py", "file_type": "text/x-python", "category": "test", "start_line": 414, "end_line": 462, "span_ids": ["TestCategoricalAccessor.test_categorical_empty", "TestCategoricalAccessor.test_categorical_non_string_raises", "TestCategoricalAccessor.test_unknown_categories", "TestCategoricalAccessor.test_categorical_string_ops"], "tokens": 395}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCategoricalAccessor:\n\n def test_categorical_empty(self):\n # GH 1705\n\n def make_empty():\n return pd.DataFrame({\"A\": pd.Categorical([np.nan, np.nan])})\n\n def make_full():\n return pd.DataFrame({\"A\": pd.Categorical([\"a\", \"a\"])})\n\n a = dd.from_delayed([dask.delayed(make_empty)(), dask.delayed(make_full)()])\n # Used to raise an IndexError\n a.A.cat.categories\n\n @pytest.mark.parametrize(\"series\", cat_series)\n def test_unknown_categories(self, series):\n a, da = series\n assert da.cat.known\n da = da.cat.as_unknown()\n assert not da.cat.known\n\n with pytest.raises(NotImplementedError):\n da.cat.categories\n with pytest.raises(NotImplementedError):\n da.cat.codes\n\n db = da.cat.set_categories([\"a\", \"b\", \"c\"])\n assert db.cat.known\n tm.assert_index_equal(db.cat.categories, get_cat(a).categories)\n assert_array_index_eq(db.cat.codes, get_cat(a).codes)\n\n db = da.cat.as_known()\n assert db.cat.known\n res = db.compute()\n tm.assert_index_equal(db.cat.categories, get_cat(res).categories)\n 
assert_array_index_eq(db.cat.codes, get_cat(res).codes)\n\n def test_categorical_string_ops(self):\n a = pd.Series([\"a\", \"a\", \"b\"], dtype=\"category\")\n da = dd.from_pandas(a, 2)\n result = da.str.upper()\n expected = a.str.upper()\n assert_eq(result, expected)\n\n def test_categorical_non_string_raises(self):\n a = pd.Series([1, 2, 3], dtype=\"category\")\n da = dd.from_pandas(a, 2)\n with pytest.raises(AttributeError):\n da.str.upper()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_warnings_test_dataframe_doc.assert_disclaimer_in_doc": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_warnings_test_dataframe_doc.assert_disclaimer_in_doc", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 48, "span_ids": ["imports", "test_dataframe_doc"], "tokens": 454}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import warnings\nfrom itertools import product\nfrom operator import add\n\nimport pytest\nimport numpy as np\nimport pandas as pd\nfrom pandas.io.formats import format as pandas_format\n\nimport dask\nimport dask.array as da\nfrom dask.array.numpy_compat import _numpy_118, _numpy_120\nimport dask.dataframe as dd\nfrom dask.blockwise import fuse_roots\nfrom dask.dataframe import _compat\nfrom dask.dataframe._compat import tm, PANDAS_GT_100, PANDAS_GT_110\nfrom dask.base import compute_as_if_collection\nfrom dask.utils import put_lines, M\n\nfrom dask.dataframe.core import (\n repartition_divisions,\n aca,\n _concat,\n Scalar,\n has_parallel_type,\n total_mem_usage,\n is_broadcastable,\n)\nfrom dask.dataframe import methods\nfrom dask.dataframe.utils import assert_eq, make_meta, assert_max_deps, PANDAS_VERSION\n\ndsk = {\n (\"x\", 0): pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]}, index=[0, 1, 3]),\n (\"x\", 1): pd.DataFrame({\"a\": [4, 5, 6], \"b\": [3, 2, 1]}, index=[5, 6, 8]),\n (\"x\", 2): pd.DataFrame({\"a\": [7, 8, 9], \"b\": [0, 0, 0]}, index=[9, 9, 9]),\n}\nmeta = make_meta({\"a\": \"i8\", \"b\": \"i8\"}, index=pd.Index([], \"i8\"))\nd = dd.DataFrame(dsk, \"x\", meta, [0, 5, 9, 9])\nfull = d.compute()\nCHECK_FREQ = {}\nif dd._compat.PANDAS_GT_110:\n CHECK_FREQ[\"check_freq\"] = False\n\n\ndef test_dataframe_doc():\n doc = d.add.__doc__\n disclaimer = \"Some inconsistencies with the Dask version may exist.\"\n assert disclaimer in doc", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dataframe_doc_from_non_pandas_test_dataframe_doc_from_non_pandas.try_.finally_.del_dd_DataFrame_foo": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dataframe_doc_from_non_pandas_test_dataframe_doc_from_non_pandas.try_.finally_.del_dd_DataFrame_foo", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 51, "end_line": 69, "span_ids": ["test_dataframe_doc_from_non_pandas"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dataframe_doc_from_non_pandas():\n class Foo:\n def foo(self):\n \"\"\"This is a new docstring that I just made up\n\n Parameters:\n ----------\n None\n \"\"\"\n\n d._bind_operator_method(\"foo\", Foo.foo, original=Foo)\n try:\n doc = d.foo.__doc__\n disclaimer = \"Some inconsistencies with the Dask version may exist.\"\n assert disclaimer in doc\n assert \"new docstring that I just made up\" in doc\n finally:\n # make sure to clean up this alteration of the dd.DataFrame class\n del dd.DataFrame.foo", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_Dataframe_test_Dataframe.assert_repr_d_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_Dataframe_test_Dataframe.assert_repr_d_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 72, "end_line": 90, "span_ids": ["test_Dataframe"], "tokens": 220}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_Dataframe():\n expected = pd.Series(\n [2, 3, 4, 5, 6, 7, 8, 9, 10], index=[0, 1, 3, 5, 6, 8, 9, 9, 9], name=\"a\"\n )\n\n assert_eq(d[\"a\"] + 1, expected)\n\n tm.assert_index_equal(d.columns, pd.Index([\"a\", \"b\"]))\n\n assert_eq(d[d[\"b\"] > 2], full[full[\"b\"] > 2])\n assert_eq(d[[\"a\", \"b\"]], full[[\"a\", \"b\"]])\n assert_eq(d.a, full.a)\n assert d.b.mean().compute() == full.b.mean()\n assert np.allclose(d.b.var().compute(), full.b.var())\n assert np.allclose(d.b.std().compute(), full.b.std())\n\n assert d.index._name == d.index._name # this is deterministic\n\n assert repr(d)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_head_tail_test_head_tail.None_3": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_head_tail_test_head_tail.None_3", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 93, "end_line": 118, "span_ids": ["test_head_tail"], "tokens": 353}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_head_tail():\n assert_eq(d.head(2), full.head(2))\n assert_eq(d.head(3), full.head(3))\n assert_eq(d.head(2), dsk[(\"x\", 0)].head(2))\n assert_eq(d[\"a\"].head(2), full[\"a\"].head(2))\n assert_eq(d[\"a\"].head(3), full[\"a\"].head(3))\n assert_eq(d[\"a\"].head(2), dsk[(\"x\", 0)][\"a\"].head(2))\n assert sorted(d.head(2, compute=False).dask) == sorted(\n d.head(2, compute=False).dask\n )\n assert sorted(d.head(2, compute=False).dask) != sorted(\n d.head(3, compute=False).dask\n )\n\n assert_eq(d.tail(2), full.tail(2))\n assert_eq(d.tail(3), full.tail(3))\n assert_eq(d.tail(2), dsk[(\"x\", 2)].tail(2))\n assert_eq(d[\"a\"].tail(2), full[\"a\"].tail(2))\n assert_eq(d[\"a\"].tail(3), full[\"a\"].tail(3))\n assert_eq(d[\"a\"].tail(2), dsk[(\"x\", 2)][\"a\"].tail(2))\n assert sorted(d.tail(2, compute=False).dask) == sorted(\n d.tail(2, compute=False).dask\n )\n assert sorted(d.tail(2, compute=False).dask) != sorted(\n d.tail(3, compute=False).dask\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_head_npartitions_test_head_npartitions.with_pytest_raises_ValueE.d_head_2_npartitions_5_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_head_npartitions_test_head_npartitions.with_pytest_raises_ValueE.d_head_2_npartitions_5_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 121, "end_line": 129, "span_ids": ["test_head_npartitions"], "tokens": 136}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.filterwarnings(\"ignore:Insufficient:UserWarning\")\ndef test_head_npartitions():\n assert_eq(d.head(5, npartitions=2), full.head(5))\n assert_eq(d.head(5, npartitions=2, compute=False), full.head(5))\n assert_eq(d.head(5, npartitions=-1), full.head(5))\n assert_eq(d.head(7, npartitions=-1), full.head(7))\n assert_eq(d.head(2, npartitions=-1), full.head(2))\n with pytest.raises(ValueError):\n d.head(2, npartitions=5)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", 
"metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_head_npartitions_warn_test_Index.for_case_in_.pytest_raises_AttributeEr": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_head_npartitions_warn_test_Index.for_case_in_.pytest_raises_AttributeEr", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 132, "end_line": 168, "span_ids": ["test_head_npartitions_warn", "test_Index", "test_index_head", "test_Series"], "tokens": 260}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_head_npartitions_warn():\n match = \"5 elements requested, only 3 elements\"\n with pytest.warns(UserWarning, match=match):\n d.head(5)\n\n with pytest.warns(None):\n d.head(100)\n\n with pytest.warns(None):\n d.head(7)\n\n with pytest.warns(None):\n d.head(7, npartitions=2)\n\n\ndef test_index_head():\n assert_eq(d.index.head(2), full.index[:2])\n assert_eq(d.index.head(3), full.index[:3])\n\n\ndef test_Series():\n assert isinstance(d.a, dd.Series)\n assert isinstance(d.a + 1, dd.Series)\n assert_eq((d + 1), full + 1)\n\n\ndef test_Index():\n for case in [\n pd.DataFrame(np.random.randn(10, 5), index=list(\"abcdefghij\")),\n pd.DataFrame(\n np.random.randn(10, 5),\n index=pd.date_range(\"2011-01-01\", freq=\"D\", periods=10),\n ),\n ]:\n ddf = dd.from_pandas(case, 3)\n assert_eq(ddf.index, case.index)\n pytest.raises(AttributeError, lambda: ddf.index.index)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_Scalar_test_scalar_raises.with_pytest_raises_TypeEr.bool_s_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_Scalar_test_scalar_raises.with_pytest_raises_TypeEr.bool_s_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 171, "end_line": 192, "span_ids": ["test_Scalar", "test_scalar_raises"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_Scalar():\n val = np.int64(1)\n s = Scalar({(\"a\", 0): val}, \"a\", \"i8\")\n assert hasattr(s, \"dtype\")\n assert \"dtype\" in dir(s)\n assert_eq(s, val)\n assert repr(s) == \"dd.Scalar\"\n\n val = pd.Timestamp(\"2001-01-01\")\n s = Scalar({(\"a\", 0): val}, \"a\", val)\n assert not hasattr(s, \"dtype\")\n assert 
\"dtype\" not in dir(s)\n assert_eq(s, val)\n assert repr(s) == \"dd.Scalar\"\n\n\ndef test_scalar_raises():\n val = np.int64(1)\n s = Scalar({(\"a\", 0): val}, \"a\", \"i8\")\n msg = \"cannot be converted to a boolean value\"\n with pytest.raises(TypeError, match=msg):\n bool(s)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_attributes_test_attributes.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_attributes_test_attributes.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 195, "end_line": 207, "span_ids": ["test_attributes"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_attributes():\n assert \"a\" in dir(d)\n assert \"foo\" not in dir(d)\n pytest.raises(AttributeError, lambda: d.foo)\n\n df = dd.from_pandas(pd.DataFrame({\"a b c\": [1, 2, 3]}), npartitions=2)\n assert \"a b c\" not in dir(df)\n df = dd.from_pandas(pd.DataFrame({\"a\": [1, 2], 5: [1, 2]}), npartitions=2)\n assert \"a\" in dir(df)\n assert 5 not in dir(df)\n\n df = dd.from_pandas(_compat.makeTimeDataFrame(), npartitions=3)\n pytest.raises(AttributeError, lambda: df.foo)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_column_names_test_index_names.assert_ddf_index_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_column_names_test_index_names.assert_ddf_index_compute_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 210, "end_line": 225, "span_ids": ["test_index_names", "test_column_names"], "tokens": 191}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_column_names():\n tm.assert_index_equal(d.columns, pd.Index([\"a\", \"b\"]))\n tm.assert_index_equal(d[[\"b\", \"a\"]].columns, pd.Index([\"b\", \"a\"]))\n assert d[\"a\"].name == \"a\"\n assert (d[\"a\"] + 1).name == \"a\"\n assert (d[\"a\"] + d[\"b\"]).name is None\n\n\ndef test_index_names():\n assert d.index.name is None\n\n idx = pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name=\"x\")\n df = pd.DataFrame(np.random.randn(10, 5), 
idx)\n ddf = dd.from_pandas(df, 3)\n assert ddf.index.name == \"x\"\n assert ddf.index.compute().name == \"x\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_timezone_freq_test_timezone_freq.assert_pdf_tz_0_freq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_timezone_freq_test_timezone_freq.assert_pdf_tz_0_freq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 228, "end_line": 246, "span_ids": ["test_timezone_freq"], "tokens": 175}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"npartitions\",\n [\n 1,\n pytest.param(\n 2,\n marks=pytest.mark.xfail(\n not dd._compat.PANDAS_GT_110, reason=\"Fixed upstream.\"\n ),\n ),\n ],\n)\ndef test_timezone_freq(npartitions):\n s_naive = pd.Series(pd.date_range(\"20130101\", periods=10))\n s_aware = pd.Series(pd.date_range(\"20130101\", periods=10, tz=\"US/Eastern\"))\n pdf = pd.DataFrame({\"tz\": s_aware, \"notz\": s_naive})\n ddf = dd.from_pandas(pdf, npartitions=npartitions)\n\n assert pdf.tz[0].freq == ddf.compute().tz[0].freq == ddf.tz.compute()[0].freq", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_rename_columns_test_rename_columns.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_rename_columns_test_rename_columns.None_5", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 249, "end_line": 273, "span_ids": ["test_rename_columns"], "tokens": 327}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rename_columns():\n # GH 819\n df = pd.DataFrame({\"a\": [1, 2, 3, 4, 5, 6, 7], \"b\": [7, 6, 5, 4, 3, 2, 1]})\n ddf = dd.from_pandas(df, 2)\n\n ddf.columns = [\"x\", \"y\"]\n df.columns = [\"x\", \"y\"]\n tm.assert_index_equal(ddf.columns, pd.Index([\"x\", \"y\"]))\n tm.assert_index_equal(ddf._meta.columns, pd.Index([\"x\", \"y\"]))\n assert_eq(ddf, df)\n\n msg = r\"Length mismatch: Expected axis has 2 elements, new values have 4 elements\"\n with pytest.raises(ValueError) as err:\n ddf.columns = [1, 2, 3, 4]\n assert msg in 
str(err.value)\n\n # Multi-index columns\n df = pd.DataFrame({(\"A\", \"0\"): [1, 2, 2, 3], (\"B\", 1): [1, 2, 3, 4]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n df.columns = [\"x\", \"y\"]\n ddf.columns = [\"x\", \"y\"]\n tm.assert_index_equal(ddf.columns, pd.Index([\"x\", \"y\"]))\n tm.assert_index_equal(ddf._meta.columns, pd.Index([\"x\", \"y\"]))\n assert_eq(ddf, df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_rename_series_test_rename_series.with_warnings_catch_warni.assert_eq_dind_ind_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_rename_series_test_rename_series.with_warnings_catch_warni.assert_eq_dind_ind_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 276, "end_line": 296, "span_ids": ["test_rename_series"], "tokens": 184}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rename_series():\n # GH 819\n s = pd.Series([1, 2, 3, 4, 5, 6, 7], name=\"x\")\n ds = dd.from_pandas(s, 2)\n\n s.name = \"renamed\"\n ds.name = \"renamed\"\n assert s.name == \"renamed\"\n assert_eq(ds, s)\n\n ind = s.index\n dind = ds.index\n ind.name = \"renamed\"\n dind.name = \"renamed\"\n assert ind.name == \"renamed\"\n with warnings.catch_warnings():\n if _numpy_118:\n # Catch DeprecationWarning from numpy from rewrite_blockwise\n # where we attempt to do `'str' in ndarray`.\n warnings.simplefilter(\"ignore\", DeprecationWarning)\n assert_eq(dind, ind)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_rename_series_method_test_rename_series_method.assert_eq_ds_s_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_rename_series_method_test_rename_series_method.assert_eq_ds_s_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 299, "end_line": 311, "span_ids": ["test_rename_series_method"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rename_series_method():\n # Series name\n s = pd.Series([1, 2, 3, 4, 5, 6, 7], name=\"x\")\n ds = dd.from_pandas(s, 2)\n\n assert_eq(ds.rename(\"y\"), 
s.rename(\"y\"))\n assert ds.name == \"x\" # no mutation\n assert_eq(ds.rename(), s.rename())\n\n ds.rename(\"z\", inplace=True)\n s.rename(\"z\", inplace=True)\n assert ds.name == \"z\"\n assert_eq(ds, s)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_rename_series_method_2_test_rename_series_method_2.assert_eq_ds_s_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_rename_series_method_2_test_rename_series_method_2.assert_eq_ds_s_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 314, "end_line": 344, "span_ids": ["test_rename_series_method_2"], "tokens": 302}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rename_series_method_2():\n # Series index\n s = pd.Series([\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\"], name=\"x\")\n ds = dd.from_pandas(s, 2)\n\n for is_sorted in [True, False]:\n res = ds.rename(lambda x: x ** 2, sorted_index=is_sorted)\n assert_eq(res, s.rename(lambda x: x ** 2))\n assert res.known_divisions == is_sorted\n\n res = ds.rename(s, sorted_index=is_sorted)\n assert_eq(res, s.rename(s))\n assert res.known_divisions == is_sorted\n\n with pytest.raises(ValueError):\n ds.rename(lambda x: -x, sorted_index=True)\n assert_eq(ds.rename(lambda x: -x), s.rename(lambda x: -x))\n\n res = ds.rename(ds)\n assert_eq(res, s.rename(s))\n assert not res.known_divisions\n\n ds2 = ds.clear_divisions()\n res = ds2.rename(lambda x: x ** 2, sorted_index=True)\n assert_eq(res, s.rename(lambda x: x ** 2))\n assert not res.known_divisions\n\n res = ds.rename(lambda x: x ** 2, inplace=True, sorted_index=True)\n assert res is ds\n s.rename(lambda x: x ** 2, inplace=True)\n assert_eq(ds, s)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_describe_numeric_test_describe_numeric.None_6": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_describe_numeric_test_describe_numeric.None_6", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 347, "end_line": 387, "span_ids": ["test_describe_numeric"], "tokens": 404}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"method,test_values\", [(\"tdigest\", (6, 10)), (\"dask\", (4, 20))]\n)\ndef test_describe_numeric(method, test_values):\n if method == \"tdigest\":\n pytest.importorskip(\"crick\")\n # prepare test case which approx quantiles will be the same as actuals\n s = pd.Series(list(range(test_values[1])) * test_values[0])\n df = pd.DataFrame(\n {\n \"a\": list(range(test_values[1])) * test_values[0],\n \"b\": list(range(test_values[0])) * test_values[1],\n }\n )\n\n ds = dd.from_pandas(s, test_values[0])\n ddf = dd.from_pandas(df, test_values[0])\n\n test_quantiles = [0.25, 0.75]\n\n assert_eq(df.describe(), ddf.describe(percentiles_method=method))\n assert_eq(s.describe(), ds.describe(percentiles_method=method))\n\n assert_eq(\n df.describe(percentiles=test_quantiles),\n ddf.describe(percentiles=test_quantiles, percentiles_method=method),\n )\n assert_eq(s.describe(), ds.describe(split_every=2, percentiles_method=method))\n assert_eq(df.describe(), ddf.describe(split_every=2, percentiles_method=method))\n\n # remove string columns\n df = pd.DataFrame(\n {\n \"a\": list(range(test_values[1])) * test_values[0],\n \"b\": list(range(test_values[0])) * test_values[1],\n \"c\": list(\"abcdef\"[: test_values[0]]) * test_values[1],\n }\n )\n ddf = dd.from_pandas(df, test_values[0])\n assert_eq(df.describe(), ddf.describe(percentiles_method=method))\n assert_eq(df.describe(), ddf.describe(split_every=2, percentiles_method=method))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_describe_test_describe._Act": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_describe_test_describe._Act", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 390, "end_line": 480, "span_ids": ["test_describe"], "tokens": 745}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(\n PANDAS_VERSION == \"0.24.2\",\n reason=\"Known bug in Pandas. 
See https://github.com/pandas-dev/pandas/issues/24011.\",\n)\n@pytest.mark.parametrize(\n \"include,exclude,percentiles,subset\",\n [\n (None, None, None, [\"c\", \"d\"]), # numeric\n (None, None, None, [\"c\", \"d\", \"f\"]), # numeric + timedelta\n (None, None, None, [\"c\", \"d\", \"g\"]), # numeric + bool\n (None, None, None, [\"c\", \"d\", \"f\", \"g\"]), # numeric + bool + timedelta\n (None, None, None, [\"f\", \"g\"]), # bool + timedelta\n pytest.param(\n \"all\",\n None,\n None,\n None,\n marks=pytest.mark.xfail(PANDAS_GT_110, reason=\"upstream changes\"),\n ),\n pytest.param(\n [\"number\"],\n None,\n [0.25, 0.5],\n None,\n marks=pytest.mark.xfail(PANDAS_GT_110, reason=\"upstream changes\"),\n ),\n pytest.param(\n [np.timedelta64],\n None,\n None,\n None,\n marks=pytest.mark.xfail(PANDAS_GT_110, reason=\"upstream changes\"),\n ),\n pytest.param(\n [\"number\", \"object\"],\n None,\n [0.25, 0.75],\n None,\n marks=pytest.mark.xfail(PANDAS_GT_110, reason=\"upstream changes\"),\n ),\n pytest.param(\n None,\n [\"number\", \"object\"],\n None,\n None,\n marks=pytest.mark.xfail(PANDAS_GT_110, reason=\"upstream changes\"),\n ),\n pytest.param(\n [\"object\", \"datetime\", \"bool\"],\n None,\n None,\n None,\n marks=pytest.mark.xfail(PANDAS_GT_110, reason=\"upstream changes\"),\n ),\n ],\n)\ndef test_describe(include, exclude, percentiles, subset):\n data = {\n \"a\": [\"aaa\", \"bbb\", \"bbb\", None, None, \"zzz\"] * 2,\n \"c\": [None, 0, 1, 2, 3, 4] * 2,\n \"d\": [None, 0, 1] * 4,\n \"e\": [\n pd.Timestamp(\"2017-05-09 00:00:00.006000\"),\n pd.Timestamp(\"2017-05-09 00:00:00.006000\"),\n pd.Timestamp(\"2017-05-09 07:56:23.858694\"),\n pd.Timestamp(\"2017-05-09 05:59:58.938999\"),\n None,\n None,\n ]\n * 2,\n \"f\": [\n np.timedelta64(3, \"D\"),\n np.timedelta64(1, \"D\"),\n None,\n None,\n np.timedelta64(3, \"D\"),\n np.timedelta64(1, \"D\"),\n ]\n * 2,\n \"g\": [True, False, True] * 4,\n }\n\n # Arrange\n df = pd.DataFrame(data)\n\n if subset is not None:\n df = df.loc[:, subset]\n\n ddf = dd.from_pandas(df, 2)\n\n # Act\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_describe.desc_ddf_test_describe.if_subset_is_None_.for_col_in_a_c_e_.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_describe.desc_ddf_test_describe.if_subset_is_None_.for_col_in_a_c_e_.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 481, "end_line": 493, "span_ids": ["test_describe"], "tokens": 574}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(\n PANDAS_VERSION == \"0.24.2\",\n reason=\"Known bug in Pandas. 
See https://github.com/pandas-dev/pandas/issues/24011.\",\n)\n@pytest.mark.parametrize(\n \"include,exclude,percentiles,subset\",\n [\n (None, None, None, [\"c\", \"d\"]), # numeric\n (None, None, None, [\"c\", \"d\", \"f\"]), # numeric + timedelta\n (None, None, None, [\"c\", \"d\", \"g\"]), # numeric + bool\n (None, None, None, [\"c\", \"d\", \"f\", \"g\"]), # numeric + bool + timedelta\n (None, None, None, [\"f\", \"g\"]), # bool + timedelta\n pytest.param(\n \"all\",\n None,\n None,\n None,\n marks=pytest.mark.xfail(PANDAS_GT_110, reason=\"upstream changes\"),\n ),\n pytest.param(\n [\"number\"],\n None,\n [0.25, 0.5],\n None,\n marks=pytest.mark.xfail(PANDAS_GT_110, reason=\"upstream changes\"),\n ),\n pytest.param(\n [np.timedelta64],\n None,\n None,\n None,\n marks=pytest.mark.xfail(PANDAS_GT_110, reason=\"upstream changes\"),\n ),\n pytest.param(\n [\"number\", \"object\"],\n None,\n [0.25, 0.75],\n None,\n marks=pytest.mark.xfail(PANDAS_GT_110, reason=\"upstream changes\"),\n ),\n pytest.param(\n None,\n [\"number\", \"object\"],\n None,\n None,\n marks=pytest.mark.xfail(PANDAS_GT_110, reason=\"upstream changes\"),\n ),\n pytest.param(\n [\"object\", \"datetime\", \"bool\"],\n None,\n None,\n None,\n marks=pytest.mark.xfail(PANDAS_GT_110, reason=\"upstream changes\"),\n ),\n ],\n)\ndef test_describe(include, exclude, percentiles, subset):\n # ... other code\n desc_ddf = ddf.describe(include=include, exclude=exclude, percentiles=percentiles)\n desc_df = df.describe(include=include, exclude=exclude, percentiles=percentiles)\n\n # Assert\n assert_eq(desc_ddf, desc_df)\n\n # Check series\n if subset is None:\n for col in [\"a\", \"c\", \"e\", \"g\"]:\n assert_eq(\n df[col].describe(include=include, exclude=exclude),\n ddf[col].describe(include=include, exclude=exclude),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_describe_empty_test_describe_empty.None_2.ddf_nocols_describe_perce": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_describe_empty_test_describe_empty.None_2.ddf_nocols_describe_perce", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 496, "end_line": 516, "span_ids": ["test_describe_empty"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_describe_empty():\n df_none = pd.DataFrame({\"A\": [None, None]})\n ddf_none = dd.from_pandas(df_none, 2)\n df_len0 = pd.DataFrame({\"A\": [], \"B\": []})\n ddf_len0 = dd.from_pandas(df_len0, 2)\n ddf_nocols = dd.from_pandas(pd.DataFrame({}), 2)\n\n # Pandas have different dtypes for resulting describe dataframe if there are only\n # None-values, pre-compute dask df to bypass _meta check\n assert_eq(\n df_none.describe(), ddf_none.describe(percentiles_method=\"dask\").compute()\n )\n\n with pytest.raises(ValueError):\n 
ddf_len0.describe(percentiles_method=\"dask\").compute()\n\n with pytest.raises(ValueError):\n ddf_len0.describe(percentiles_method=\"dask\").compute()\n\n with pytest.raises(ValueError):\n ddf_nocols.describe(percentiles_method=\"dask\").compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_describe_empty_tdigest_test_describe_empty_tdigest.with_pytest_raises_ValueE.ddf_nocols_describe_perce": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_describe_empty_tdigest_test_describe_empty_tdigest.with_pytest_raises_ValueE.ddf_nocols_describe_perce", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 519, "end_line": 541, "span_ids": ["test_describe_empty_tdigest"], "tokens": 251}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_describe_empty_tdigest():\n pytest.importorskip(\"crick\")\n\n df_none = pd.DataFrame({\"A\": [None, None]})\n ddf_none = dd.from_pandas(df_none, 2)\n df_len0 = pd.DataFrame({\"A\": []})\n ddf_len0 = dd.from_pandas(df_len0, 2)\n ddf_nocols = dd.from_pandas(pd.DataFrame({}), 2)\n\n # Pandas have different dtypes for resulting describe dataframe if there are only\n # None-values, pre-compute dask df to bypass _meta check\n assert_eq(\n df_none.describe(), ddf_none.describe(percentiles_method=\"tdigest\").compute()\n )\n with warnings.catch_warnings():\n # dask.dataframe should probably filter this, to match pandas, but\n # it seems quite difficult.\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n assert_eq(df_len0.describe(), ddf_len0.describe(percentiles_method=\"tdigest\"))\n assert_eq(df_len0.describe(), ddf_len0.describe(percentiles_method=\"tdigest\"))\n\n with pytest.raises(ValueError):\n ddf_nocols.describe(percentiles_method=\"tdigest\").compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_describe_for_possibly_unsorted_q_test_describe_for_possibly_unsorted_q.for_q_in_None_0_25_0_.for_f_convert_in_list_t.assert_eq_r_75_75_0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_describe_for_possibly_unsorted_q_test_describe_for_possibly_unsorted_q.for_q_in_None_0_25_0_.for_f_convert_in_list_t.assert_eq_r_75_75_0_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 544, "end_line": 563, "span_ids": ["test_describe_for_possibly_unsorted_q"], "tokens": 
230}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_describe_for_possibly_unsorted_q():\n \"\"\"make sure describe is sorting percentiles parameter, q, properly and can\n handle lists, tuples and ndarrays.\n\n See https://github.com/dask/dask/issues/4642.\n \"\"\"\n # prepare test case where quantiles should equal values\n A = da.arange(0, 101)\n ds = dd.from_dask_array(A)\n\n for q in [None, [0.25, 0.50, 0.75], [0.25, 0.50, 0.75, 0.99], [0.75, 0.5, 0.25]]:\n for f_convert in [list, tuple, np.array]:\n if q is None:\n r = ds.describe(percentiles=q).compute()\n else:\n r = ds.describe(percentiles=f_convert(q)).compute()\n\n assert_eq(r[\"25%\"], 25.0)\n assert_eq(r[\"50%\"], 50.0)\n assert_eq(r[\"75%\"], 75.0)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_cumulative_test_cumulative.None_43": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_cumulative_test_cumulative.None_43", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 566, "end_line": 635, "span_ids": ["test_cumulative"], "tokens": 874}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_cumulative():\n index = [\"row{:03d}\".format(i) for i in range(100)]\n df = pd.DataFrame(np.random.randn(100, 5), columns=list(\"abcde\"), index=index)\n df_out = pd.DataFrame(np.random.randn(100, 5), columns=list(\"abcde\"), index=index)\n\n ddf = dd.from_pandas(df, 5)\n ddf_out = dd.from_pandas(df_out, 5)\n\n assert_eq(ddf.cumsum(), df.cumsum())\n assert_eq(ddf.cumprod(), df.cumprod())\n assert_eq(ddf.cummin(), df.cummin())\n assert_eq(ddf.cummax(), df.cummax())\n\n assert_eq(ddf.cumsum(axis=1), df.cumsum(axis=1))\n assert_eq(ddf.cumprod(axis=1), df.cumprod(axis=1))\n assert_eq(ddf.cummin(axis=1), df.cummin(axis=1))\n assert_eq(ddf.cummax(axis=1), df.cummax(axis=1))\n\n np.cumsum(ddf, out=ddf_out)\n assert_eq(ddf_out, df.cumsum())\n np.cumprod(ddf, out=ddf_out)\n assert_eq(ddf_out, df.cumprod())\n ddf.cummin(out=ddf_out)\n assert_eq(ddf_out, df.cummin())\n ddf.cummax(out=ddf_out)\n assert_eq(ddf_out, df.cummax())\n\n np.cumsum(ddf, out=ddf_out, axis=1)\n assert_eq(ddf_out, df.cumsum(axis=1))\n np.cumprod(ddf, out=ddf_out, axis=1)\n assert_eq(ddf_out, df.cumprod(axis=1))\n ddf.cummin(out=ddf_out, axis=1)\n assert_eq(ddf_out, df.cummin(axis=1))\n ddf.cummax(out=ddf_out, axis=1)\n assert_eq(ddf_out, df.cummax(axis=1))\n\n assert_eq(ddf.a.cumsum(), df.a.cumsum())\n assert_eq(ddf.a.cumprod(), df.a.cumprod())\n assert_eq(ddf.a.cummin(), df.a.cummin())\n 
assert_eq(ddf.a.cummax(), df.a.cummax())\n\n # With NaNs\n df = pd.DataFrame(\n {\n \"a\": [1, 2, np.nan, 4, 5, 6, 7, 8],\n \"b\": [1, 2, np.nan, np.nan, np.nan, 5, np.nan, np.nan],\n \"c\": [np.nan] * 8,\n }\n )\n ddf = dd.from_pandas(df, 3)\n\n assert_eq(df.cumsum(), ddf.cumsum())\n assert_eq(df.cummin(), ddf.cummin())\n assert_eq(df.cummax(), ddf.cummax())\n assert_eq(df.cumprod(), ddf.cumprod())\n\n assert_eq(df.cumsum(skipna=False), ddf.cumsum(skipna=False))\n assert_eq(df.cummin(skipna=False), ddf.cummin(skipna=False))\n assert_eq(df.cummax(skipna=False), ddf.cummax(skipna=False))\n assert_eq(df.cumprod(skipna=False), ddf.cumprod(skipna=False))\n\n assert_eq(df.cumsum(axis=1), ddf.cumsum(axis=1))\n assert_eq(df.cummin(axis=1), ddf.cummin(axis=1))\n assert_eq(df.cummax(axis=1), ddf.cummax(axis=1))\n assert_eq(df.cumprod(axis=1), ddf.cumprod(axis=1))\n\n assert_eq(df.cumsum(axis=1, skipna=False), ddf.cumsum(axis=1, skipna=False))\n assert_eq(df.cummin(axis=1, skipna=False), ddf.cummin(axis=1, skipna=False))\n assert_eq(df.cummax(axis=1, skipna=False), ddf.cummax(axis=1, skipna=False))\n assert_eq(df.cumprod(axis=1, skipna=False), ddf.cumprod(axis=1, skipna=False))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_cumulative_empty_partitions_test_cumulative_empty_partitions.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_cumulative_empty_partitions_test_cumulative_empty_partitions.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 638, "end_line": 668, "span_ids": ["test_cumulative_empty_partitions"], "tokens": 253}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"func\",\n [\n M.cumsum,\n M.cumprod,\n pytest.param(\n M.cummin,\n marks=[\n pytest.mark.xfail(\n reason=\"ValueError: Can only compare identically-labeled Series objects\"\n )\n ],\n ),\n pytest.param(\n M.cummax,\n marks=[\n pytest.mark.xfail(\n reason=\"ValueError: Can only compare identically-labeled Series objects\"\n )\n ],\n ),\n ],\n)\ndef test_cumulative_empty_partitions(func):\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5, 6, 7, 8]})\n ddf = dd.from_pandas(df, npartitions=4)\n assert_eq(func(df[df.x < 5]), func(ddf[ddf.x < 5]))\n\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, None, 5, 6, None, 7, 8]})\n ddf = dd.from_pandas(df, npartitions=5)\n assert_eq(func(df[df.x < 5]), func(ddf[ddf.x < 5]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dropna_test_dropna.None_17": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dropna_test_dropna.None_17", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 671, "end_line": 709, "span_ids": ["test_dropna"], "tokens": 486}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dropna():\n df = pd.DataFrame(\n {\n \"x\": [np.nan, 2, 3, 4, np.nan, 6],\n \"y\": [1, 2, np.nan, 4, np.nan, np.nan],\n \"z\": [1, 2, 3, 4, np.nan, 6],\n },\n index=[10, 20, 30, 40, 50, 60],\n )\n ddf = dd.from_pandas(df, 3)\n\n assert_eq(ddf.x.dropna(), df.x.dropna())\n assert_eq(ddf.y.dropna(), df.y.dropna())\n assert_eq(ddf.z.dropna(), df.z.dropna())\n\n assert_eq(ddf.dropna(), df.dropna())\n assert_eq(ddf.dropna(how=\"all\"), df.dropna(how=\"all\"))\n assert_eq(ddf.dropna(subset=[\"x\"]), df.dropna(subset=[\"x\"]))\n assert_eq(ddf.dropna(subset=[\"y\", \"z\"]), df.dropna(subset=[\"y\", \"z\"]))\n assert_eq(\n ddf.dropna(subset=[\"y\", \"z\"], how=\"all\"),\n df.dropna(subset=[\"y\", \"z\"], how=\"all\"),\n )\n\n # threshold\n assert_eq(df.dropna(thresh=None), df.loc[[20, 40]])\n assert_eq(ddf.dropna(thresh=None), df.dropna(thresh=None))\n\n assert_eq(df.dropna(thresh=0), df.loc[:])\n assert_eq(ddf.dropna(thresh=0), df.dropna(thresh=0))\n\n assert_eq(df.dropna(thresh=1), df.loc[[10, 20, 30, 40, 60]])\n assert_eq(ddf.dropna(thresh=1), df.dropna(thresh=1))\n\n assert_eq(df.dropna(thresh=2), df.loc[[10, 20, 30, 40, 60]])\n assert_eq(ddf.dropna(thresh=2), df.dropna(thresh=2))\n\n assert_eq(df.dropna(thresh=3), df.loc[[20, 40]])\n assert_eq(ddf.dropna(thresh=3), df.dropna(thresh=3))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_clip_test_clip.assert_eq_ds_clip_upper_u": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_clip_test_clip.assert_eq_ds_clip_upper_u", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 712, "end_line": 729, "span_ids": ["test_clip"], "tokens": 270}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"lower, upper\", [(2, 5), (2.5, 3.5)])\ndef test_clip(lower, upper):\n\n df = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5, 6, 7, 8, 9], \"b\": [3, 5, 2, 5, 7, 2, 4, 2, 4]}\n )\n ddf = dd.from_pandas(df, 3)\n\n s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9])\n ds = dd.from_pandas(s, 3)\n\n assert_eq(ddf.clip(lower=lower, upper=upper), 
df.clip(lower=lower, upper=upper))\n assert_eq(ddf.clip(lower=lower), df.clip(lower=lower))\n assert_eq(ddf.clip(upper=upper), df.clip(upper=upper))\n\n assert_eq(ds.clip(lower=lower, upper=upper), s.clip(lower=lower, upper=upper))\n assert_eq(ds.clip(lower=lower), s.clip(lower=lower))\n assert_eq(ds.clip(upper=upper), s.clip(upper=upper))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_squeeze_test_squeeze.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_squeeze_test_squeeze.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 732, "end_line": 758, "span_ids": ["test_squeeze"], "tokens": 266}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_squeeze():\n df = pd.DataFrame({\"x\": [1, 3, 6]})\n df2 = pd.DataFrame({\"x\": [0]})\n s = pd.Series({\"test\": 0, \"b\": 100})\n\n ddf = dd.from_pandas(df, 3)\n ddf2 = dd.from_pandas(df2, 3)\n ds = dd.from_pandas(s, 2)\n\n assert_eq(df.squeeze(), ddf.squeeze())\n assert_eq(pd.Series([0], name=\"x\"), ddf2.squeeze())\n assert_eq(ds.squeeze(), s.squeeze())\n\n with pytest.raises(NotImplementedError) as info:\n ddf.squeeze(axis=0)\n msg = \"{0} does not support squeeze along axis 0\".format(type(ddf))\n assert msg in str(info.value)\n\n with pytest.raises(ValueError) as info:\n ddf.squeeze(axis=2)\n msg = \"No axis {0} for object type {1}\".format(2, type(ddf))\n assert msg in str(info.value)\n\n with pytest.raises(ValueError) as info:\n ddf.squeeze(axis=\"test\")\n msg = \"No axis test for object type {0}\".format(type(ddf))\n assert msg in str(info.value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_where_mask_test_where_mask.ddf6.dd_from_pandas_pdf6_2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_where_mask_test_where_mask.ddf6.dd_from_pandas_pdf6_2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 761, "end_line": 800, "span_ids": ["test_where_mask"], "tokens": 609}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_where_mask():\n pdf1 
= pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5, 6, 7, 8, 9], \"b\": [3, 5, 2, 5, 7, 2, 4, 2, 4]}\n )\n ddf1 = dd.from_pandas(pdf1, 2)\n pdf2 = pd.DataFrame({\"a\": [True, False, True] * 3, \"b\": [False, False, True] * 3})\n ddf2 = dd.from_pandas(pdf2, 2)\n\n # different index\n pdf3 = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5, 6, 7, 8, 9], \"b\": [3, 5, 2, 5, 7, 2, 4, 2, 4]},\n index=[0, 1, 2, 3, 4, 5, 6, 7, 8],\n )\n ddf3 = dd.from_pandas(pdf3, 2)\n pdf4 = pd.DataFrame(\n {\"a\": [True, False, True] * 3, \"b\": [False, False, True] * 3},\n index=[5, 6, 7, 8, 9, 10, 11, 12, 13],\n )\n ddf4 = dd.from_pandas(pdf4, 2)\n\n # different columns\n pdf5 = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 4, 5, 6, 7, 8, 9],\n \"b\": [9, 4, 2, 6, 2, 3, 1, 6, 2],\n \"c\": [5, 6, 7, 8, 9, 10, 11, 12, 13],\n },\n index=[0, 1, 2, 3, 4, 5, 6, 7, 8],\n )\n ddf5 = dd.from_pandas(pdf5, 2)\n pdf6 = pd.DataFrame(\n {\n \"a\": [True, False, True] * 3,\n \"b\": [False, False, True] * 3,\n \"c\": [False] * 9,\n \"d\": [True] * 9,\n },\n index=[5, 6, 7, 8, 9, 10, 11, 12, 13],\n )\n ddf6 = dd.from_pandas(pdf6, 2)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_where_mask.cases_test_where_mask.for_ddf_ddcond_pdf_pdc.None_9": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_where_mask.cases_test_where_mask.for_ddf_ddcond_pdf_pdc.None_9", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 802, "end_line": 831, "span_ids": ["test_where_mask"], "tokens": 463}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_where_mask():\n # ... 
other code\n\n cases = [\n (ddf1, ddf2, pdf1, pdf2),\n (ddf1.repartition([0, 3, 6, 8]), ddf2, pdf1, pdf2),\n (ddf1, ddf4, pdf3, pdf4),\n (ddf3.repartition([0, 4, 6, 8]), ddf4.repartition([5, 9, 10, 13]), pdf3, pdf4),\n (ddf5, ddf6, pdf5, pdf6),\n (ddf5.repartition([0, 4, 7, 8]), ddf6, pdf5, pdf6),\n # use pd.DataFrame as cond\n (ddf1, pdf2, pdf1, pdf2),\n (ddf1, pdf4, pdf3, pdf4),\n (ddf5, pdf6, pdf5, pdf6),\n ]\n\n for ddf, ddcond, pdf, pdcond in cases:\n assert isinstance(ddf, dd.DataFrame)\n assert isinstance(ddcond, (dd.DataFrame, pd.DataFrame))\n assert isinstance(pdf, pd.DataFrame)\n assert isinstance(pdcond, pd.DataFrame)\n\n assert_eq(ddf.where(ddcond), pdf.where(pdcond))\n assert_eq(ddf.mask(ddcond), pdf.mask(pdcond))\n assert_eq(ddf.where(ddcond, -ddf), pdf.where(pdcond, -pdf))\n assert_eq(ddf.mask(ddcond, -ddf), pdf.mask(pdcond, -pdf))\n\n assert_eq(ddf.where(ddcond.a, -ddf), pdf.where(pdcond.a, -pdf))\n assert_eq(ddf.mask(ddcond.a, -ddf), pdf.mask(pdcond.a, -pdf))\n assert_eq(ddf.a.where(ddcond.a), pdf.a.where(pdcond.a))\n assert_eq(ddf.a.mask(ddcond.a), pdf.a.mask(pdcond.a))\n assert_eq(ddf.a.where(ddcond.a, -ddf.a), pdf.a.where(pdcond.a, -pdf.a))\n assert_eq(ddf.a.mask(ddcond.a, -ddf.a), pdf.a.mask(pdcond.a, -pdf.a))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_partitions_multi_argument_test_map_partitions.assert_result_dtype_np": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_partitions_multi_argument_test_map_partitions.assert_result_dtype_np", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 834, "end_line": 856, "span_ids": ["test_map_partitions", "test_map_partitions_multi_argument"], "tokens": 262}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_partitions_multi_argument():\n assert_eq(dd.map_partitions(lambda a, b: a + b, d.a, d.b), full.a + full.b)\n assert_eq(\n dd.map_partitions(lambda a, b, c: a + b + c, d.a, d.b, 1), full.a + full.b + 1\n )\n\n\ndef test_map_partitions():\n assert_eq(d.map_partitions(lambda df: df, meta=d), full)\n assert_eq(d.map_partitions(lambda df: df), full)\n result = d.map_partitions(lambda df: df.sum(axis=1))\n assert_eq(result, full.sum(axis=1))\n\n assert_eq(\n d.map_partitions(lambda df: 1),\n pd.Series([1, 1, 1], dtype=np.int64),\n check_divisions=False,\n )\n x = Scalar({(\"x\", 0): 1}, \"x\", int)\n result = dd.map_partitions(lambda x: 2, x)\n assert result.dtype in (np.int32, np.int64) and result.compute() == 2\n result = dd.map_partitions(lambda x: 4.0, x)\n assert result.dtype == np.float64 and result.compute() == 4.0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": 
"1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_partitions_type_test_map_partitions_names.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_partitions_type_test_map_partitions_names.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 859, "end_line": 877, "span_ids": ["test_map_partitions_names", "test_map_partitions_type"], "tokens": 189}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_partitions_type():\n result = d.map_partitions(type).compute(scheduler=\"single-threaded\")\n assert isinstance(result, pd.Series)\n assert all(x == pd.DataFrame for x in result)\n\n\ndef test_map_partitions_names():\n func = lambda x: x\n assert sorted(dd.map_partitions(func, d, meta=d).dask) == sorted(\n dd.map_partitions(func, d, meta=d).dask\n )\n assert sorted(dd.map_partitions(lambda x: x, d, meta=d, token=1).dask) == sorted(\n dd.map_partitions(lambda x: x, d, meta=d, token=1).dask\n )\n\n func = lambda x, y: x\n assert sorted(dd.map_partitions(func, d, d, meta=d).dask) == sorted(\n dd.map_partitions(func, d, d, meta=d).dask\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_partitions_column_info_test_map_partitions_column_info.None_6": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_partitions_column_info_test_map_partitions_column_info.None_6", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 880, "end_line": 903, "span_ids": ["test_map_partitions_column_info"], "tokens": 245}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_partitions_column_info():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [5, 6, 7, 8]})\n a = dd.from_pandas(df, npartitions=2)\n\n b = dd.map_partitions(lambda x: x, a, meta=a)\n tm.assert_index_equal(b.columns, a.columns)\n assert_eq(df, b)\n\n b = dd.map_partitions(lambda x: x, a.x, meta=a.x)\n assert b.name == a.x.name\n assert_eq(df.x, b)\n\n b = dd.map_partitions(lambda x: x, a.x, meta=a.x)\n assert b.name == a.x.name\n assert_eq(df.x, b)\n\n b = dd.map_partitions(lambda df: df.x + df.y, a)\n assert isinstance(b, dd.Series)\n assert b.dtype == \"i8\"\n\n b = dd.map_partitions(lambda df: df.x + 1, a, meta=(\"x\", \"i8\"))\n assert 
isinstance(b, dd.Series)\n assert b.name == \"x\"\n assert b.dtype == \"i8\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_partitions_method_names_test_map_partitions_method_names.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_partitions_method_names_test_map_partitions_method_names.None_5", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 906, "end_line": 921, "span_ids": ["test_map_partitions_method_names"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_partitions_method_names():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [5, 6, 7, 8]})\n a = dd.from_pandas(df, npartitions=2)\n\n b = a.map_partitions(lambda x: x)\n assert isinstance(b, dd.DataFrame)\n tm.assert_index_equal(b.columns, a.columns)\n\n b = a.map_partitions(lambda df: df.x + 1)\n assert isinstance(b, dd.Series)\n assert b.dtype == \"i8\"\n\n b = a.map_partitions(lambda df: df.x + 1, meta=(\"x\", \"i8\"))\n assert isinstance(b, dd.Series)\n assert b.name == \"x\"\n assert b.dtype == \"i8\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_partitions_propagates_index_metadata_test_map_partitions_propagates_index_metadata.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_partitions_propagates_index_metadata_test_map_partitions_propagates_index_metadata.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 924, "end_line": 940, "span_ids": ["test_map_partitions_propagates_index_metadata"], "tokens": 184}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_partitions_propagates_index_metadata():\n index = pd.Series(list(\"abcde\"), name=\"myindex\")\n df = pd.DataFrame(\n {\"A\": np.arange(5, dtype=np.int32), \"B\": np.arange(10, 15, dtype=np.int32)},\n index=index,\n )\n ddf = dd.from_pandas(df, npartitions=2)\n res = ddf.map_partitions(\n lambda df: df.assign(C=df.A + df.B),\n meta=[(\"A\", \"i4\"), (\"B\", \"i4\"), (\"C\", \"i4\")],\n )\n sol = df.assign(C=df.A + df.B)\n 
assert_eq(res, sol)\n\n res = ddf.map_partitions(lambda df: df.rename_axis(\"newindex\"))\n sol = df.rename_axis(\"newindex\")\n assert_eq(res, sol)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_partitions_keeps_kwargs_readable_test_map_partitions_keeps_kwargs_readable.assert_a_x_map_partitions": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_partitions_keeps_kwargs_readable_test_map_partitions_keeps_kwargs_readable.assert_a_x_map_partitions", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 943, "end_line": 958, "span_ids": ["test_map_partitions_keeps_kwargs_readable"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(reason=\"now we use SubgraphCallables\")\ndef test_map_partitions_keeps_kwargs_readable():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [5, 6, 7, 8]})\n a = dd.from_pandas(df, npartitions=2)\n\n def f(s, x=1):\n return s + x\n\n b = a.x.map_partitions(f, x=5)\n\n # NOTE: we'd like to ensure that we keep the keyword arguments readable\n # in the dask graph\n assert \"['x', 5]\" in str(dict(b.dask)) or \"{'x': 5}\" in str(dict(b.dask))\n assert_eq(df.x + 5, b)\n\n assert a.x.map_partitions(f, x=5)._name != a.x.map_partitions(f, x=6)._name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_partitions_with_delayed_collection_test_metadata_inference_single_partition_aligned_args.assert_eq_res_ddf_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_partitions_with_delayed_collection_test_metadata_inference_single_partition_aligned_args.assert_eq_res_ddf_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 961, "end_line": 982, "span_ids": ["test_metadata_inference_single_partition_aligned_args", "test_map_partitions_with_delayed_collection"], "tokens": 195}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_partitions_with_delayed_collection():\n # https://github.com/dask/dask/issues/5854\n df = pd.DataFrame(columns=list(\"abcdefghijk\"))\n ddf 
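The column_info, method_names, and index-metadata chunks above all revolve around the meta argument. A hedged sketch of the two spellings those tests use, a ("name", dtype) tuple for a Series result and a list of such tuples for a DataFrame (example frames are mine):

import pandas as pd
import dask.dataframe as dd

pdf = pd.DataFrame({"x": [1, 2, 3, 4], "y": [5, 6, 7, 8]})
ddf = dd.from_pandas(pdf, npartitions=2)

# ("name", dtype) declares a Series output; a list of such tuples
# declares a DataFrame, skipping inference on an empty dummy frame.
s = ddf.map_partitions(lambda df: df.x + 1, meta=("x", "i8"))
res = ddf.map_partitions(
    lambda df: df.assign(z=df.x + df.y),
    meta=[("x", "i8"), ("y", "i8"), ("z", "i8")],
)
assert s.compute().tolist() == [2, 3, 4, 5]
assert list(res.columns) == ["x", "y", "z"]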
= dd.from_pandas(df, 2)\n ddf.dropna(subset=list(\"abcdefghijk\")).compute()\n # no error!\n\n\ndef test_metadata_inference_single_partition_aligned_args():\n # https://github.com/dask/dask/issues/3034\n # Previously broadcastable series functionality broke this\n\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5]})\n ddf = dd.from_pandas(df, npartitions=1)\n\n def check(df, df_x):\n assert len(df) == len(df_x)\n assert len(df) > 0\n return df\n\n res = dd.map_partitions(check, ddf, ddf.x)\n assert_eq(res, ddf)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_drop_duplicates_test_drop_duplicates.with_pytest_raises_NotImp.d_drop_duplicates_keep_Fa": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_drop_duplicates_test_drop_duplicates.with_pytest_raises_NotImp.d_drop_duplicates_keep_Fa", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 985, "end_line": 1008, "span_ids": ["test_drop_duplicates"], "tokens": 184}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_drop_duplicates():\n res = d.drop_duplicates()\n res2 = d.drop_duplicates(split_every=2)\n sol = full.drop_duplicates()\n assert_eq(res, sol)\n assert_eq(res2, sol)\n assert res._name != res2._name\n\n res = d.a.drop_duplicates()\n res2 = d.a.drop_duplicates(split_every=2)\n sol = full.a.drop_duplicates()\n assert_eq(res, sol)\n assert_eq(res2, sol)\n assert res._name != res2._name\n\n res = d.index.drop_duplicates()\n res2 = d.index.drop_duplicates(split_every=2)\n sol = full.index.drop_duplicates()\n assert_eq(res, sol)\n assert_eq(res2, sol)\n assert res._name != res2._name\n\n with pytest.raises(NotImplementedError):\n d.drop_duplicates(keep=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_drop_duplicates_subset_test_drop_duplicates_subset.for_kwarg_in_keep_f.for_ss_in_x_y_.assert_eq_df_drop_duplica": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_drop_duplicates_subset_test_drop_duplicates_subset.for_kwarg_in_keep_f.for_ss_in_x_y_.assert_eq_df_drop_duplica", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1011, "end_line": 1022, "span_ids": ["test_drop_duplicates_subset"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", 
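The test_drop_duplicates chunk exercises the split_every knob of dask's tree reductions. A minimal sketch, assuming only that results must match pandas (the toy frame is illustrative):

import pandas as pd
import dask.dataframe as dd

pdf = pd.DataFrame({"x": [1, 2, 1, 2], "y": ["a", "b", "a", "b"]})
ddf = dd.from_pandas(pdf, npartitions=2)

# split_every only changes the fan-in of the reduction tree, so the
# result matches pandas either way (note keep=False is not implemented).
out = ddf.drop_duplicates(split_every=2).compute()
assert out.equals(pdf.drop_duplicates())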
"start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_drop_duplicates_subset():\n df = pd.DataFrame({\"x\": [1, 2, 3, 1, 2, 3], \"y\": [\"a\", \"a\", \"b\", \"b\", \"c\", \"c\"]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n for kwarg in [{\"keep\": \"first\"}, {\"keep\": \"last\"}]:\n assert_eq(df.x.drop_duplicates(**kwarg), ddf.x.drop_duplicates(**kwarg))\n for ss in [[\"x\"], \"y\", [\"x\", \"y\"]]:\n assert_eq(\n df.drop_duplicates(subset=ss, **kwarg),\n ddf.drop_duplicates(subset=ss, **kwarg),\n )\n assert_eq(df.drop_duplicates(ss, **kwarg), ddf.drop_duplicates(ss, **kwarg))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_get_partition_test_get_partition.None_1.ddf_get_partition_3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_get_partition_test_get_partition.None_1.ddf_get_partition_3_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1025, "end_line": 1054, "span_ids": ["test_get_partition"], "tokens": 302}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_get_partition():\n pdf = pd.DataFrame(np.random.randn(10, 5), columns=list(\"abcde\"))\n ddf = dd.from_pandas(pdf, 3)\n assert ddf.divisions == (0, 4, 8, 9)\n\n # DataFrame\n div1 = ddf.get_partition(0)\n assert isinstance(div1, dd.DataFrame)\n assert_eq(div1, pdf.loc[0:3])\n div2 = ddf.get_partition(1)\n assert_eq(div2, pdf.loc[4:7])\n div3 = ddf.get_partition(2)\n assert_eq(div3, pdf.loc[8:9])\n assert len(div1) + len(div2) + len(div3) == len(pdf)\n\n # Series\n div1 = ddf.a.get_partition(0)\n assert isinstance(div1, dd.Series)\n assert_eq(div1, pdf.a.loc[0:3])\n div2 = ddf.a.get_partition(1)\n assert_eq(div2, pdf.a.loc[4:7])\n div3 = ddf.a.get_partition(2)\n assert_eq(div3, pdf.a.loc[8:9])\n assert len(div1) + len(div2) + len(div3) == len(pdf.a)\n\n with pytest.raises(ValueError):\n ddf.get_partition(-1)\n\n with pytest.raises(ValueError):\n ddf.get_partition(3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_ndim_test_value_counts.assert_result__name_re": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_ndim_test_value_counts.assert_result__name_re", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", 
"category": "test", "start_line": 1057, "end_line": 1075, "span_ids": ["test_ndim", "test_value_counts", "test_dtype"], "tokens": 154}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_ndim():\n assert d.ndim == 2\n assert d.a.ndim == 1\n assert d.index.ndim == 1\n\n\ndef test_dtype():\n assert (d.dtypes == full.dtypes).all()\n\n\ndef test_value_counts():\n df = pd.DataFrame({\"x\": [1, 2, 1, 3, 3, 1, 4]})\n ddf = dd.from_pandas(df, npartitions=3)\n result = ddf.x.value_counts()\n expected = df.x.value_counts()\n assert_eq(result, expected)\n result2 = ddf.x.value_counts(split_every=2)\n assert_eq(result2, expected)\n assert result._name != result2._name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_value_counts_not_sorted_test_value_counts_not_sorted.assert_result__name_re": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_value_counts_not_sorted_test_value_counts_not_sorted.assert_result__name_re", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1078, "end_line": 1086, "span_ids": ["test_value_counts_not_sorted"], "tokens": 112}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_value_counts_not_sorted():\n df = pd.DataFrame({\"x\": [1, 2, 1, 3, 3, 1, 4]})\n ddf = dd.from_pandas(df, npartitions=3)\n result = ddf.x.value_counts(sort=False)\n expected = df.x.value_counts(sort=False)\n assert_eq(result, expected)\n result2 = ddf.x.value_counts(split_every=2)\n assert_eq(result2, expected)\n assert result._name != result2._name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_value_counts_with_dropna_test_value_counts_with_dropna.assert_result__name_re": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_value_counts_with_dropna_test_value_counts_with_dropna.assert_result__name_re", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1089, "end_line": 1102, "span_ids": ["test_value_counts_with_dropna"], "tokens": 161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_value_counts_with_dropna():\n df = pd.DataFrame({\"x\": [1, 2, 1, 3, np.nan, 1, 4]})\n ddf = dd.from_pandas(df, npartitions=3)\n if not PANDAS_GT_110:\n with pytest.raises(NotImplementedError, match=\"dropna is not a valid argument\"):\n ddf.x.value_counts(dropna=False)\n return\n\n result = ddf.x.value_counts(dropna=False)\n expected = df.x.value_counts(dropna=False)\n assert_eq(result, expected)\n result2 = ddf.x.value_counts(split_every=2, dropna=False)\n assert_eq(result2, expected)\n assert result._name != result2._name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_unique_test_unique.assert_ddf_x_unique_split": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_unique_test_unique.assert_ddf_x_unique_split", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1105, "end_line": 1120, "span_ids": ["test_unique"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_unique():\n pdf = pd.DataFrame(\n {\n \"x\": [1, 2, 1, 3, 3, 1, 4, 2, 3, 1],\n \"y\": [\"a\", \"c\", \"b\", np.nan, \"c\", \"b\", \"a\", \"d\", np.nan, \"a\"],\n }\n )\n ddf = dd.from_pandas(pdf, npartitions=3)\n assert_eq(ddf.x.unique(), pd.Series(pdf.x.unique(), name=\"x\"))\n assert_eq(ddf.y.unique(), pd.Series(pdf.y.unique(), name=\"y\"))\n\n assert_eq(ddf.x.unique(split_every=2), pd.Series(pdf.x.unique(), name=\"x\"))\n assert_eq(ddf.y.unique(split_every=2), pd.Series(pdf.y.unique(), name=\"y\"))\n assert_eq(ddf.index.unique(), pdf.index.unique())\n\n assert ddf.x.unique(split_every=2)._name != ddf.x.unique()._name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_isin_test_isin.for_obj_in_d_f_series_.with_pytest_raises_NotImp.d_isin_obj_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_isin_test_isin.for_obj_in_d_f_series_.with_pytest_raises_NotImp.d_isin_obj_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1123, "end_line": 1145, "span_ids": ["test_isin"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_isin():\n f_list = [1, 2, 3]\n f_series = pd.Series(f_list)\n f_dict = {\"a\": [0, 3], \"b\": [1, 2]}\n\n # Series\n assert_eq(d.a.isin(f_list), full.a.isin(f_list))\n assert_eq(d.a.isin(f_series), full.a.isin(f_series))\n with pytest.raises(NotImplementedError):\n d.a.isin(d.a)\n\n # Index\n da.utils.assert_eq(d.index.isin(f_list), full.index.isin(f_list))\n da.utils.assert_eq(d.index.isin(f_series), full.index.isin(f_series))\n with pytest.raises(NotImplementedError):\n d.a.isin(d.a)\n\n # DataFrame test\n assert_eq(d.isin(f_list), full.isin(f_list))\n assert_eq(d.isin(f_dict), full.isin(f_dict))\n for obj in [d, f_series, full]:\n with pytest.raises(NotImplementedError):\n d.isin(obj)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_contains_frame_test_size.assert_eq_d_index_size_f": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_contains_frame_test_size.assert_eq_d_index_size_f", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1148, "end_line": 1168, "span_ids": ["test_size", "test_len", "test_contains_frame"], "tokens": 217}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_contains_frame():\n df = dd.from_pandas(pd.DataFrame({\"A\": [1, 2], 0: [3, 4]}), 1)\n assert \"A\" in df\n assert 0 in df\n assert \"B\" not in df\n assert 1 not in df\n\n\ndef test_len():\n assert len(d) == len(full)\n assert len(d.a) == len(full.a)\n assert len(dd.from_pandas(pd.DataFrame(), npartitions=1)) == 0\n assert len(dd.from_pandas(pd.DataFrame(columns=[1, 2]), npartitions=1)) == 0\n # Regression test for https://github.com/dask/dask/issues/6110\n assert len(dd.from_pandas(pd.DataFrame(columns=[\"foo\", \"foo\"]), npartitions=1)) == 0\n\n\ndef test_size():\n assert_eq(d.size, full.size)\n assert_eq(d.a.size, full.a.size)\n assert_eq(d.index.size, full.index.size)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_shape_test_nbytes.assert_eq_d_index_nbytes_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_shape_test_nbytes.assert_eq_d_index_nbytes_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": 
"text/x-python", "category": "test", "start_line": 1171, "end_line": 1188, "span_ids": ["test_shape", "test_nbytes"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_shape():\n result = d.shape\n assert_eq((result[0].compute(), result[1]), (len(full), len(full.columns)))\n assert_eq(dd.compute(result)[0], (len(full), len(full.columns)))\n\n result = d.a.shape\n assert_eq(result[0].compute(), len(full.a))\n assert_eq(dd.compute(result)[0], (len(full.a),))\n\n sh = dd.from_pandas(pd.DataFrame(index=[1, 2, 3]), npartitions=2).shape\n assert (sh[0].compute(), sh[1]) == (3, 0)\n sh = dd.from_pandas(pd.DataFrame({\"a\": [], \"b\": []}, index=[]), npartitions=1).shape\n assert (sh[0].compute(), sh[1]) == (0, 2)\n\n\ndef test_nbytes():\n assert_eq(d.a.nbytes, full.a.nbytes)\n assert_eq(d.index.nbytes, full.index.nbytes)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_quantile_test_quantile.assert_result_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_quantile_test_quantile.assert_result_expected", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1191, "end_line": 1233, "span_ids": ["test_quantile"], "tokens": 433}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"method,expected\",\n [(\"tdigest\", (0.35, 3.80, 2.5, 6.5, 2.0)), (\"dask\", (0.0, 4.0, 1.2, 6.2, 2.0))],\n)\ndef test_quantile(method, expected):\n if method == \"tdigest\":\n pytest.importorskip(\"crick\")\n # series / multiple\n result = d.b.quantile([0.3, 0.7], method=method)\n\n exp = full.b.quantile([0.3, 0.7]) # result may different\n assert len(result) == 2\n assert result.divisions == (0.3, 0.7)\n assert_eq(result.index, exp.index)\n assert isinstance(result, dd.Series)\n\n result = result.compute()\n assert isinstance(result, pd.Series)\n\n assert result.iloc[0] == pytest.approx(expected[0])\n assert result.iloc[1] == pytest.approx(expected[1])\n\n # index\n s = pd.Series(np.arange(10), index=np.arange(10))\n ds = dd.from_pandas(s, 2)\n\n result = ds.index.quantile([0.3, 0.7], method=method)\n exp = s.quantile([0.3, 0.7])\n assert len(result) == 2\n assert result.divisions == (0.3, 0.7)\n assert_eq(result.index, exp.index)\n assert isinstance(result, dd.Series)\n\n result = result.compute()\n assert isinstance(result, pd.Series)\n assert result.iloc[0] == pytest.approx(expected[2])\n assert result.iloc[1] == pytest.approx(expected[3])\n\n # series / single\n result = d.b.quantile(0.5, 
method=method)\n assert isinstance(result, dd.core.Scalar)\n result = result.compute()\n assert result == expected[4]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_quantile_missing_test_empty_quantile.assert_eq_result_exp_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_quantile_missing_test_empty_quantile.assert_eq_result_exp_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1236, "end_line": 1261, "span_ids": ["test_empty_quantile", "test_quantile_missing"], "tokens": 214}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"method\", [\"tdigest\", \"dask\"])\ndef test_quantile_missing(method):\n if method == \"tdigest\":\n pytest.importorskip(\"crick\")\n df = pd.DataFrame({\"A\": [0, np.nan, 2]})\n ddf = dd.from_pandas(df, 2)\n expected = df.quantile()\n result = ddf.quantile(method=method)\n assert_eq(result, expected)\n\n expected = df.A.quantile()\n result = ddf.A.quantile(method=method)\n assert_eq(result, expected)\n\n\n@pytest.mark.parametrize(\"method\", [\"tdigest\", \"dask\"])\ndef test_empty_quantile(method):\n if method == \"tdigest\":\n pytest.importorskip(\"crick\")\n result = d.b.quantile([], method=method)\n exp = full.b.quantile([])\n assert result.divisions == (None, None)\n\n assert result.name == \"b\"\n assert result.compute().name == \"b\"\n assert_eq(result, exp)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dataframe_quantile_test_dataframe_quantile.pytest_raises_ValueError_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dataframe_quantile_test_dataframe_quantile.pytest_raises_ValueError_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1264, "end_line": 1328, "span_ids": ["test_dataframe_quantile"], "tokens": 600}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"method,expected\",\n [\n (\n \"tdigest\",\n (\n pd.Series([9.5, 29.5, 19.5], index=[\"A\", \"X\", \"B\"]),\n pd.DataFrame(\n [[4.5, 24.5, 14.5], [14.5, 34.5, 
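The quantile chunks compare the approximate method="dask" reduction against the optional tdigest backend (which needs the crick package). A sketch of the call shape only; the numbers are approximate by design and the toy series is mine:

import pandas as pd
import dask.dataframe as dd

s = pd.Series(range(101), name="v")
ds = dd.from_pandas(s, npartitions=4)

# The lazy result's divisions are the requested quantiles themselves.
q = ds.quantile([0.25, 0.75], method="dask")
assert q.divisions == (0.25, 0.75)
print(q.compute())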
24.5]],\n index=[0.25, 0.75],\n columns=[\"A\", \"X\", \"B\"],\n ),\n ),\n ),\n (\n \"dask\",\n (\n pd.Series([7.0, 27.0, 17.0], index=[\"A\", \"X\", \"B\"]),\n pd.DataFrame(\n [[1.50, 21.50, 11.50], [14.0, 34.0, 24.0]],\n index=[0.25, 0.75],\n columns=[\"A\", \"X\", \"B\"],\n ),\n ),\n ),\n ],\n)\ndef test_dataframe_quantile(method, expected):\n if method == \"tdigest\":\n pytest.importorskip(\"crick\")\n # column X is for test column order and result division\n df = pd.DataFrame(\n {\n \"A\": np.arange(20),\n \"X\": np.arange(20, 40),\n \"B\": np.arange(10, 30),\n \"C\": [\"a\", \"b\", \"c\", \"d\"] * 5,\n },\n columns=[\"A\", \"X\", \"B\", \"C\"],\n )\n ddf = dd.from_pandas(df, 3)\n\n result = ddf.quantile(method=method)\n assert result.npartitions == 1\n assert result.divisions == (\"A\", \"X\")\n\n result = result.compute()\n assert isinstance(result, pd.Series)\n assert result.name == 0.5\n tm.assert_index_equal(result.index, pd.Index([\"A\", \"X\", \"B\"]))\n assert (result == expected[0]).all()\n\n result = ddf.quantile([0.25, 0.75], method=method)\n assert result.npartitions == 1\n assert result.divisions == (0.25, 0.75)\n\n result = result.compute()\n assert isinstance(result, pd.DataFrame)\n tm.assert_index_equal(result.index, pd.Index([0.25, 0.75]))\n tm.assert_index_equal(result.columns, pd.Index([\"A\", \"X\", \"B\"]))\n\n assert (result == expected[1]).all().all()\n\n assert_eq(ddf.quantile(axis=1, method=method), df.quantile(axis=1))\n pytest.raises(ValueError, lambda: ddf.quantile([0.25, 0.75], axis=1, method=method))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_quantile_for_possibly_unsorted_q_test_index.assert_eq_d_index_full_i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_quantile_for_possibly_unsorted_q_test_index.assert_eq_d_index_full_i", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1331, "end_line": 1367, "span_ids": ["test_quantile_tiny_partitions", "test_index", "test_quantile_for_possibly_unsorted_q"], "tokens": 367}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_quantile_for_possibly_unsorted_q():\n \"\"\"check that quantile is giving correct answers even when quantile parameter, q, may be unsorted.\n\n See https://github.com/dask/dask/issues/4642.\n \"\"\"\n # prepare test case where percentiles should equal values\n A = da.arange(0, 101)\n ds = dd.from_dask_array(A)\n\n for q in [\n [0.25, 0.50, 0.75],\n [0.25, 0.50, 0.75, 0.99],\n [0.75, 0.5, 0.25],\n [0.25, 0.99, 0.75, 0.50],\n ]:\n r = ds.quantile(q).compute()\n assert_eq(r.loc[0.25], 25.0)\n assert_eq(r.loc[0.50], 50.0)\n assert_eq(r.loc[0.75], 75.0)\n\n r = ds.quantile([0.25]).compute()\n assert_eq(r.loc[0.25], 25.0)\n\n r = ds.quantile(0.25).compute()\n assert_eq(r, 25.0)\n\n\ndef 
test_quantile_tiny_partitions():\n \"\"\" See https://github.com/dask/dask/issues/6551 \"\"\"\n df = pd.DataFrame({\"a\": [1, 2, 3]})\n ddf = dd.from_pandas(df, npartitions=3)\n r = ddf[\"a\"].quantile(0.5).compute()\n assert r == 2\n\n\ndef test_index():\n assert_eq(d.index, full.index)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_assign_test_assign.None_3.ddf_assign_foo_ddf_unknow": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_assign_test_assign.None_3.ddf_assign_foo_ddf_unknow", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1370, "end_line": 1425, "span_ids": ["test_assign"], "tokens": 446}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_assign():\n df = pd.DataFrame(\n {\"a\": range(8), \"b\": [float(i) for i in range(10, 18)]},\n index=pd.Index(list(\"abcdefgh\")),\n )\n ddf = dd.from_pandas(df, npartitions=3)\n ddf_unknown = dd.from_pandas(df, npartitions=3, sort=False)\n assert not ddf_unknown.known_divisions\n\n res = ddf.assign(\n c=1,\n d=\"string\",\n e=ddf.a.sum(),\n f=ddf.a + ddf.b,\n g=lambda x: x.a + x.b,\n dt=pd.Timestamp(2018, 2, 13),\n )\n res_unknown = ddf_unknown.assign(\n c=1,\n d=\"string\",\n e=ddf_unknown.a.sum(),\n f=ddf_unknown.a + ddf_unknown.b,\n g=lambda x: x.a + x.b,\n dt=pd.Timestamp(2018, 2, 13),\n )\n sol = df.assign(\n c=1,\n d=\"string\",\n e=df.a.sum(),\n f=df.a + df.b,\n g=lambda x: x.a + x.b,\n dt=pd.Timestamp(2018, 2, 13),\n )\n assert_eq(res, sol)\n assert_eq(res_unknown, sol)\n\n res = ddf.assign(c=df.a + 1)\n assert_eq(res, df.assign(c=df.a + 1))\n\n res = ddf.assign(c=ddf.index)\n assert_eq(res, df.assign(c=df.index))\n\n # divisions unknown won't work with pandas\n with pytest.raises(ValueError):\n ddf_unknown.assign(c=df.a + 1)\n\n # unsupported type\n with pytest.raises(TypeError):\n ddf.assign(c=list(range(9)))\n\n # Fails when assigning known divisions to unknown divisions\n with pytest.raises(ValueError):\n ddf_unknown.assign(foo=ddf.a)\n # Fails when assigning unknown divisions to known divisions\n with pytest.raises(ValueError):\n ddf.assign(foo=ddf_unknown.a)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_assign_callable_test_assign_dtypes.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_assign_callable_test_assign_dtypes.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": 
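test_assign shows that scalars, dask Series, and callables all pass through to pandas semantics, while mixing known and unknown divisions raises ValueError. A compact sketch on a toy frame:

import pandas as pd
import dask.dataframe as dd

pdf = pd.DataFrame({"a": range(4)})
ddf = dd.from_pandas(pdf, npartitions=2)

# Scalars, aligned dask Series, and callables all behave as in pandas.
out = ddf.assign(b=1, c=ddf.a + 10, d=lambda df: df.a * 2)
assert out.compute().equals(
    pdf.assign(b=1, c=pdf.a + 10, d=lambda df: df.a * 2)
)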
"test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1428, "end_line": 1449, "span_ids": ["test_assign_dtypes", "test_assign_callable"], "tokens": 184}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_assign_callable():\n df = dd.from_pandas(pd.DataFrame({\"A\": range(10)}), npartitions=2)\n a = df.assign(B=df.A.shift())\n b = df.assign(B=lambda x: x.A.shift())\n assert_eq(a, b)\n\n\ndef test_assign_dtypes():\n ddf = dd.from_pandas(\n pd.DataFrame(\n data={\"col1\": [\"a\", \"b\"], \"col2\": [1, 2]}, columns=[\"col1\", \"col2\"]\n ),\n npartitions=2,\n )\n\n new_col = {\"col3\": pd.Series([\"0\", \"1\"])}\n res = ddf.assign(**new_col)\n\n assert_eq(\n res.dtypes,\n pd.Series(data=[\"object\", \"int64\", \"object\"], index=[\"col1\", \"col2\", \"col3\"]),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_test_map.None_6": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_test_map.None_6", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1452, "end_line": 1467, "span_ids": ["test_map"], "tokens": 264}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map():\n df = pd.DataFrame(\n {\"a\": range(9), \"b\": [4, 5, 6, 1, 2, 3, 0, 0, 0]},\n index=pd.Index([0, 1, 3, 5, 6, 8, 9, 9, 9], name=\"myindex\"),\n )\n ddf = dd.from_pandas(df, npartitions=3)\n\n assert_eq(ddf.a.map(lambda x: x + 1), df.a.map(lambda x: x + 1))\n lk = dict((v, v + 1) for v in df.a.values)\n assert_eq(ddf.a.map(lk), df.a.map(lk))\n assert_eq(ddf.b.map(lk), df.b.map(lk))\n lk = pd.Series(lk)\n assert_eq(ddf.a.map(lk), df.a.map(lk))\n assert_eq(ddf.b.map(lk), df.b.map(lk))\n assert_eq(ddf.b.map(lk, meta=ddf.b), df.b.map(lk))\n assert_eq(ddf.b.map(lk, meta=(\"b\", \"i8\")), df.b.map(lk))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_concat_test_known_divisions.assert_not_df_known_divis": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_concat_test_known_divisions.assert_not_df_known_divis", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": 
"text/x-python", "category": "test", "start_line": 1470, "end_line": 1487, "span_ids": ["test_concat", "test_known_divisions", "test_args"], "tokens": 161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_concat():\n x = _concat([pd.DataFrame(columns=[\"a\", \"b\"]), pd.DataFrame(columns=[\"a\", \"b\"])])\n assert list(x.columns) == [\"a\", \"b\"]\n assert len(x) == 0\n\n\ndef test_args():\n e = d.assign(c=d.a + 1)\n f = type(e)(*e._args)\n assert_eq(e, f)\n assert_eq(d.a, type(d.a)(*d.a._args))\n assert_eq(d.a.sum(), type(d.a.sum())(*d.a.sum()._args))\n\n\ndef test_known_divisions():\n assert d.known_divisions\n df = dd.DataFrame(dsk, \"x\", meta, divisions=[None, None, None])\n assert not df.known_divisions", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_unknown_divisions_test_unknown_divisions.assert_eq_d_a_d_b_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_unknown_divisions_test_unknown_divisions.assert_eq_d_a_d_b_1_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1490, "end_line": 1501, "span_ids": ["test_unknown_divisions"], "tokens": 198}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_unknown_divisions():\n dsk = {\n (\"x\", 0): pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]}),\n (\"x\", 1): pd.DataFrame({\"a\": [4, 5, 6], \"b\": [3, 2, 1]}),\n (\"x\", 2): pd.DataFrame({\"a\": [7, 8, 9], \"b\": [0, 0, 0]}),\n }\n meta = make_meta({\"a\": \"i8\", \"b\": \"i8\"})\n d = dd.DataFrame(dsk, \"x\", meta, [None, None, None, None])\n full = d.compute(scheduler=\"sync\")\n\n assert_eq(d.a.sum(), full.a.sum())\n assert_eq(d.a + d.b + 1, full.a + full.b + 1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_with_min_count_test_with_min_count.for_df_ddf_in_zip_dfs_d.for_axis_in_axes_.for_min_count_in_0_1_2.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_with_min_count_test_with_min_count.for_df_ddf_in_zip_dfs_d.for_axis_in_axes_.for_min_count_in_0_1_2.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", 
"category": "test", "start_line": 1504, "end_line": 1527, "span_ids": ["test_with_min_count"], "tokens": 241}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n PANDAS_VERSION < \"0.22.0\",\n reason=\"Parameter min_count not implemented in \"\n \"DataFrame.sum() and DataFrame.prod()\",\n)\ndef test_with_min_count():\n dfs = [\n pd.DataFrame([[None, 2, 3], [None, 5, 6], [5, 4, 9]]),\n pd.DataFrame([[2, None, None], [None, 5, 6], [5, 4, 9]]),\n ]\n ddfs = [dd.from_pandas(df, npartitions=4) for df in dfs]\n axes = [0, 1]\n\n for df, ddf in zip(dfs, ddfs):\n for axis in axes:\n for min_count in [0, 1, 2, 3]:\n assert_eq(\n df.sum(min_count=min_count, axis=axis),\n ddf.sum(min_count=min_count, axis=axis),\n )\n assert_eq(\n df.prod(min_count=min_count, axis=axis),\n ddf.prod(min_count=min_count, axis=axis),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_align_test_align.None_15": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_align_test_align.None_15", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1530, "end_line": 1566, "span_ids": ["test_align"], "tokens": 478}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"join\", [\"inner\", \"outer\", \"left\", \"right\"])\ndef test_align(join):\n df1a = pd.DataFrame(\n {\"A\": np.random.randn(10), \"B\": np.random.randn(10)},\n index=[1, 12, 5, 6, 3, 9, 10, 4, 13, 11],\n )\n\n df1b = pd.DataFrame(\n {\"A\": np.random.randn(10), \"B\": np.random.randn(10)},\n index=[0, 3, 2, 10, 5, 6, 7, 8, 12, 13],\n )\n ddf1a = dd.from_pandas(df1a, 3)\n ddf1b = dd.from_pandas(df1b, 3)\n\n # DataFrame\n res1, res2 = ddf1a.align(ddf1b, join=join)\n exp1, exp2 = df1a.align(df1b, join=join)\n assert assert_eq(res1, exp1)\n assert assert_eq(res2, exp2)\n\n # Series\n res1, res2 = ddf1a[\"A\"].align(ddf1b[\"B\"], join=join)\n exp1, exp2 = df1a[\"A\"].align(df1b[\"B\"], join=join)\n assert assert_eq(res1, exp1)\n assert assert_eq(res2, exp2)\n\n # DataFrame with fill_value\n res1, res2 = ddf1a.align(ddf1b, join=join, fill_value=1)\n exp1, exp2 = df1a.align(df1b, join=join, fill_value=1)\n assert assert_eq(res1, exp1)\n assert assert_eq(res2, exp2)\n\n # Series\n res1, res2 = ddf1a[\"A\"].align(ddf1b[\"B\"], join=join, fill_value=1)\n exp1, exp2 = df1a[\"A\"].align(df1b[\"B\"], join=join, fill_value=1)\n assert assert_eq(res1, exp1)\n assert assert_eq(res2, exp2)", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_align_axis_test_align_axis.None_1.ddf1a_A_align_ddf1b_B": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_align_axis_test_align_axis.None_1.ddf1a_A_align_ddf1b_B", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1569, "end_line": 1608, "span_ids": ["test_align_axis"], "tokens": 529}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"join\", [\"inner\", \"outer\", \"left\", \"right\"])\ndef test_align_axis(join):\n df1a = pd.DataFrame(\n {\"A\": np.random.randn(10), \"B\": np.random.randn(10), \"C\": np.random.randn(10)},\n index=[1, 12, 5, 6, 3, 9, 10, 4, 13, 11],\n )\n\n df1b = pd.DataFrame(\n {\"B\": np.random.randn(10), \"C\": np.random.randn(10), \"D\": np.random.randn(10)},\n index=[0, 3, 2, 10, 5, 6, 7, 8, 12, 13],\n )\n ddf1a = dd.from_pandas(df1a, 3)\n ddf1b = dd.from_pandas(df1b, 3)\n\n res1, res2 = ddf1a.align(ddf1b, join=join, axis=0)\n exp1, exp2 = df1a.align(df1b, join=join, axis=0)\n assert assert_eq(res1, exp1)\n assert assert_eq(res2, exp2)\n\n res1, res2 = ddf1a.align(ddf1b, join=join, axis=1)\n exp1, exp2 = df1a.align(df1b, join=join, axis=1)\n assert assert_eq(res1, exp1)\n assert assert_eq(res2, exp2)\n\n res1, res2 = ddf1a.align(ddf1b, join=join, axis=\"index\")\n exp1, exp2 = df1a.align(df1b, join=join, axis=\"index\")\n assert assert_eq(res1, exp1)\n assert assert_eq(res2, exp2)\n\n res1, res2 = ddf1a.align(ddf1b, join=join, axis=\"columns\")\n exp1, exp2 = df1a.align(df1b, join=join, axis=\"columns\")\n assert assert_eq(res1, exp1)\n assert assert_eq(res2, exp2)\n\n # invalid\n with pytest.raises(ValueError):\n ddf1a.align(ddf1b, join=join, axis=\"XXX\")\n\n with pytest.raises(ValueError):\n ddf1a[\"A\"].align(ddf1b[\"B\"], join=join, axis=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_combine_test_combine.assert_dda_combine_ddb_a": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_combine_test_combine.assert_dda_combine_ddb_a", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1611, "end_line": 1644, "span_ids": ["test_combine"], "tokens": 347}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_combine():\n df1 = pd.DataFrame(\n {\n \"A\": np.random.choice([1, 2, np.nan], 100),\n \"B\": np.random.choice([\"a\", \"b\", np.nan], 100),\n }\n )\n\n df2 = pd.DataFrame(\n {\n \"A\": np.random.choice([1, 2, 3], 100),\n \"B\": np.random.choice([\"a\", \"b\", \"c\"], 100),\n }\n )\n ddf1 = dd.from_pandas(df1, 4)\n ddf2 = dd.from_pandas(df2, 5)\n\n first = lambda a, b: a\n\n # DataFrame\n for dda, ddb, a, b in [\n (ddf1, ddf2, df1, df2),\n (ddf1.A, ddf2.A, df1.A, df2.A),\n (ddf1.B, ddf2.B, df1.B, df2.B),\n ]:\n for func, fill_value in [(add, None), (add, 100), (first, None)]:\n sol = a.combine(b, func, fill_value=fill_value)\n assert_eq(dda.combine(ddb, func, fill_value=fill_value), sol)\n assert_eq(dda.combine(b, func, fill_value=fill_value), sol)\n\n assert_eq(\n ddf1.combine(ddf2, add, overwrite=False), df1.combine(df2, add, overwrite=False)\n )\n assert dda.combine(ddb, add)._name == dda.combine(ddb, add)._name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_combine_first_test_combine_first.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_combine_first_test_combine_first.None_5", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1647, "end_line": 1673, "span_ids": ["test_combine_first"], "tokens": 272}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_combine_first():\n df1 = pd.DataFrame(\n {\n \"A\": np.random.choice([1, 2, np.nan], 100),\n \"B\": np.random.choice([\"a\", \"b\", np.nan], 100),\n }\n )\n\n df2 = pd.DataFrame(\n {\n \"A\": np.random.choice([1, 2, 3], 100),\n \"B\": np.random.choice([\"a\", \"b\", \"c\"], 100),\n }\n )\n ddf1 = dd.from_pandas(df1, 4)\n ddf2 = dd.from_pandas(df2, 5)\n\n # DataFrame\n assert_eq(ddf1.combine_first(ddf2), df1.combine_first(df2))\n assert_eq(ddf1.combine_first(df2), df1.combine_first(df2))\n\n # Series\n assert_eq(ddf1.A.combine_first(ddf2.A), df1.A.combine_first(df2.A))\n assert_eq(ddf1.A.combine_first(df2.A), df1.A.combine_first(df2.A))\n\n assert_eq(ddf1.B.combine_first(ddf2.B), df1.B.combine_first(df2.B))\n assert_eq(ddf1.B.combine_first(df2.B), df1.B.combine_first(df2.B))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dataframe_picklable_test_dataframe_picklable.assert_eq_s_s2_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dataframe_picklable_test_dataframe_picklable.assert_eq_s_s2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1676, "end_line": 1709, "span_ids": ["test_dataframe_picklable"], "tokens": 252}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dataframe_picklable():\n from pickle import loads, dumps\n\n cloudpickle = pytest.importorskip(\"cloudpickle\")\n cp_dumps = cloudpickle.dumps\n cp_loads = cloudpickle.loads\n\n d = _compat.makeTimeDataFrame()\n df = dd.from_pandas(d, npartitions=3)\n df = df + 2\n\n # dataframe\n df2 = loads(dumps(df))\n assert_eq(df, df2)\n df2 = cp_loads(cp_dumps(df))\n assert_eq(df, df2)\n\n # series\n a2 = loads(dumps(df.A))\n assert_eq(df.A, a2)\n a2 = cp_loads(cp_dumps(df.A))\n assert_eq(df.A, a2)\n\n # index\n i2 = loads(dumps(df.index))\n assert_eq(df.index, i2)\n i2 = cp_loads(cp_dumps(df.index))\n assert_eq(df.index, i2)\n\n # scalar\n # lambdas are present, so only test cloudpickle\n s = df.A.sum()\n s2 = cp_loads(cp_dumps(s))\n assert_eq(s, s2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_random_partitions_test_series_round.assert_eq_s_round_ps_r": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_random_partitions_test_series_round.assert_eq_s_round_ps_r", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1712, "end_line": 1743, "span_ids": ["test_random_partitions", "test_series_round"], "tokens": 361}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_random_partitions():\n a, b = d.random_split([0.5, 0.5], 42)\n assert isinstance(a, dd.DataFrame)\n assert isinstance(b, dd.DataFrame)\n assert a._name != b._name\n np.testing.assert_array_equal(a.index, sorted(a.index))\n\n assert len(a.compute()) + len(b.compute()) == len(full)\n a2, b2 = d.random_split([0.5, 0.5], 42)\n assert a2._name == a._name\n assert b2._name == b._name\n\n a, b = d.random_split([0.5, 0.5], 42, True)\n a2, b2 = d.random_split([0.5, 0.5], 42, True)\n assert_eq(a, a2)\n assert_eq(b, b2)\n with pytest.raises(AssertionError):\n np.testing.assert_array_equal(a.index, sorted(a.index))\n\n parts = d.random_split([0.4, 0.5, 0.1], 42)\n names = set([p._name for p in parts])\n names.update([a._name, b._name])\n assert len(names) == 5\n\n with 
pytest.raises(ValueError):\n d.random_split([0.4, 0.5], 42)\n\n\ndef test_series_round():\n ps = pd.Series([1.123, 2.123, 3.123, 1.234, 2.234, 3.234], name=\"a\")\n s = dd.from_pandas(ps, npartitions=3)\n assert_eq(s.round(), ps.round())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition_test_repartition.for_p_in_range_1_7_.for_div_in_5_10_2.assert_eq_pdf_x_rds_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition_test_repartition.for_p_in_range_1_7_.for_div_in_5_10_2.assert_eq_pdf_x_rds_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1746, "end_line": 1814, "span_ids": ["test_repartition"], "tokens": 748}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.slow\ndef test_repartition():\n def _check_split_data(orig, d):\n \"\"\"Check data is split properly\"\"\"\n keys = [k for k in d.dask if k[0].startswith(\"repartition-split\")]\n keys = sorted(keys)\n sp = pd.concat(\n [compute_as_if_collection(dd.DataFrame, d.dask, k) for k in keys]\n )\n assert_eq(orig, sp)\n assert_eq(orig, d)\n\n df = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 5, 6], \"y\": list(\"abdabd\")}, index=[10, 20, 30, 40, 50, 60]\n )\n a = dd.from_pandas(df, 2)\n\n b = a.repartition(divisions=[10, 20, 50, 60])\n assert b.divisions == (10, 20, 50, 60)\n assert_eq(a, b)\n assert_eq(compute_as_if_collection(dd.DataFrame, b.dask, (b._name, 0)), df.iloc[:1])\n\n for div in [\n [20, 60],\n [10, 50],\n [1], # first / last element mismatch\n [0, 60],\n [10, 70], # do not allow to expand divisions by default\n [10, 50, 20, 60], # not sorted\n [10, 10, 20, 60],\n ]: # not unique (last element can be duplicated)\n\n pytest.raises(ValueError, lambda: a.repartition(divisions=div))\n\n pdf = pd.DataFrame(np.random.randn(7, 5), columns=list(\"abxyz\"))\n for p in range(1, 7):\n ddf = dd.from_pandas(pdf, p)\n assert_eq(ddf, pdf)\n for div in [\n [0, 6],\n [0, 6, 6],\n [0, 5, 6],\n [0, 4, 6, 6],\n [0, 2, 6],\n [0, 2, 6, 6],\n [0, 2, 3, 6, 6],\n [0, 1, 2, 3, 4, 5, 6, 6],\n ]:\n rddf = ddf.repartition(divisions=div)\n _check_split_data(ddf, rddf)\n assert rddf.divisions == tuple(div)\n assert_eq(pdf, rddf)\n\n rds = ddf.x.repartition(divisions=div)\n _check_split_data(ddf.x, rds)\n assert rds.divisions == tuple(div)\n assert_eq(pdf.x, rds)\n\n # expand divisions\n for div in [[-5, 10], [-2, 3, 5, 6], [0, 4, 5, 9, 10]]:\n rddf = ddf.repartition(divisions=div, force=True)\n _check_split_data(ddf, rddf)\n assert rddf.divisions == tuple(div)\n assert_eq(pdf, rddf)\n\n rds = ddf.x.repartition(divisions=div, force=True)\n _check_split_data(ddf.x, rds)\n assert rds.divisions == tuple(div)\n assert_eq(pdf.x, rds)\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition.pdf_4_test_repartition.None_2.for_div_in_list_Yadijm_.assert_eq_pdf_x_rds_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition.pdf_4_test_repartition.None_2.for_div_in_list_Yadijm_.assert_eq_pdf_x_rds_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1816, "end_line": 1854, "span_ids": ["test_repartition"], "tokens": 402}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.slow\ndef test_repartition():\n # ... other code\n\n pdf = pd.DataFrame(\n {\"x\": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], \"y\": [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]},\n index=list(\"abcdefghij\"),\n )\n for p in range(1, 7):\n ddf = dd.from_pandas(pdf, p)\n assert_eq(ddf, pdf)\n for div in [\n list(\"aj\"),\n list(\"ajj\"),\n list(\"adj\"),\n list(\"abfj\"),\n list(\"ahjj\"),\n list(\"acdj\"),\n list(\"adfij\"),\n list(\"abdefgij\"),\n list(\"abcdefghij\"),\n ]:\n rddf = ddf.repartition(divisions=div)\n _check_split_data(ddf, rddf)\n assert rddf.divisions == tuple(div)\n assert_eq(pdf, rddf)\n\n rds = ddf.x.repartition(divisions=div)\n _check_split_data(ddf.x, rds)\n assert rds.divisions == tuple(div)\n assert_eq(pdf.x, rds)\n\n # expand divisions\n for div in [list(\"Yadijm\"), list(\"acmrxz\"), list(\"Yajz\")]:\n rddf = ddf.repartition(divisions=div, force=True)\n _check_split_data(ddf, rddf)\n assert rddf.divisions == tuple(div)\n assert_eq(pdf, rddf)\n\n rds = ddf.x.repartition(divisions=div, force=True)\n _check_split_data(ddf.x, rds)\n assert rds.divisions == tuple(div)\n assert_eq(pdf.x, rds)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition_divisions_test_repartition_divisions.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition_divisions_test_repartition_divisions.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1857, "end_line": 1875, "span_ids": ["test_repartition_divisions"], "tokens": 329}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": "def test_repartition_divisions():\n result = repartition_divisions([0, 6], [0, 6, 6], \"a\", \"b\", \"c\")\n assert result == {\n (\"b\", 0): (methods.boundary_slice, (\"a\", 0), 0, 6, False),\n (\"b\", 1): (methods.boundary_slice, (\"a\", 0), 6, 6, True),\n (\"c\", 0): (\"b\", 0),\n (\"c\", 1): (\"b\", 1),\n }\n\n result = repartition_divisions([1, 3, 7], [1, 4, 6, 7], \"a\", \"b\", \"c\")\n assert result == {\n (\"b\", 0): (methods.boundary_slice, (\"a\", 0), 1, 3, False),\n (\"b\", 1): (methods.boundary_slice, (\"a\", 1), 3, 4, False),\n (\"b\", 2): (methods.boundary_slice, (\"a\", 1), 4, 6, False),\n (\"b\", 3): (methods.boundary_slice, (\"a\", 1), 6, 7, True),\n (\"c\", 0): (methods.concat, [(\"b\", 0), (\"b\", 1)]),\n (\"c\", 1): (\"b\", 2),\n (\"c\", 2): (\"b\", 3),\n }", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition_on_pandas_dataframe_test_repartition_on_pandas_dataframe.assert_eq_ddf_df_y_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition_on_pandas_dataframe_test_repartition_on_pandas_dataframe.assert_eq_ddf_df_y_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1878, "end_line": 1890, "span_ids": ["test_repartition_on_pandas_dataframe"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_repartition_on_pandas_dataframe():\n df = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 5, 6], \"y\": list(\"abdabd\")}, index=[10, 20, 30, 40, 50, 60]\n )\n ddf = dd.repartition(df, divisions=[10, 20, 50, 60])\n assert isinstance(ddf, dd.DataFrame)\n assert ddf.divisions == (10, 20, 50, 60)\n assert_eq(ddf, df)\n\n ddf = dd.repartition(df.y, divisions=[10, 20, 50, 60])\n assert isinstance(ddf, dd.Series)\n assert ddf.divisions == (10, 20, 50, 60)\n assert_eq(ddf, df.y)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition_npartitions_test_repartition_npartitions.assert_all_map_len_parts": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition_npartitions_test_repartition_npartitions.assert_all_map_len_parts", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1893, "end_line": 1909, "span_ids": ["test_repartition_npartitions"], "tokens": 250}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"use_index\", [True, False])\n@pytest.mark.parametrize(\"n\", [1, 2, 4, 5])\n@pytest.mark.parametrize(\"k\", [1, 2, 4, 5])\n@pytest.mark.parametrize(\"dtype\", [float, \"M8[ns]\"])\n@pytest.mark.parametrize(\"transform\", [lambda df: df, lambda df: df.x])\ndef test_repartition_npartitions(use_index, n, k, dtype, transform):\n df = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 5, 6] * 10, \"y\": list(\"abdabd\") * 10},\n index=pd.Series([1, 2, 3, 4, 5, 6] * 10, dtype=dtype),\n )\n df = transform(df)\n a = dd.from_pandas(df, npartitions=n, sort=use_index)\n b = a.repartition(k)\n assert_eq(a, b)\n assert b.npartitions == k\n parts = dask.get(b.dask, b.__dask_keys__())\n assert all(map(len, parts))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition_partition_size_test_repartition_partition_size.assert_all_map_len_parts": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition_partition_size_test_repartition_partition_size.assert_all_map_len_parts", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1912, "end_line": 1927, "span_ids": ["test_repartition_partition_size"], "tokens": 242}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"use_index\", [True, False])\n@pytest.mark.parametrize(\"n\", [2, 5])\n@pytest.mark.parametrize(\"partition_size\", [\"1kiB\", 379])\n@pytest.mark.parametrize(\"transform\", [lambda df: df, lambda df: df.x])\ndef test_repartition_partition_size(use_index, n, partition_size, transform):\n df = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 5, 6] * 10, \"y\": list(\"abdabd\") * 10},\n index=pd.Series([10, 20, 30, 40, 50, 60] * 10),\n )\n df = transform(df)\n a = dd.from_pandas(df, npartitions=n, sort=use_index)\n b = a.repartition(partition_size=partition_size)\n assert_eq(a, b, check_divisions=False)\n assert np.alltrue(b.map_partitions(total_mem_usage, deep=True).compute() <= 1024)\n parts = dask.get(b.dask, b.__dask_keys__())\n assert all(map(len, parts))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition_partition_size_arg_test_repartition_npartitions_same_limits.ddf_repartition_npartitio": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition_partition_size_arg_test_repartition_npartitions_same_limits.ddf_repartition_npartitio", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1930, "end_line": 1949, "span_ids": ["test_repartition_npartitions_same_limits", "test_repartition_partition_size_arg"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_repartition_partition_size_arg():\n df = pd.DataFrame({\"x\": range(10)})\n a = dd.from_pandas(df, npartitions=2)\n b = a.repartition(\"1 MiB\")\n assert b.npartitions == 1\n\n\ndef test_repartition_npartitions_same_limits():\n df = pd.DataFrame(\n {\"x\": [1, 2, 3]},\n index=[\n pd.Timestamp(\"2017-05-09 00:00:00.006000\"),\n pd.Timestamp(\"2017-05-09 02:45:00.017999\"),\n pd.Timestamp(\"2017-05-09 05:59:58.938999\"),\n ],\n )\n\n ddf = dd.from_pandas(df, npartitions=2)\n\n ddf.repartition(npartitions=10)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition_npartitions_numeric_edge_case_test_repartition_object_index.assert_not_b_known_divisi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition_npartitions_numeric_edge_case_test_repartition_object_index.assert_not_b_known_divisi", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1952, "end_line": 1974, "span_ids": ["test_repartition_npartitions_numeric_edge_case", "test_repartition_object_index"], "tokens": 233}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_repartition_npartitions_numeric_edge_case():\n \"\"\"\n Test that we cover numeric edge cases when\n int(ddf.npartitions / npartitions) * npartitions) != ddf.npartitions\n \"\"\"\n df = pd.DataFrame({\"x\": range(100)})\n a = dd.from_pandas(df, npartitions=15)\n assert a.npartitions == 15\n b = a.repartition(npartitions=11)\n assert_eq(a, b)\n\n\ndef test_repartition_object_index():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5, 6] * 10}, index=list(\"abdabd\") * 10)\n a = dd.from_pandas(df, npartitions=5)\n b = a.repartition(npartitions=2)\n assert b.npartitions == 2\n assert_eq(b, df)\n\n b = a.repartition(npartitions=10)\n assert b.npartitions == 10\n assert_eq(b, df)\n assert not b.known_divisions", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition_freq_test_repartition_freq.assert_eq_ddf2_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition_freq_test_repartition_freq.assert_eq_ddf2_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1977, "end_line": 1994, "span_ids": ["test_repartition_freq"], "tokens": 241}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.slow\n@pytest.mark.parametrize(\"npartitions\", [1, 20, 243])\n@pytest.mark.parametrize(\"freq\", [\"1D\", \"7D\", \"28h\", \"1h\"])\n@pytest.mark.parametrize(\n \"end\", [\"2000-04-15\", \"2000-04-15 12:37:01\", \"2000-01-01 12:37:00\"]\n)\n@pytest.mark.parametrize(\n \"start\", [\"2000-01-01\", \"2000-01-01 12:30:00\", \"2000-01-01 12:30:00\"]\n)\ndef test_repartition_freq(npartitions, freq, start, end):\n start = pd.Timestamp(start)\n end = pd.Timestamp(end)\n ind = pd.date_range(start=start, end=end, freq=\"60s\")\n df = pd.DataFrame({\"x\": np.arange(len(ind))}, index=ind)\n ddf = dd.from_pandas(df, npartitions=npartitions, name=\"x\")\n\n ddf2 = ddf.repartition(freq=freq)\n assert_eq(ddf2, df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition_freq_divisions_test_repartition_freq_divisions.assert_eq_ddf2_ddf2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition_freq_divisions_test_repartition_freq_divisions.assert_eq_ddf2_ddf2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1997, "end_line": 2009, "span_ids": ["test_repartition_freq_divisions"], "tokens": 139}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_repartition_freq_divisions():\n df = pd.DataFrame(\n {\"x\": np.random.random(10)},\n index=pd.DatetimeIndex(np.random.random(10) * 100e9),\n )\n ddf = dd.from_pandas(df, npartitions=3)\n\n ddf2 = ddf.repartition(freq=\"15s\")\n for div in ddf2.divisions[1:-1]:\n assert div == div.round(\"15s\")\n assert ddf2.divisions[0] == df.index.min()\n assert ddf2.divisions[-1] == df.index.max()\n assert_eq(ddf2, ddf2)", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition_freq_errors_test_repartition_input_errors.None_1.ddf_repartition_npartitio": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition_freq_errors_test_repartition_input_errors.None_1.ddf_repartition_npartitio", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 2012, "end_line": 2039, "span_ids": ["test_repartition_freq_month", "test_repartition_freq_errors", "test_repartition_input_errors"], "tokens": 291}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_repartition_freq_errors():\n df = pd.DataFrame({\"x\": [1, 2, 3]})\n ddf = dd.from_pandas(df, npartitions=1)\n with pytest.raises(TypeError) as info:\n ddf.repartition(freq=\"1s\")\n\n assert \"only\" in str(info.value)\n assert \"timeseries\" in str(info.value)\n\n\ndef test_repartition_freq_month():\n ts = pd.date_range(\"2015-01-01 00:00\", \" 2015-05-01 23:50\", freq=\"10min\")\n df = pd.DataFrame(\n np.random.randint(0, 100, size=(len(ts), 4)), columns=list(\"ABCD\"), index=ts\n )\n ddf = dd.from_pandas(df, npartitions=1).repartition(freq=\"1M\")\n\n assert_eq(df, ddf)\n assert 2 < ddf.npartitions <= 6\n\n\ndef test_repartition_input_errors():\n df = pd.DataFrame({\"x\": [1, 2, 3]})\n ddf = dd.from_pandas(df, npartitions=1)\n with pytest.raises(ValueError):\n ddf.repartition(npartitions=5, divisions=[None, None])\n with pytest.raises(ValueError):\n ddf.repartition(npartitions=5, partition_size=\"5MiB\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_embarrassingly_parallel_operations_test_embarrassingly_parallel_operations.assert_len_a_sample_frac_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_embarrassingly_parallel_operations_test_embarrassingly_parallel_operations.assert_len_a_sample_frac_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 2042, "end_line": 2063, "span_ids": ["test_embarrassingly_parallel_operations"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_embarrassingly_parallel_operations():\n 
df = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, None, 6], \"y\": list(\"abdabd\")},\n index=[10, 20, 30, 40, 50, 60],\n )\n a = dd.from_pandas(df, 2)\n\n assert_eq(a.x.astype(\"float32\"), df.x.astype(\"float32\"))\n assert a.x.astype(\"float32\").compute().dtype == \"float32\"\n\n assert_eq(a.x.dropna(), df.x.dropna())\n\n assert_eq(a.x.between(2, 4), df.x.between(2, 4))\n\n assert_eq(a.x.clip(2, 4), df.x.clip(2, 4))\n\n assert_eq(a.x.notnull(), df.x.notnull())\n assert_eq(a.x.isnull(), df.x.isnull())\n assert_eq(a.notnull(), df.notnull())\n assert_eq(a.isnull(), df.isnull())\n\n assert len(a.sample(frac=0.5).compute()) < len(df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_fillna_test_fillna.assert_eq_df_fillna_metho": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_fillna_test_fillna.assert_eq_df_fillna_metho", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 2066, "end_line": 2103, "span_ids": ["test_fillna"], "tokens": 533}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fillna():\n df = _compat.makeMissingDataframe()\n ddf = dd.from_pandas(df, npartitions=5, sort=False)\n\n assert_eq(ddf.fillna(100), df.fillna(100))\n assert_eq(ddf.A.fillna(100), df.A.fillna(100))\n assert_eq(ddf.A.fillna(ddf[\"A\"].mean()), df.A.fillna(df[\"A\"].mean()))\n\n assert_eq(ddf.fillna(method=\"pad\"), df.fillna(method=\"pad\"))\n assert_eq(ddf.A.fillna(method=\"pad\"), df.A.fillna(method=\"pad\"))\n\n assert_eq(ddf.fillna(method=\"bfill\"), df.fillna(method=\"bfill\"))\n assert_eq(ddf.A.fillna(method=\"bfill\"), df.A.fillna(method=\"bfill\"))\n\n assert_eq(ddf.fillna(method=\"pad\", limit=2), df.fillna(method=\"pad\", limit=2))\n assert_eq(ddf.A.fillna(method=\"pad\", limit=2), df.A.fillna(method=\"pad\", limit=2))\n\n assert_eq(ddf.fillna(method=\"bfill\", limit=2), df.fillna(method=\"bfill\", limit=2))\n assert_eq(\n ddf.A.fillna(method=\"bfill\", limit=2), df.A.fillna(method=\"bfill\", limit=2)\n )\n\n assert_eq(ddf.fillna(100, axis=1), df.fillna(100, axis=1))\n assert_eq(ddf.fillna(method=\"pad\", axis=1), df.fillna(method=\"pad\", axis=1))\n assert_eq(\n ddf.fillna(method=\"pad\", limit=2, axis=1),\n df.fillna(method=\"pad\", limit=2, axis=1),\n )\n\n pytest.raises(ValueError, lambda: ddf.A.fillna(0, axis=1))\n pytest.raises(NotImplementedError, lambda: ddf.fillna(0, limit=10))\n pytest.raises(NotImplementedError, lambda: ddf.fillna(0, limit=10, axis=1))\n\n df = _compat.makeMissingDataframe()\n df.iloc[:15, 0] = np.nan # all NaN partition\n ddf = dd.from_pandas(df, npartitions=5, sort=False)\n pytest.raises(ValueError, lambda: ddf.fillna(method=\"pad\").compute())\n assert_eq(df.fillna(method=\"pad\", limit=3), ddf.fillna(method=\"pad\", limit=3))", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_fillna_duplicate_index_test_fillna_series_types.assert_eq_ddf_fillna_fill": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_fillna_duplicate_index_test_fillna_series_types.assert_eq_ddf_fillna_fill", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 2106, "end_line": 2139, "span_ids": ["test_ffill_bfill", "test_fillna_multi_dataframe", "test_fillna_duplicate_index", "test_fillna_series_types"], "tokens": 346}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fillna_duplicate_index():\n @dask.delayed\n def f():\n return pd.DataFrame(dict(a=[1.0], b=[np.NaN]))\n\n ddf = dd.from_delayed([f(), f()], meta=dict(a=float, b=float))\n ddf.b = ddf.b.fillna(ddf.a)\n ddf.compute()\n\n\ndef test_fillna_multi_dataframe():\n df = _compat.makeMissingDataframe()\n ddf = dd.from_pandas(df, npartitions=5, sort=False)\n\n assert_eq(ddf.A.fillna(ddf.B), df.A.fillna(df.B))\n assert_eq(ddf.B.fillna(ddf.A), df.B.fillna(df.A))\n\n\ndef test_ffill_bfill():\n df = _compat.makeMissingDataframe()\n ddf = dd.from_pandas(df, npartitions=5, sort=False)\n\n assert_eq(ddf.ffill(), df.ffill())\n assert_eq(ddf.bfill(), df.bfill())\n assert_eq(ddf.ffill(axis=1), df.ffill(axis=1))\n assert_eq(ddf.bfill(axis=1), df.bfill(axis=1))\n\n\ndef test_fillna_series_types():\n # https://github.com/dask/dask/issues/2809\n df = pd.DataFrame({\"A\": [1, np.nan, 3], \"B\": [1, np.nan, 3]})\n ddf = dd.from_pandas(df, npartitions=2)\n fill_value = pd.Series([1, 10], index=[\"A\", \"C\"])\n assert_eq(ddf.fillna(fill_value), df.fillna(fill_value))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_sample_test_sample.assert_a_sample_frac_0_5_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_sample_test_sample.assert_a_sample_frac_0_5_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 2142, "end_line": 2157, "span_ids": ["test_sample"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_sample():\n df = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 
None, 6], \"y\": list(\"abdabd\")},\n index=[10, 20, 30, 40, 50, 60],\n )\n a = dd.from_pandas(df, 2)\n\n b = a.sample(frac=0.5)\n\n assert_eq(b, b)\n\n c = a.sample(frac=0.5, random_state=1234)\n d = a.sample(frac=0.5, random_state=1234)\n assert_eq(c, d)\n\n assert a.sample(frac=0.5)._name != a.sample(frac=0.5)._name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_sample_without_replacement_test_sample_without_replacement.assert_len_bb_len_set": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_sample_without_replacement_test_sample_without_replacement.assert_len_bb_len_set", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 2160, "end_line": 2168, "span_ids": ["test_sample_without_replacement"], "tokens": 108}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_sample_without_replacement():\n df = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, None, 6], \"y\": list(\"abdabd\")},\n index=[10, 20, 30, 40, 50, 60],\n )\n a = dd.from_pandas(df, 2)\n b = a.sample(frac=0.7, replace=False)\n bb = b.index.compute()\n assert len(bb) == len(set(bb))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_sample_raises_test_sample_raises.None_2.a_sample_frac_None_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_sample_raises_test_sample_raises.None_2.a_sample_frac_None_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 2171, "end_line": 2190, "span_ids": ["test_sample_raises"], "tokens": 198}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_sample_raises():\n df = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, None, 6], \"y\": list(\"abdabd\")},\n index=[10, 20, 30, 40, 50, 60],\n )\n a = dd.from_pandas(df, 2)\n\n # Make sure frac is replaced with n when 0 <= n <= 1\n # This is so existing code (i.e. 
ddf.sample(0.5)) won't break\n with pytest.warns(UserWarning):\n b = a.sample(0.5, random_state=1234)\n c = a.sample(frac=0.5, random_state=1234)\n assert_eq(b, c)\n\n with pytest.raises(ValueError):\n a.sample(n=10)\n\n # Make sure frac is provided\n with pytest.raises(ValueError):\n a.sample(frac=None)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_empty_max_test_query.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_empty_max_test_query.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 2193, "end_line": 2213, "span_ids": ["test_empty_max", "test_query"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_empty_max():\n meta = make_meta({\"x\": \"i8\"})\n a = dd.DataFrame(\n {(\"x\", 0): pd.DataFrame({\"x\": [1]}), (\"x\", 1): pd.DataFrame({\"x\": []})},\n \"x\",\n meta,\n [None, None, None],\n )\n assert_eq(a.x.max(), 1)\n\n\ndef test_query():\n pytest.importorskip(\"numexpr\")\n\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [5, 6, 7, 8]})\n ddf = dd.from_pandas(df, npartitions=2)\n assert_eq(ddf.query(\"x**2 > y\"), df.query(\"x**2 > y\"))\n assert_eq(\n ddf.query(\"x**2 > @value\", local_dict={\"value\": 4}),\n df.query(\"x**2 > @value\", local_dict={\"value\": 4}),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_eval_test_eval.with_pytest_raises_NotImp.d_eval_z_x_y_inpla": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_eval_test_eval.with_pytest_raises_NotImp.d_eval_z_x_y_inpla", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 2216, "end_line": 2225, "span_ids": ["test_eval"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_eval():\n pytest.importorskip(\"numexpr\")\n\n p = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [5, 6, 7, 8]})\n d = dd.from_pandas(p, npartitions=2)\n\n assert_eq(p.eval(\"x + y\"), d.eval(\"x + y\"))\n assert_eq(p.eval(\"z = x + y\", inplace=False), d.eval(\"z = x + y\", inplace=False))\n with 
pytest.raises(NotImplementedError):\n d.eval(\"z = x + y\", inplace=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_select_dtypes_test_select_dtypes.if_not_PANDAS_GT_100_.with_ctx_.tm_assert_series_equal_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_select_dtypes_test_select_dtypes.if_not_PANDAS_GT_100_.with_ctx_.tm_assert_series_equal_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 2228, "end_line": 2265, "span_ids": ["test_select_dtypes"], "tokens": 291}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"include, exclude\",\n [\n ([int], None),\n (None, [int]),\n ([np.number, object], [float]),\n ([\"datetime\"], None),\n ],\n)\ndef test_select_dtypes(include, exclude):\n n = 10\n df = pd.DataFrame(\n {\n \"cint\": [1] * n,\n \"cstr\": [\"a\"] * n,\n \"clfoat\": [1.0] * n,\n \"cdt\": pd.date_range(\"2016-01-01\", periods=n),\n }\n )\n a = dd.from_pandas(df, npartitions=2)\n result = a.select_dtypes(include=include, exclude=exclude)\n expected = df.select_dtypes(include=include, exclude=exclude)\n assert_eq(result, expected)\n\n # count dtypes\n tm.assert_series_equal(a.dtypes.value_counts(), df.dtypes.value_counts())\n\n tm.assert_series_equal(result.dtypes.value_counts(), expected.dtypes.value_counts())\n\n if not PANDAS_GT_100:\n # removed in pandas 1.0\n ctx = pytest.warns(FutureWarning)\n\n with ctx:\n tm.assert_series_equal(a.get_ftype_counts(), df.get_ftype_counts())\n tm.assert_series_equal(\n result.get_ftype_counts(), expected.get_ftype_counts()\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_deterministic_apply_concat_apply_names_test_deterministic_apply_concat_apply_names.assert_eq_res_df_x_sum_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_deterministic_apply_concat_apply_names_test_deterministic_apply_concat_apply_names.assert_eq_res_df_x_sum_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 2268, "end_line": 2327, "span_ids": ["test_deterministic_apply_concat_apply_names"], "tokens": 573}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_deterministic_apply_concat_apply_names():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [5, 6, 7, 8]})\n a = dd.from_pandas(df, npartitions=2)\n\n assert sorted(a.x.nlargest(2).dask) == sorted(a.x.nlargest(2).dask)\n assert sorted(a.x.nlargest(2).dask) != sorted(a.x.nlargest(3).dask)\n assert sorted(a.x.drop_duplicates().dask) == sorted(a.x.drop_duplicates().dask)\n assert sorted(a.groupby(\"x\").y.mean().dask) == sorted(a.groupby(\"x\").y.mean().dask)\n\n # Test aca without passing in token string\n f = lambda a: a.nlargest(5)\n f2 = lambda a: a.nlargest(3)\n assert sorted(aca(a.x, f, f, a.x._meta).dask) != sorted(\n aca(a.x, f2, f2, a.x._meta).dask\n )\n assert sorted(aca(a.x, f, f, a.x._meta).dask) == sorted(\n aca(a.x, f, f, a.x._meta).dask\n )\n\n # Test aca with keywords\n def chunk(x, c_key=0, both_key=0):\n return x.sum() + c_key + both_key\n\n def agg(x, a_key=0, both_key=0):\n return pd.Series(x).sum() + a_key + both_key\n\n c_key = 2\n a_key = 3\n both_key = 4\n\n res = aca(\n a.x,\n chunk=chunk,\n aggregate=agg,\n chunk_kwargs={\"c_key\": c_key},\n aggregate_kwargs={\"a_key\": a_key},\n both_key=both_key,\n )\n assert sorted(res.dask) == sorted(\n aca(\n a.x,\n chunk=chunk,\n aggregate=agg,\n chunk_kwargs={\"c_key\": c_key},\n aggregate_kwargs={\"a_key\": a_key},\n both_key=both_key,\n ).dask\n )\n assert sorted(res.dask) != sorted(\n aca(\n a.x,\n chunk=chunk,\n aggregate=agg,\n chunk_kwargs={\"c_key\": c_key},\n aggregate_kwargs={\"a_key\": a_key},\n both_key=0,\n ).dask\n )\n\n assert_eq(res, df.x.sum() + 2 * (c_key + both_key) + a_key + both_key)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_aca_meta_infer_test_aca_meta_infer.assert_res_compute_d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_aca_meta_infer_test_aca_meta_infer.assert_res_compute_d", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 2330, "end_line": 2349, "span_ids": ["test_aca_meta_infer"], "tokens": 205}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_aca_meta_infer():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [5, 6, 7, 8]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n def chunk(x, y, constant=1.0):\n return (x + y + constant).head()\n\n def agg(x):\n return x.head()\n\n res = aca([ddf, 2.0], chunk=chunk, aggregate=agg, chunk_kwargs=dict(constant=2.0))\n sol = (df + 2.0 + 2.0).head()\n assert_eq(res, sol)\n\n # Should infer as a scalar\n res = aca(\n [ddf.x], chunk=lambda x: pd.Series([x.sum()]), aggregate=lambda x: x.sum()\n )\n assert isinstance(res, Scalar)\n assert res.compute() == df.x.sum()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_aca_split_every_test_aca_split_every.None_1.aca_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_aca_split_every_test_aca_split_every.None_1.aca_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 2352, "end_line": 2417, "span_ids": ["test_aca_split_every"], "tokens": 597}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_aca_split_every():\n df = pd.DataFrame({\"x\": [1] * 60})\n ddf = dd.from_pandas(df, npartitions=15)\n\n def chunk(x, y, constant=0):\n return x.sum() + y + constant\n\n def combine(x, constant=0):\n return x.sum() + constant + 1\n\n def agg(x, constant=0):\n return x.sum() + constant + 2\n\n f = lambda n: aca(\n [ddf, 2.0],\n chunk=chunk,\n aggregate=agg,\n combine=combine,\n chunk_kwargs=dict(constant=1.0),\n combine_kwargs=dict(constant=2.0),\n aggregate_kwargs=dict(constant=3.0),\n split_every=n,\n )\n\n assert_max_deps(f(3), 3)\n assert_max_deps(f(4), 4, False)\n assert_max_deps(f(5), 5)\n assert set(f(15).dask.keys()) == set(f(ddf.npartitions).dask.keys())\n\n r3 = f(3)\n r4 = f(4)\n assert r3._name != r4._name\n # Only intersect on reading operations\n assert len(set(r3.dask.keys()) & set(r4.dask.keys())) == len(ddf.dask.keys())\n\n # Keywords are different for each step\n assert f(3).compute() == 60 + 15 * (2 + 1) + 7 * (2 + 1) + (3 + 2)\n # Keywords are same for each step\n res = aca(\n [ddf, 2.0],\n chunk=chunk,\n aggregate=agg,\n combine=combine,\n constant=3.0,\n split_every=3,\n )\n assert res.compute() == 60 + 15 * (2 + 3) + 7 * (3 + 1) + (3 + 2)\n # No combine provided, combine is agg\n res = aca([ddf, 2.0], chunk=chunk, aggregate=agg, constant=3, split_every=3)\n assert res.compute() == 60 + 15 * (2 + 3) + 8 * (3 + 2)\n\n # split_every must be >= 2\n with pytest.raises(ValueError):\n f(1)\n\n # combine_kwargs with no combine provided\n with pytest.raises(ValueError):\n aca(\n [ddf, 2.0],\n chunk=chunk,\n aggregate=agg,\n split_every=3,\n chunk_kwargs=dict(constant=1.0),\n combine_kwargs=dict(constant=2.0),\n aggregate_kwargs=dict(constant=3.0),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_reduction_method_test_reduction_method.assert_eq_res_pd_DataFra": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_reduction_method_test_reduction_method.assert_eq_res_pd_DataFra", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", 
"category": "test", "start_line": 2420, "end_line": 2448, "span_ids": ["test_reduction_method"], "tokens": 305}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_reduction_method():\n df = pd.DataFrame({\"x\": range(50), \"y\": range(50, 100)})\n ddf = dd.from_pandas(df, npartitions=4)\n\n chunk = lambda x, val=0: (x >= val).sum()\n agg = lambda x: x.sum()\n\n # Output of chunk is a scalar\n res = ddf.x.reduction(chunk, aggregate=agg)\n assert_eq(res, df.x.count())\n\n # Output of chunk is a series\n res = ddf.reduction(chunk, aggregate=agg)\n assert res._name == ddf.reduction(chunk, aggregate=agg)._name\n assert_eq(res, df.count())\n\n # Test with keywords\n res2 = ddf.reduction(chunk, aggregate=agg, chunk_kwargs={\"val\": 25})\n res2._name == ddf.reduction(chunk, aggregate=agg, chunk_kwargs={\"val\": 25})._name\n assert res2._name != res._name\n assert_eq(res2, (df >= 25).sum())\n\n # Output of chunk is a dataframe\n def sum_and_count(x):\n return pd.DataFrame({\"sum\": x.sum(), \"count\": x.count()})\n\n res = ddf.reduction(sum_and_count, aggregate=lambda x: x.groupby(level=0).sum())\n\n assert_eq(res, pd.DataFrame({\"sum\": df.sum(), \"count\": df.count()}))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_reduction_method_split_every_test_reduction_method_split_every.None_1.ddf_reduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_reduction_method_split_every_test_reduction_method_split_every.None_1.ddf_reduction_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 2451, "end_line": 2509, "span_ids": ["test_reduction_method_split_every"], "tokens": 538}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_reduction_method_split_every():\n df = pd.Series([1] * 60)\n ddf = dd.from_pandas(df, npartitions=15)\n\n def chunk(x, constant=0):\n return x.sum() + constant\n\n def combine(x, constant=0):\n return x.sum() + constant + 1\n\n def agg(x, constant=0):\n return x.sum() + constant + 2\n\n f = lambda n: ddf.reduction(\n chunk,\n aggregate=agg,\n combine=combine,\n chunk_kwargs=dict(constant=1.0),\n combine_kwargs=dict(constant=2.0),\n aggregate_kwargs=dict(constant=3.0),\n split_every=n,\n )\n\n assert_max_deps(f(3), 3)\n assert_max_deps(f(4), 4, False)\n assert_max_deps(f(5), 5)\n assert set(f(15).dask.keys()) == set(f(ddf.npartitions).dask.keys())\n\n r3 = f(3)\n r4 = f(4)\n assert r3._name != r4._name\n # Only intersect on reading operations\n assert len(set(r3.dask.keys()) & set(r4.dask.keys())) == 
len(ddf.dask.keys())\n\n # Keywords are different for each step\n assert f(3).compute() == 60 + 15 + 7 * (2 + 1) + (3 + 2)\n # Keywords are same for each step\n res = ddf.reduction(\n chunk, aggregate=agg, combine=combine, constant=3.0, split_every=3\n )\n assert res.compute() == 60 + 15 * 3 + 7 * (3 + 1) + (3 + 2)\n # No combine provided, combine is agg\n res = ddf.reduction(chunk, aggregate=agg, constant=3.0, split_every=3)\n assert res.compute() == 60 + 15 * 3 + 8 * (3 + 2)\n\n # split_every must be >= 2\n with pytest.raises(ValueError):\n f(1)\n\n # combine_kwargs with no combine provided\n with pytest.raises(ValueError):\n ddf.reduction(\n chunk,\n aggregate=agg,\n split_every=3,\n chunk_kwargs=dict(constant=1.0),\n combine_kwargs=dict(constant=2.0),\n aggregate_kwargs=dict(constant=3.0),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_pipe_test_gh_517.assert_ddf2_index_nunique": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_pipe_test_gh_517.assert_ddf2_index_nunique", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 2512, "end_line": 2530, "span_ids": ["test_pipe", "test_gh_517"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_pipe():\n df = pd.DataFrame({\"x\": range(50), \"y\": range(50, 100)})\n ddf = dd.from_pandas(df, npartitions=4)\n\n def f(x, y, z=0):\n return x + y + z\n\n assert_eq(ddf.pipe(f, 1, z=2), f(ddf, 1, z=2))\n assert_eq(ddf.x.pipe(f, 1, z=2), f(ddf.x, 1, z=2))\n\n\ndef test_gh_517():\n arr = np.random.randn(100, 2)\n df = pd.DataFrame(arr, columns=[\"a\", \"b\"])\n ddf = dd.from_pandas(df, 2)\n assert ddf.index.nunique().compute() == 100\n\n ddf2 = dd.from_pandas(pd.concat([df, df]), 5)\n assert ddf2.index.nunique().compute() == 100", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_drop_axis_1_test_drop_axis_1.assert_eq_ddf_drop_column": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_drop_axis_1_test_drop_axis_1.assert_eq_ddf_drop_column", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 2533, "end_line": 2545, "span_ids": ["test_drop_axis_1"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_drop_axis_1():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [5, 6, 7, 8], \"z\": [9, 10, 11, 12]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n assert_eq(ddf.drop(\"y\", axis=1), df.drop(\"y\", axis=1))\n assert_eq(ddf.drop([\"y\", \"z\"], axis=1), df.drop([\"y\", \"z\"], axis=1))\n with pytest.raises(ValueError):\n ddf.drop([\"a\", \"x\"], axis=1)\n assert_eq(\n ddf.drop([\"a\", \"x\"], axis=1, errors=\"ignore\"),\n df.drop([\"a\", \"x\"], axis=1, errors=\"ignore\"),\n )\n assert_eq(ddf.drop(columns=[\"y\", \"z\"]), df.drop(columns=[\"y\", \"z\"]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_gh580_test_rename_index.pytest_raises_ValueError_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_gh580_test_rename_index.pytest_raises_ValueError_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 2548, "end_line": 2576, "span_ids": ["test_rename_function", "test_rename_dict", "test_gh6305", "test_gh580", "test_rename_index"], "tokens": 235}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_gh580():\n df = pd.DataFrame({\"x\": np.arange(10, dtype=float)})\n ddf = dd.from_pandas(df, 2)\n assert_eq(np.cos(df[\"x\"]), np.cos(ddf[\"x\"]))\n assert_eq(np.cos(df[\"x\"]), np.cos(ddf[\"x\"]))\n\n\ndef test_gh6305():\n df = pd.DataFrame({\"x\": np.arange(3, dtype=float)})\n ddf = dd.from_pandas(df, 1)\n ddf_index_only = ddf.set_index(\"x\")\n ds = ddf[\"x\"]\n\n is_broadcastable([ddf_index_only], ds)\n\n\ndef test_rename_dict():\n renamer = {\"a\": \"A\", \"b\": \"B\"}\n assert_eq(d.rename(columns=renamer), full.rename(columns=renamer))\n\n\ndef test_rename_function():\n renamer = lambda x: x.upper()\n assert_eq(d.rename(columns=renamer), full.rename(columns=renamer))\n\n\ndef test_rename_index():\n renamer = {0: 1}\n pytest.raises(ValueError, lambda: d.rename(index=renamer))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_to_timestamp_test_to_timestamp.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_to_timestamp_test_to_timestamp.None_3", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 2579, "end_line": 2594, 
"span_ids": ["test_to_timestamp"], "tokens": 195}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_timestamp():\n index = pd.period_range(freq=\"A\", start=\"1/1/2001\", end=\"12/1/2004\")\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [10, 20, 30, 40]}, index=index)\n ddf = dd.from_pandas(df, npartitions=3)\n assert_eq(ddf.to_timestamp(), df.to_timestamp(), **CHECK_FREQ)\n assert_eq(\n ddf.to_timestamp(freq=\"M\", how=\"s\").compute(),\n df.to_timestamp(freq=\"M\", how=\"s\"),\n **CHECK_FREQ\n )\n assert_eq(ddf.x.to_timestamp(), df.x.to_timestamp())\n assert_eq(\n ddf.x.to_timestamp(freq=\"M\", how=\"s\").compute(),\n df.x.to_timestamp(freq=\"M\", how=\"s\"),\n **CHECK_FREQ\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_to_frame_test_to_dask_array_raises.None_2.a_to_dask_array_5_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_to_frame_test_to_dask_array_raises.None_2.a_to_dask_array_5_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 2597, "end_line": 2617, "span_ids": ["test_to_frame", "test_to_dask_array_raises"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_frame():\n s = pd.Series([1, 2, 3], name=\"foo\")\n a = dd.from_pandas(s, npartitions=2)\n\n assert_eq(s.to_frame(), a.to_frame())\n assert_eq(s.to_frame(\"bar\"), a.to_frame(\"bar\"))\n\n\n@pytest.mark.parametrize(\"as_frame\", [False, False])\ndef test_to_dask_array_raises(as_frame):\n s = pd.Series([1, 2, 3, 4, 5, 6], name=\"foo\")\n a = dd.from_pandas(s, npartitions=2)\n\n if as_frame:\n a = a.to_frame()\n\n with pytest.raises(ValueError, match=\"4 != 2\"):\n a.to_dask_array((1, 2, 3, 4))\n\n with pytest.raises(ValueError, match=\"Unexpected value\"):\n a.to_dask_array(5)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_to_dask_array_unknown_test_to_dask_array_unknown.assert_all_np_isnan_x_fo": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_to_dask_array_unknown_test_to_dask_array_unknown.assert_all_np_isnan_x_fo", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", 
"file_type": "text/x-python", "category": "test", "start_line": 2620, "end_line": 2640, "span_ids": ["test_to_dask_array_unknown"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"as_frame\", [False, True])\ndef test_to_dask_array_unknown(as_frame):\n s = pd.Series([1, 2, 3, 4, 5], name=\"foo\")\n a = dd.from_pandas(s, chunksize=2)\n\n if as_frame:\n a = a.to_frame()\n\n result = a.to_dask_array()\n assert isinstance(result, da.Array)\n result = result.chunks\n\n if as_frame:\n assert len(result) == 2\n assert result[1] == (1,)\n else:\n assert len(result) == 1\n\n result = result[0]\n assert len(result) == 2\n assert all(np.isnan(x) for x in result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_to_dask_array_test_to_dask_array.assert_result_chunks_e": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_to_dask_array_test_to_dask_array.assert_result_chunks_e", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 2643, "end_line": 2666, "span_ids": ["test_to_dask_array"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"lengths,as_frame,meta\",\n [\n ([2, 3], False, None),\n (True, False, None),\n (True, False, np.array([], dtype=\"f4\")),\n ],\n)\ndef test_to_dask_array(meta, as_frame, lengths):\n s = pd.Series([1, 2, 3, 4, 5], name=\"foo\", dtype=\"i4\")\n a = dd.from_pandas(s, chunksize=2)\n\n if as_frame:\n a = a.to_frame()\n\n result = a.to_dask_array(lengths=lengths, meta=meta)\n assert isinstance(result, da.Array)\n\n expected_chunks = ((2, 3),)\n\n if as_frame:\n expected_chunks = expected_chunks + ((1,),)\n\n assert result.chunks == expected_chunks", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_apply_test_apply.None_4.ddf_apply_lambda_xy_xy_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_apply_test_apply.None_4.ddf_apply_lambda_xy_xy_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 2663, "end_line": 2703, "span_ids": 
["test_apply"], "tokens": 422}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [10, 20, 30, 40]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n func = lambda row: row[\"x\"] + row[\"y\"]\n assert_eq(\n ddf.x.apply(lambda x: x + 1, meta=(\"x\", int)), df.x.apply(lambda x: x + 1)\n )\n\n # specify meta\n assert_eq(\n ddf.apply(lambda xy: xy[0] + xy[1], axis=1, meta=(None, int)),\n df.apply(lambda xy: xy[0] + xy[1], axis=1),\n )\n assert_eq(\n ddf.apply(lambda xy: xy[0] + xy[1], axis=\"columns\", meta=(None, int)),\n df.apply(lambda xy: xy[0] + xy[1], axis=\"columns\"),\n )\n\n # inference\n with pytest.warns(None):\n assert_eq(\n ddf.apply(lambda xy: xy[0] + xy[1], axis=1),\n df.apply(lambda xy: xy[0] + xy[1], axis=1),\n )\n with pytest.warns(None):\n assert_eq(ddf.apply(lambda xy: xy, axis=1), df.apply(lambda xy: xy, axis=1))\n\n # specify meta\n func = lambda x: pd.Series([x, x])\n assert_eq(ddf.x.apply(func, meta=[(0, int), (1, int)]), df.x.apply(func))\n # inference\n with pytest.warns(None):\n assert_eq(ddf.x.apply(func), df.x.apply(func))\n\n # axis=0\n with pytest.raises(NotImplementedError):\n ddf.apply(lambda xy: xy, axis=0)\n\n with pytest.raises(NotImplementedError):\n ddf.apply(lambda xy: xy, axis=\"index\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_apply_warns_test_apply_warns.assert_int64_in_str_w_0": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_apply_warns_test_apply_warns.assert_int64_in_str_w_0", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 2706, "end_line": 2724, "span_ids": ["test_apply_warns"], "tokens": 197}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_warns():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [10, 20, 30, 40]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n func = lambda row: row[\"x\"] + row[\"y\"]\n\n with pytest.warns(UserWarning) as w:\n ddf.apply(func, axis=1)\n assert len(w) == 1\n\n with pytest.warns(None) as w:\n ddf.apply(func, axis=1, meta=(None, int))\n assert len(w) == 0\n\n with pytest.warns(UserWarning) as w:\n ddf.apply(lambda x: x, axis=1)\n assert len(w) == 1\n assert \"'x'\" in str(w[0].message)\n assert \"int64\" in str(w[0].message)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_applymap_test_applymap.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_applymap_test_applymap.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 2727, "end_line": 2732, "span_ids": ["test_applymap"], "tokens": 108}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_applymap():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [10, 20, 30, 40]})\n ddf = dd.from_pandas(df, npartitions=2)\n assert_eq(ddf.applymap(lambda x: x + 1), df.applymap(lambda x: x + 1))\n\n assert_eq(ddf.applymap(lambda x: (x, x)), df.applymap(lambda x: (x, x)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_abs_test_round.assert_eq_ddf_round_2_d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_abs_test_round.assert_eq_ddf_round_2_d", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 2735, "end_line": 2754, "span_ids": ["test_round", "test_abs"], "tokens": 239}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_abs():\n df = pd.DataFrame(\n {\n \"A\": [1, -2, 3, -4, 5],\n \"B\": [-6.0, -7, -8, -9, 10],\n \"C\": [\"a\", \"b\", \"c\", \"d\", \"e\"],\n }\n )\n ddf = dd.from_pandas(df, npartitions=2)\n assert_eq(ddf.A.abs(), df.A.abs())\n assert_eq(ddf[[\"A\", \"B\"]].abs(), df[[\"A\", \"B\"]].abs())\n pytest.raises(ValueError, lambda: ddf.C.abs())\n pytest.raises(TypeError, lambda: ddf.abs())\n\n\ndef test_round():\n df = pd.DataFrame({\"col1\": [1.123, 2.123, 3.123], \"col2\": [1.234, 2.234, 3.234]})\n ddf = dd.from_pandas(df, npartitions=2)\n assert_eq(ddf.round(), df.round())\n assert_eq(ddf.round(2), df.round(2))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_cov_test_cov.None_7": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_cov_test_cov.None_7", "embedding": null, "metadata": {"file_path": 
"dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 2757, "end_line": 2796, "span_ids": ["test_cov"], "tokens": 375}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_cov():\n # DataFrame\n df = _compat.makeMissingDataframe()\n ddf = dd.from_pandas(df, npartitions=6)\n\n res = ddf.cov()\n res2 = ddf.cov(split_every=2)\n res3 = ddf.cov(10)\n res4 = ddf.cov(10, split_every=2)\n sol = df.cov()\n sol2 = df.cov(10)\n assert_eq(res, sol)\n assert_eq(res2, sol)\n assert_eq(res3, sol2)\n assert_eq(res4, sol2)\n assert res._name == ddf.cov()._name\n assert res._name != res2._name\n assert res3._name != res4._name\n assert res._name != res3._name\n\n # Series\n a = df.A\n b = df.B\n da = dd.from_pandas(a, npartitions=6)\n db = dd.from_pandas(b, npartitions=7)\n\n res = da.cov(db)\n res2 = da.cov(db, split_every=2)\n res3 = da.cov(db, 10)\n res4 = da.cov(db, 10, split_every=2)\n sol = a.cov(b)\n sol2 = a.cov(b, 10)\n assert_eq(res, sol)\n assert_eq(res2, sol)\n assert_eq(res3, sol2)\n assert_eq(res4, sol2)\n assert res._name == da.cov(db)._name\n assert res._name != res2._name\n assert res3._name != res4._name\n assert res._name != res3._name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_corr_test_corr.pytest_raises_TypeError_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_corr_test_corr.pytest_raises_TypeError_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 2799, "end_line": 2843, "span_ids": ["test_corr"], "tokens": 442}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_corr():\n # DataFrame\n df = _compat.makeMissingDataframe()\n ddf = dd.from_pandas(df, npartitions=6)\n\n res = ddf.corr()\n res2 = ddf.corr(split_every=2)\n res3 = ddf.corr(min_periods=10)\n res4 = ddf.corr(min_periods=10, split_every=2)\n sol = df.corr()\n sol2 = df.corr(min_periods=10)\n assert_eq(res, sol)\n assert_eq(res2, sol)\n assert_eq(res3, sol2)\n assert_eq(res4, sol2)\n assert res._name == ddf.corr()._name\n assert res._name != res2._name\n assert res3._name != res4._name\n assert res._name != res3._name\n\n pytest.raises(NotImplementedError, lambda: ddf.corr(method=\"spearman\"))\n\n # Series\n a = df.A\n b = df.B\n da = dd.from_pandas(a, npartitions=6)\n db = dd.from_pandas(b, npartitions=7)\n\n res = da.corr(db)\n res2 = da.corr(db, split_every=2)\n res3 = da.corr(db, min_periods=10)\n res4 = da.corr(db, min_periods=10, split_every=2)\n sol 
= a.corr(b)\n    sol2 = a.corr(b, min_periods=10)\n    assert_eq(res, sol)\n    assert_eq(res2, sol)\n    assert_eq(res3, sol2)\n    assert_eq(res4, sol2)\n    assert res._name == da.corr(db)._name\n    assert res._name != res2._name\n    assert res3._name != res4._name\n    assert res._name != res3._name\n\n    pytest.raises(NotImplementedError, lambda: da.corr(db, method=\"spearman\"))\n    pytest.raises(TypeError, lambda: da.corr(ddf))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_corr_same_name_test_corr_same_name.assert_eq_result2_expect": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_corr_same_name_test_corr_same_name.assert_eq_result2_expect", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 2846, "end_line": 2858, "span_ids": ["test_corr_same_name"], "tokens": 123}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_corr_same_name():\n    # Series with same names (see https://github.com/dask/dask/issues/4906)\n\n    df = _compat.makeMissingDataframe()\n    ddf = dd.from_pandas(df, npartitions=6)\n\n    result = ddf.A.corr(ddf.B.rename(\"A\"))\n    expected = ddf.A.corr(ddf.B)\n    assert_eq(result, expected)\n\n    # test with split_every\n    result2 = ddf.A.corr(ddf.B.rename(\"A\"), split_every=2)\n    assert_eq(result2, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_cov_corr_meta_test_cov_corr_stable.assert_eq_ddf_corr_split_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_cov_corr_meta_test_cov_corr_stable.assert_eq_ddf_corr_split_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 2861, "end_line": 2882, "span_ids": ["test_cov_corr_stable", "test_cov_corr_meta"], "tokens": 263}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_cov_corr_meta():\n    df = pd.DataFrame(\n        {\n            \"a\": np.array([1, 2, 3]),\n            \"b\": np.array([1.0, 2.0, 3.0], dtype=\"f4\"),\n            \"c\": np.array([1.0, 2.0, 3.0]),\n        },\n        index=pd.Index([1, 2, 3], name=\"myindex\"),\n    )\n    ddf = dd.from_pandas(df, npartitions=2)\n    assert_eq(ddf.corr(), df.corr())\n    
assert_eq(ddf.cov(), df.cov())\n assert ddf.a.cov(ddf.b)._meta.dtype == \"f8\"\n assert ddf.a.corr(ddf.b)._meta.dtype == \"f8\"\n\n\n@pytest.mark.slow\ndef test_cov_corr_stable():\n df = pd.DataFrame(np.random.uniform(-1, 1, (20000000, 2)), columns=[\"a\", \"b\"])\n ddf = dd.from_pandas(df, npartitions=50)\n assert_eq(ddf.cov(split_every=8), df.cov())\n assert_eq(ddf.corr(split_every=8), df.corr())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_cov_corr_mixed_test_cov_corr_mixed.assert_eq_ddf_cov_split_e": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_cov_corr_mixed_test_cov_corr_mixed.assert_eq_ddf_cov_split_e", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 2885, "end_line": 2908, "span_ids": ["test_cov_corr_mixed"], "tokens": 354}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_cov_corr_mixed():\n size = 1000\n d = {\n \"dates\": pd.date_range(\"2015-01-01\", periods=size, freq=\"1T\"),\n \"unique_id\": np.arange(0, size),\n \"ints\": np.random.randint(0, size, size=size),\n \"floats\": np.random.randn(size),\n \"bools\": np.random.choice([0, 1], size=size),\n \"int_nans\": np.random.choice([0, 1, np.nan], size=size),\n \"float_nans\": np.random.choice([0.0, 1.0, np.nan], size=size),\n \"constant\": 1,\n \"int_categorical\": np.random.choice([10, 20, 30, 40, 50], size=size),\n \"categorical_binary\": np.random.choice([\"a\", \"b\"], size=size),\n \"categorical_nans\": np.random.choice([\"a\", \"b\", \"c\"], size=size),\n }\n df = pd.DataFrame(d)\n df[\"hardbools\"] = df[\"bools\"] == 1\n df[\"categorical_nans\"] = df[\"categorical_nans\"].replace(\"c\", np.nan)\n df[\"categorical_binary\"] = df[\"categorical_binary\"].astype(\"category\")\n df[\"unique_id\"] = df[\"unique_id\"].astype(str)\n\n ddf = dd.from_pandas(df, npartitions=20)\n assert_eq(ddf.corr(split_every=4), df.corr(), check_divisions=False)\n assert_eq(ddf.cov(split_every=4), df.cov(), check_divisions=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_autocorr_test_autocorr.pytest_raises_TypeError_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_autocorr_test_autocorr.pytest_raises_TypeError_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 2911, "end_line": 2918, "span_ids": ["test_autocorr"], "tokens": 
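Editor's note (illustrative sketch, not part of the indexed docstore content): the cov/corr tests above vary split_every, which caps how many per-partition summaries each aggregation task folds together; the statistic is unchanged, only the shape of the task tree (and hence the tokenized _name) differs. For example:

import numpy as np
import pandas as pd
import dask.dataframe as dd

df = pd.DataFrame(np.random.randn(1000, 2), columns=["a", "b"])
ddf = dd.from_pandas(df, npartitions=8)

# Same statistics, different reduction trees:
print(ddf.corr().compute())               # flat: 8 partials -> 1 aggregate
print(ddf.corr(split_every=2).compute())  # binary tree of partial merges
print(ddf.a.cov(ddf.b, split_every=4).compute())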
121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_autocorr():\n    x = pd.Series(np.random.random(100))\n    dx = dd.from_pandas(x, npartitions=10)\n    assert_eq(dx.autocorr(2), x.autocorr(2))\n    assert_eq(dx.autocorr(0), x.autocorr(0))\n    assert_eq(dx.autocorr(-2), x.autocorr(-2))\n    assert_eq(dx.autocorr(2, split_every=3), x.autocorr(2))\n    pytest.raises(TypeError, lambda: dx.autocorr(1.5))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_apply_infer_columns_test_apply_infer_columns.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_apply_infer_columns_test_apply_infer_columns.None_5", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 2921, "end_line": 2958, "span_ids": ["test_apply_infer_columns"], "tokens": 375}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_infer_columns():\n    df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [10, 20, 30, 40]})\n    ddf = dd.from_pandas(df, npartitions=2)\n\n    def return_df(x):\n        # will create a new DataFrame whose columns are ['sum', 'mean']\n        return pd.Series([x.sum(), x.mean()], index=[\"sum\", \"mean\"])\n\n    # DataFrame to completely different DataFrame\n    with pytest.warns(None):\n        result = ddf.apply(return_df, axis=1)\n    assert isinstance(result, dd.DataFrame)\n    tm.assert_index_equal(result.columns, pd.Index([\"sum\", \"mean\"]))\n    assert_eq(result, df.apply(return_df, axis=1))\n\n    # DataFrame to Series\n    with pytest.warns(None):\n        result = ddf.apply(lambda x: 1, axis=1)\n    assert isinstance(result, dd.Series)\n    assert result.name is None\n    assert_eq(result, df.apply(lambda x: 1, axis=1))\n\n    def return_df2(x):\n        return pd.Series([x * 2, x * 3], index=[\"x2\", \"x3\"])\n\n    # Series to completely different DataFrame\n    with pytest.warns(None):\n        result = ddf.x.apply(return_df2)\n    assert isinstance(result, dd.DataFrame)\n    tm.assert_index_equal(result.columns, pd.Index([\"x2\", \"x3\"]))\n    assert_eq(result, df.x.apply(return_df2))\n\n    # Series to Series\n    with pytest.warns(None):\n        result = ddf.x.apply(lambda x: 1)\n    assert isinstance(result, dd.Series)\n    assert result.name == \"x\"\n    assert_eq(result, df.x.apply(lambda x: 1))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_index_time_properties_test_nlargest_nsmallest.for_m_in_nlargest_ns.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_index_time_properties_test_nlargest_nsmallest.for_m_in_nlargest_ns.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 2961, "end_line": 3005, "span_ids": ["test_index_time_properties", "test_nlargest_nsmallest"], "tokens": 411}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_index_time_properties():\n i = _compat.makeTimeSeries()\n a = dd.from_pandas(i, npartitions=3)\n\n assert \"day\" in dir(a.index)\n # returns a numpy array in pandas, but a Index in dask\n assert_eq(a.index.day, pd.Index(i.index.day))\n assert_eq(a.index.month, pd.Index(i.index.month))\n\n\ndef test_nlargest_nsmallest():\n from string import ascii_lowercase\n\n df = pd.DataFrame(\n {\n \"a\": np.random.permutation(20),\n \"b\": list(ascii_lowercase[:20]),\n \"c\": np.random.permutation(20).astype(\"float64\"),\n }\n )\n ddf = dd.from_pandas(df, npartitions=3)\n\n for m in [\"nlargest\", \"nsmallest\"]:\n f = lambda df, *args, **kwargs: getattr(df, m)(*args, **kwargs)\n\n res = f(ddf, 5, \"a\")\n res2 = f(ddf, 5, \"a\", split_every=2)\n sol = f(df, 5, \"a\")\n assert_eq(res, sol)\n assert_eq(res2, sol)\n assert res._name != res2._name\n\n res = f(ddf, 5, [\"a\", \"c\"])\n res2 = f(ddf, 5, [\"a\", \"c\"], split_every=2)\n sol = f(df, 5, [\"a\", \"c\"])\n assert_eq(res, sol)\n assert_eq(res2, sol)\n assert res._name != res2._name\n\n res = f(ddf.a, 5)\n res2 = f(ddf.a, 5, split_every=2)\n sol = f(df.a, 5)\n assert_eq(res, sol)\n assert_eq(res2, sol)\n assert res._name != res2._name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_reset_index_test_reset_index.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_reset_index_test_reset_index.None_3", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3008, "end_line": 3030, "span_ids": ["test_reset_index"], "tokens": 224}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_reset_index():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [10, 20, 30, 40]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n sol = 
df.reset_index()\n res = ddf.reset_index()\n assert all(d is None for d in res.divisions)\n assert_eq(res, sol, check_index=False)\n\n sol = df.reset_index(drop=True)\n res = ddf.reset_index(drop=True)\n assert all(d is None for d in res.divisions)\n assert_eq(res, sol, check_index=False)\n\n sol = df.x.reset_index()\n res = ddf.x.reset_index()\n assert all(d is None for d in res.divisions)\n assert_eq(res, sol, check_index=False)\n\n sol = df.x.reset_index(drop=True)\n res = ddf.x.reset_index(drop=True)\n assert all(d is None for d in res.divisions)\n assert_eq(res, sol, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dataframe_compute_forward_kwargs_test_dataframe_itertuples.for_a_b_in_zip_df_iter.assert_a_b": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dataframe_compute_forward_kwargs_test_dataframe_itertuples.for_a_b_in_zip_df_iter.assert_a_b", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3033, "end_line": 3065, "span_ids": ["test_series_iter", "test_dataframe_itertuples", "test_series_iteritems", "test_dataframe_iterrows", "test_dataframe_compute_forward_kwargs"], "tokens": 350}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dataframe_compute_forward_kwargs():\n x = dd.from_pandas(pd.DataFrame({\"a\": range(10)}), npartitions=2).a.sum()\n x.compute(bogus_keyword=10)\n\n\ndef test_series_iteritems():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4]})\n ddf = dd.from_pandas(df, npartitions=2)\n for (a, b) in zip(df[\"x\"].iteritems(), ddf[\"x\"].iteritems()):\n assert a == b\n\n\ndef test_series_iter():\n s = pd.DataFrame({\"x\": [1, 2, 3, 4]})\n ds = dd.from_pandas(s, npartitions=2)\n for (a, b) in zip(s[\"x\"], ds[\"x\"]):\n assert a == b\n\n\ndef test_dataframe_iterrows():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [10, 20, 30, 40]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n for (a, b) in zip(df.iterrows(), ddf.iterrows()):\n tm.assert_series_equal(a[1], b[1])\n\n\ndef test_dataframe_itertuples():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [10, 20, 30, 40]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n for (a, b) in zip(df.itertuples(), ddf.itertuples()):\n assert a == b", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dataframe_items_test_dataframe_items.for_a_b_in_zip_df_item._column_values": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dataframe_items_test_dataframe_items.for_a_b_in_zip_df_item._column_values", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3068, "end_line": 3081, "span_ids": ["test_dataframe_items"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"columns\",\n [\n (\"x\", \"y\"),\n (\"x\", \"x\"),\n pd.MultiIndex.from_tuples([(\"x\", 1), (\"x\", 2)], names=(\"letter\", \"number\")),\n ],\n)\ndef test_dataframe_items(columns):\n df = pd.DataFrame([[1, 10], [2, 20], [3, 30], [4, 40]], columns=columns)\n ddf = dd.from_pandas(df, npartitions=2)\n for (a, b) in zip(df.items(), ddf.items()):\n assert a[0] == b[0] # column name\n assert_eq(a[1], b[1].compute()) # column values", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dataframe_itertuples_with_index_false_test_astype.assert_eq_a_x_astype_floa": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dataframe_itertuples_with_index_false_test_astype.assert_eq_a_x_astype_floa", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3084, "end_line": 3108, "span_ids": ["test_astype", "test_dataframe_itertuples_with_name_none", "test_dataframe_itertuples_with_index_false"], "tokens": 283}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dataframe_itertuples_with_index_false():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [10, 20, 30, 40]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n for (a, b) in zip(df.itertuples(index=False), ddf.itertuples(index=False)):\n assert a == b\n\n\ndef test_dataframe_itertuples_with_name_none():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [10, 20, 30, 40]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n for (a, b) in zip(df.itertuples(name=None), ddf.itertuples(name=None)):\n assert a == b\n assert type(a) is type(b)\n\n\ndef test_astype():\n df = pd.DataFrame(\n {\"x\": [1, 2, 3, None], \"y\": [10, 20, 30, 40]}, index=[10, 20, 30, 40]\n )\n a = dd.from_pandas(df, 2)\n\n assert_eq(a.astype(float), df.astype(float))\n assert_eq(a.x.astype(float), df.x.astype(float))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, 
"__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_astype_categoricals_test_astype_categoricals.assert_dx_compute_dtype": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_astype_categoricals_test_astype_categoricals.assert_dx_compute_dtype", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3111, "end_line": 3132, "span_ids": ["test_astype_categoricals"], "tokens": 204}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_astype_categoricals():\n df = pd.DataFrame(\n {\n \"x\": [\"a\", \"b\", \"c\", \"b\", \"c\"],\n \"y\": [\"x\", \"y\", \"z\", \"x\", \"y\"],\n \"z\": [1, 2, 3, 4, 5],\n }\n )\n df = df.astype({\"y\": \"category\"})\n ddf = dd.from_pandas(df, 2)\n assert ddf.y.cat.known\n\n ddf2 = ddf.astype({\"x\": \"category\"})\n assert not ddf2.x.cat.known\n assert ddf2.y.cat.known\n assert ddf2.x.dtype == \"category\"\n assert ddf2.compute().x.dtype == \"category\"\n\n dx = ddf.x.astype(\"category\")\n assert not dx.cat.known\n assert dx.dtype == \"category\"\n assert dx.compute().dtype == \"category\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_astype_categoricals_known_test_astype_categoricals_known.for_dtype_known_in_ca.assert_dx2_cat_known_k": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_astype_categoricals_known_test_astype_categoricals_known.for_dtype_known_in_ca.assert_dx2_cat_known_k", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3135, "end_line": 3161, "span_ids": ["test_astype_categoricals_known"], "tokens": 290}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_astype_categoricals_known():\n df = pd.DataFrame(\n {\n \"x\": [\"a\", \"b\", \"c\", \"b\", \"c\"],\n \"y\": [\"x\", \"y\", \"z\", \"y\", \"z\"],\n \"z\": [\"b\", \"b\", \"b\", \"c\", \"b\"],\n \"other\": [1, 2, 3, 4, 5],\n }\n )\n ddf = dd.from_pandas(df, 2)\n\n abc = pd.api.types.CategoricalDtype([\"a\", \"b\", \"c\"], ordered=False)\n category = pd.api.types.CategoricalDtype(ordered=False)\n\n # DataFrame\n ddf2 = ddf.astype({\"x\": abc, \"y\": category, \"z\": \"category\", \"other\": \"f8\"})\n\n for col, known in [(\"x\", True), (\"y\", False), (\"z\", False)]:\n x = getattr(ddf2, col)\n assert 
pd.api.types.is_categorical_dtype(x.dtype)\n assert x.cat.known == known\n\n # Series\n for dtype, known in [(\"category\", False), (category, False), (abc, True)]:\n dx2 = ddf.x.astype(dtype)\n assert pd.api.types.is_categorical_dtype(dx2.dtype)\n assert dx2.cat.known == known", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_groupby_callable_test_groupby_callable.assert_eq_a_y_groupby_ise": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_groupby_callable_test_groupby_callable.assert_eq_a_y_groupby_ise", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3164, "end_line": 3172, "span_ids": ["test_groupby_callable"], "tokens": 123}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_callable():\n a = pd.DataFrame({\"x\": [1, 2, 3, None], \"y\": [10, 20, 30, 40]}, index=[1, 2, 3, 4])\n b = dd.from_pandas(a, 2)\n\n def iseven(x):\n return x % 2 == 0\n\n assert_eq(a.groupby(iseven).y.sum(), b.groupby(iseven).y.sum())\n assert_eq(a.y.groupby(iseven).sum(), b.y.groupby(iseven).sum())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_methods_tokenize_differently__assert_info.assert_stdout_pd_stdou": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_methods_tokenize_differently__assert_info.assert_stdout_pd_stdou", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3175, "end_line": 3199, "span_ids": ["_assert_info", "test_methods_tokenize_differently"], "tokens": 203}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_methods_tokenize_differently():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4]})\n df = dd.from_pandas(df, npartitions=1)\n assert (\n df.x.map_partitions(lambda x: pd.Series(x.min()))._name\n != df.x.map_partitions(lambda x: pd.Series(x.max()))._name\n )\n\n\ndef _assert_info(df, ddf, memory_usage=True):\n from io import StringIO\n\n assert isinstance(df, pd.DataFrame)\n assert isinstance(ddf, dd.DataFrame)\n\n buf_pd, buf_da = StringIO(), StringIO()\n\n df.info(buf=buf_pd, 
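Editor's note (illustrative sketch, not part of the indexed docstore content): the astype tests above turn on whether categorical categories are statically known. A bare "category" dtype leaves them unknown until compute time, while a concrete pd.api.types.CategoricalDtype fixes them at graph-construction time:

import pandas as pd
import dask.dataframe as dd

df = pd.DataFrame({"x": ["a", "b", "c", "b", "c"]})
ddf = dd.from_pandas(df, npartitions=2)

# Unknown categories: dask has not seen every partition's values.
print(ddf.x.astype("category").cat.known)  # False

# Known categories: the dtype itself carries the full category list.
abc = pd.api.types.CategoricalDtype(["a", "b", "c"], ordered=False)
print(ddf.x.astype(abc).cat.known)  # True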
memory_usage=memory_usage)\n    ddf.info(buf=buf_da, verbose=True, memory_usage=memory_usage)\n\n    stdout_pd = buf_pd.getvalue()\n    stdout_da = buf_da.getvalue()\n    stdout_da = stdout_da.replace(str(type(ddf)), str(type(df)))\n    # TODO\n    assert stdout_pd == stdout_da", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_info_test_info.assert_ddf_info_buf_None_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_info_test_info.assert_ddf_info_buf_None_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3202, "end_line": 3234, "span_ids": ["test_info"], "tokens": 279}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(not dd._compat.PANDAS_GT_100, reason=\"Changed info repr\")\ndef test_info():\n    from io import StringIO\n\n    pandas_format._put_lines = put_lines\n\n    test_frames = [\n        pd.DataFrame(\n            {\"x\": [1, 2, 3, 4], \"y\": [1, 0, 1, 0]}, index=pd.Int64Index(range(4))\n        ),  # No RangeIndex in dask\n        pd.DataFrame(),\n    ]\n\n    for df in test_frames:\n        ddf = dd.from_pandas(df, npartitions=4)\n        _assert_info(df, ddf)\n\n    buf = StringIO()\n    ddf = dd.from_pandas(\n        pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [1, 0, 1, 0]}, index=range(4)),\n        npartitions=4,\n    )\n\n    # Verbose=False\n    ddf.info(buf=buf, verbose=False)\n    assert buf.getvalue() == (\n        \"<class 'dask.dataframe.core.DataFrame'>\\n\"\n        \"Columns: 2 entries, x to y\\n\"\n        \"dtypes: int64(2)\"\n    )\n\n    # buf=None\n    assert ddf.info(buf=None) is None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_groupby_multilevel_info_test_groupby_multilevel_info.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_groupby_multilevel_info_test_groupby_multilevel_info.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3237, "end_line": 3270, "span_ids": ["test_groupby_multilevel_info"], "tokens": 329}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(not dd._compat.PANDAS_GT_100, reason=\"Changed info repr\")\ndef test_groupby_multilevel_info():\n    # GH 1844\n    from io import StringIO\n\n    
pandas_format._put_lines = put_lines\n\n    df = pd.DataFrame({\"A\": [1, 1, 2, 2], \"B\": [1, 2, 3, 4], \"C\": [1, 2, 3, 4]})\n    ddf = dd.from_pandas(df, npartitions=2)\n\n    g = ddf.groupby([\"A\", \"B\"]).sum()\n    # slight difference between memory repr (single additional space)\n    _assert_info(g.compute(), g, memory_usage=False)\n\n    buf = StringIO()\n    g.info(buf, verbose=False)\n    assert buf.getvalue() == (\n        \"<class 'dask.dataframe.core.DataFrame'>\\n\"\n        \"Columns: 1 entries, C to C\\n\"\n        \"dtypes: int64(1)\"\n    )\n\n    # multilevel\n    g = ddf.groupby([\"A\", \"B\"]).agg([\"count\", \"sum\"])\n    _assert_info(g.compute(), g, memory_usage=False)\n\n    buf = StringIO()\n    g.info(buf, verbose=False)\n    expected = (\n        \"<class 'dask.dataframe.core.DataFrame'>\\n\"\n        \"Columns: 2 entries, ('C', 'count') to ('C', 'sum')\\n\"\n        \"dtypes: int64(2)\"\n    )\n    assert buf.getvalue() == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_categorize_info_test_categorize_info.assert_buf_getvalue_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_categorize_info_test_categorize_info.assert_buf_getvalue_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3273, "end_line": 3301, "span_ids": ["test_categorize_info"], "tokens": 328}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(not dd._compat.PANDAS_GT_100, reason=\"Changed info repr\")\ndef test_categorize_info():\n    # assert that we can call info after categorize\n    # workaround for: https://github.com/pydata/pandas/issues/14368\n    from io import StringIO\n\n    pandas_format._put_lines = put_lines\n\n    df = pd.DataFrame(\n        {\"x\": [1, 2, 3, 4], \"y\": pd.Series(list(\"aabc\")), \"z\": pd.Series(list(\"aabc\"))},\n        index=pd.Int64Index(range(4)),\n    )  # No RangeIndex in dask\n    ddf = dd.from_pandas(df, npartitions=4).categorize([\"y\"])\n\n    # Verbose=False\n    buf = StringIO()\n    ddf.info(buf=buf, verbose=True)\n    expected = (\n        \"<class 'dask.dataframe.core.DataFrame'>\\n\"\n        \"Int64Index: 4 entries, 0 to 3\\n\"\n        \"Data columns (total 3 columns):\\n\"\n        \" #   Column  Non-Null Count  Dtype\\n\"\n        \"---  ------  --------------  -----\\n\"\n        \" 0   x       4 non-null      int64\\n\"\n        \" 1   y       4 non-null      category\\n\"\n        \" 2   z       4 non-null      object\\n\"\n        \"dtypes: category(1), object(1), int64(1)\"\n    )\n    assert buf.getvalue() == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_gh_1301_test_column_assignment.assert_z_not_in_orig_co": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_gh_1301_test_column_assignment.assert_z_not_in_orig_co", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3304, "end_line": 3328, "span_ids": ["test_column_assignment", "test_timeseries_sorted", "test_gh_1301"], "tokens": 258}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_gh_1301():\n df = pd.DataFrame([[\"1\", \"2\"], [\"3\", \"4\"]])\n ddf = dd.from_pandas(df, npartitions=2)\n ddf2 = ddf.assign(y=ddf[1].astype(int))\n assert_eq(ddf2, df.assign(y=df[1].astype(int)))\n\n assert ddf2.dtypes[\"y\"] == np.dtype(int)\n\n\ndef test_timeseries_sorted():\n df = _compat.makeTimeDataFrame()\n ddf = dd.from_pandas(df.reset_index(), npartitions=2)\n df.index.name = \"index\"\n assert_eq(ddf.set_index(\"index\", sorted=True, drop=True), df)\n\n\ndef test_column_assignment():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [1, 0, 1, 0]})\n ddf = dd.from_pandas(df, npartitions=2)\n orig = ddf.copy()\n ddf[\"z\"] = ddf.x + ddf.y\n df[\"z\"] = df.x + df.y\n\n assert_eq(df, ddf)\n assert \"z\" not in orig.columns", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_array_assignment_test_array_assignment.None_1.ddf_z_darr": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_array_assignment_test_array_assignment.None_1.ddf_z_darr", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3331, "end_line": 3354, "span_ids": ["test_array_assignment"], "tokens": 228}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_array_assignment():\n df = pd.DataFrame({\"x\": np.random.normal(size=50), \"y\": np.random.normal(size=50)})\n ddf = dd.from_pandas(df, npartitions=2)\n orig = ddf.copy()\n\n arr = np.array(np.random.normal(size=50))\n darr = da.from_array(arr, chunks=25)\n\n df[\"z\"] = arr\n ddf[\"z\"] = darr\n assert_eq(df, ddf)\n assert \"z\" not in orig.columns\n\n arr = np.array(np.random.normal(size=(50, 50)))\n darr = da.from_array(arr, chunks=25)\n msg = \"Array assignment only supports 1-D arrays\"\n with pytest.raises(ValueError, match=msg):\n ddf[\"z\"] = darr\n\n arr = np.array(np.random.normal(size=50))\n darr = da.from_array(arr, chunks=10)\n msg = \"Number of partitions do not match\"\n with pytest.raises(ValueError, match=msg):\n ddf[\"z\"] = darr", "start_char_idx": 
null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_columns_assignment_test_columns_assignment.assert_eq_df_ddf_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_columns_assignment_test_columns_assignment.assert_eq_df_ddf_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3357, "end_line": 3367, "span_ids": ["test_columns_assignment"], "tokens": 135}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_columns_assignment():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n df2 = df.assign(y=df.x + 1, z=df.x - 1)\n df[[\"a\", \"b\"]] = df2[[\"y\", \"z\"]]\n\n ddf2 = ddf.assign(y=ddf.x + 1, z=ddf.x - 1)\n ddf[[\"a\", \"b\"]] = ddf2[[\"y\", \"z\"]]\n\n assert_eq(df, ddf)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_attribute_assignment_test_setitem_triggering_realign.assert_len_a_12": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_attribute_assignment_test_setitem_triggering_realign.assert_len_a_12", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3370, "end_line": 3382, "span_ids": ["test_setitem_triggering_realign", "test_attribute_assignment"], "tokens": 171}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_attribute_assignment():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5], \"y\": [1.0, 2.0, 3.0, 4.0, 5.0]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n ddf.y = ddf.x + ddf.y\n assert_eq(ddf, df.assign(y=df.x + df.y))\n\n\ndef test_setitem_triggering_realign():\n a = dd.from_pandas(pd.DataFrame({\"A\": range(12)}), npartitions=3)\n b = dd.from_pandas(pd.Series(range(12), name=\"B\"), npartitions=4)\n a[\"C\"] = b\n assert len(a) == 12", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_inplace_operators_test_inplace_operators.assert_eq_ddf_df_assign_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_inplace_operators_test_inplace_operators.assert_eq_ddf_df_assign_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3385, "end_line": 3392, "span_ids": ["test_inplace_operators"], "tokens": 117}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_inplace_operators():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5], \"y\": [1.0, 2.0, 3.0, 4.0, 5.0]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n ddf.y **= 0.5\n\n assert_eq(ddf.y, df.y ** 0.5)\n assert_eq(ddf, df.assign(y=df.y ** 0.5))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_idxmaxmin_test_idxmaxmin.with_warnings_catch_warni.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_idxmaxmin_test_idxmaxmin.with_warnings_catch_warni.None_3", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3395, "end_line": 3444, "span_ids": ["test_idxmaxmin"], "tokens": 557}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"skipna\", [True, False])\n@pytest.mark.parametrize(\n \"idx\",\n [\n np.arange(100),\n sorted(np.random.random(size=100)),\n pd.date_range(\"20150101\", periods=100),\n ],\n)\ndef test_idxmaxmin(idx, skipna):\n df = pd.DataFrame(np.random.randn(100, 5), columns=list(\"abcde\"), index=idx)\n df.b.iloc[31] = np.nan\n df.d.iloc[78] = np.nan\n ddf = dd.from_pandas(df, npartitions=3)\n\n with warnings.catch_warnings(record=True):\n assert_eq(df.idxmax(axis=1, skipna=skipna), ddf.idxmax(axis=1, skipna=skipna))\n assert_eq(df.idxmin(axis=1, skipna=skipna), ddf.idxmin(axis=1, skipna=skipna))\n\n assert_eq(df.idxmax(skipna=skipna), ddf.idxmax(skipna=skipna))\n assert_eq(df.idxmax(skipna=skipna), ddf.idxmax(skipna=skipna, split_every=2))\n assert (\n ddf.idxmax(skipna=skipna)._name\n != ddf.idxmax(skipna=skipna, split_every=2)._name\n )\n\n assert_eq(df.idxmin(skipna=skipna), ddf.idxmin(skipna=skipna))\n assert_eq(df.idxmin(skipna=skipna), ddf.idxmin(skipna=skipna, split_every=2))\n assert (\n ddf.idxmin(skipna=skipna)._name\n != ddf.idxmin(skipna=skipna, split_every=2)._name\n 
)\n\n assert_eq(df.a.idxmax(skipna=skipna), ddf.a.idxmax(skipna=skipna))\n assert_eq(\n df.a.idxmax(skipna=skipna), ddf.a.idxmax(skipna=skipna, split_every=2)\n )\n assert (\n ddf.a.idxmax(skipna=skipna)._name\n != ddf.a.idxmax(skipna=skipna, split_every=2)._name\n )\n\n assert_eq(df.a.idxmin(skipna=skipna), ddf.a.idxmin(skipna=skipna))\n assert_eq(\n df.a.idxmin(skipna=skipna), ddf.a.idxmin(skipna=skipna, split_every=2)\n )\n assert (\n ddf.a.idxmin(skipna=skipna)._name\n != ddf.a.idxmin(skipna=skipna, split_every=2)._name\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_idxmaxmin_empty_partitions_test_idxmaxmin_empty_partitions.None_2.ddf_b_idxmax_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_idxmaxmin_empty_partitions_test_idxmaxmin_empty_partitions.None_2.ddf_b_idxmax_compute_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3447, "end_line": 3473, "span_ids": ["test_idxmaxmin_empty_partitions"], "tokens": 283}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_idxmaxmin_empty_partitions():\n df = pd.DataFrame(\n {\"a\": [1, 2, 3], \"b\": [1.5, 2, 3], \"c\": [np.NaN] * 3, \"d\": [1, 2, np.NaN]}\n )\n empty = df.iloc[:0]\n\n ddf = dd.concat(\n [dd.from_pandas(df, npartitions=1)]\n + [dd.from_pandas(empty, npartitions=1)] * 10\n )\n\n for skipna in [True, False]:\n assert_eq(ddf.idxmin(skipna=skipna, split_every=3), df.idxmin(skipna=skipna))\n\n assert_eq(\n ddf[[\"a\", \"b\", \"d\"]].idxmin(skipna=skipna, split_every=3),\n df[[\"a\", \"b\", \"d\"]].idxmin(skipna=skipna),\n )\n\n assert_eq(ddf.b.idxmax(split_every=3), df.b.idxmax())\n\n # Completely empty raises\n ddf = dd.concat([dd.from_pandas(empty, npartitions=1)] * 10)\n with pytest.raises(ValueError):\n ddf.idxmax().compute()\n with pytest.raises(ValueError):\n ddf.b.idxmax().compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_getitem_meta_test_getitem_multilevel.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_getitem_meta_test_getitem_multilevel.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3476, "end_line": 3490, "span_ids": ["test_getitem_meta", "test_getitem_multilevel"], "tokens": 207}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_getitem_meta():\n data = {\"col1\": [\"a\", \"a\", \"b\"], \"col2\": [0, 1, 0]}\n\n df = pd.DataFrame(data=data, columns=[\"col1\", \"col2\"])\n ddf = dd.from_pandas(df, npartitions=1)\n\n assert_eq(df.col2[df.col1 == \"a\"], ddf.col2[ddf.col1 == \"a\"])\n\n\ndef test_getitem_multilevel():\n pdf = pd.DataFrame({(\"A\", \"0\"): [1, 2, 2], (\"B\", \"1\"): [1, 2, 3]})\n ddf = dd.from_pandas(pdf, npartitions=3)\n\n assert_eq(pdf[\"A\", \"0\"], ddf[\"A\", \"0\"])\n assert_eq(pdf[[(\"A\", \"0\"), (\"B\", \"1\")]], ddf[[(\"A\", \"0\"), (\"B\", \"1\")]])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_getitem_string_subclass_test_ipython_completion.assert_c_not_in_complet": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_getitem_string_subclass_test_ipython_completion.assert_c_not_in_complet", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3499, "end_line": 3533, "span_ids": ["test_ipython_completion", "test_getitem_column_types", "test_getitem_string_subclass", "test_getitem_with_bool_dataframe_as_key"], "tokens": 328}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_getitem_string_subclass():\n df = pd.DataFrame({\"column_1\": list(range(10))})\n ddf = dd.from_pandas(df, npartitions=3)\n\n class string_subclass(str):\n pass\n\n column_1 = string_subclass(\"column_1\")\n\n assert_eq(df[column_1], ddf[column_1])\n\n\n@pytest.mark.parametrize(\"col_type\", [list, np.array, pd.Series, pd.Index])\ndef test_getitem_column_types(col_type):\n df = pd.DataFrame({\"A\": [1, 2], \"B\": [3, 4], \"C\": [5, 6]})\n ddf = dd.from_pandas(df, 2)\n cols = col_type([\"C\", \"A\", \"B\"])\n\n assert_eq(df[cols], ddf[cols])\n\n\ndef test_getitem_with_bool_dataframe_as_key():\n df = pd.DataFrame({\"A\": [1, 2], \"B\": [3, 4], \"C\": [5, 6]})\n ddf = dd.from_pandas(df, 2)\n assert_eq(df[df > 3], ddf[ddf > 3])\n\n\ndef test_ipython_completion():\n df = pd.DataFrame({\"a\": [1], \"b\": [2]})\n ddf = dd.from_pandas(df, npartitions=1)\n\n completions = ddf._ipython_key_completions_()\n assert \"a\" in completions\n assert \"b\" in completions\n assert \"c\" not in completions", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_diff_test_diff.pytest_raises_TypeError_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_diff_test_diff.pytest_raises_TypeError_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3524, "end_line": 3542, "span_ids": ["test_diff"], "tokens": 222}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_diff():\n df = pd.DataFrame(np.random.randn(100, 5), columns=list(\"abcde\"))\n ddf = dd.from_pandas(df, 5)\n\n assert_eq(ddf.diff(), df.diff())\n assert_eq(ddf.diff(0), df.diff(0))\n assert_eq(ddf.diff(2), df.diff(2))\n assert_eq(ddf.diff(-2), df.diff(-2))\n\n assert_eq(ddf.diff(2, axis=1), df.diff(2, axis=1))\n\n assert_eq(ddf.a.diff(), df.a.diff())\n assert_eq(ddf.a.diff(0), df.a.diff(0))\n assert_eq(ddf.a.diff(2), df.a.diff(2))\n assert_eq(ddf.a.diff(-2), df.a.diff(-2))\n\n assert ddf.diff(2)._name == ddf.diff(2)._name\n assert ddf.diff(2)._name != ddf.diff(3)._name\n pytest.raises(TypeError, lambda: ddf.diff(1.5))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_shift_test_shift.with_pytest_raises_TypeEr.ddf_shift_1_5_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_shift_test_shift.with_pytest_raises_TypeEr.ddf_shift_1_5_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3545, "end_line": 3564, "span_ids": ["test_shift"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_shift():\n df = _compat.makeTimeDataFrame()\n ddf = dd.from_pandas(df, npartitions=4)\n\n # DataFrame\n assert_eq(ddf.shift(), df.shift())\n assert_eq(ddf.shift(0), df.shift(0))\n assert_eq(ddf.shift(2), df.shift(2))\n assert_eq(ddf.shift(-2), df.shift(-2))\n\n assert_eq(ddf.shift(2, axis=1), df.shift(2, axis=1))\n\n # Series\n assert_eq(ddf.A.shift(), df.A.shift())\n assert_eq(ddf.A.shift(0), df.A.shift(0))\n assert_eq(ddf.A.shift(2), df.A.shift(2))\n assert_eq(ddf.A.shift(-2), df.A.shift(-2))\n\n with pytest.raises(TypeError):\n ddf.shift(1.5)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_shift_with_freq_DatetimeIndex_test_shift_with_freq_DatetimeIndex.assert_res_known_division": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_shift_with_freq_DatetimeIndex_test_shift_with_freq_DatetimeIndex.assert_res_known_division", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3567, "end_line": 3580, "span_ids": ["test_shift_with_freq_DatetimeIndex"], "tokens": 223}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"data_freq,divs1\", [(\"B\", False), (\"D\", True), (\"H\", True)])\ndef test_shift_with_freq_DatetimeIndex(data_freq, divs1):\n df = _compat.makeTimeDataFrame()\n df = df.set_index(_compat.makeDateIndex(30, freq=data_freq))\n ddf = dd.from_pandas(df, npartitions=4)\n for freq, divs2 in [(\"S\", True), (\"W\", False), (pd.Timedelta(10, unit=\"h\"), True)]:\n for d, p in [(ddf, df), (ddf.A, df.A), (ddf.index, df.index)]:\n res = d.shift(2, freq=freq)\n assert_eq(res, p.shift(2, freq=freq))\n assert res.known_divisions == divs2\n # Index shifts also work with freq=None\n res = ddf.index.shift(2)\n assert_eq(res, df.index.shift(2))\n assert res.known_divisions == divs1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_shift_with_freq_PeriodIndex_test_shift_with_freq_PeriodIndex.with_pytest_raises_ValueE._freq_keyword_not_suppor": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_shift_with_freq_PeriodIndex_test_shift_with_freq_PeriodIndex.with_pytest_raises_ValueE._freq_keyword_not_suppor", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3583, "end_line": 3600, "span_ids": ["test_shift_with_freq_PeriodIndex"], "tokens": 228}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"data_freq,divs\", [(\"B\", False), (\"D\", True), (\"H\", True)])\ndef test_shift_with_freq_PeriodIndex(data_freq, divs):\n df = _compat.makeTimeDataFrame()\n # PeriodIndex\n df = df.set_index(pd.period_range(\"2000-01-01\", periods=30, freq=data_freq))\n ddf = dd.from_pandas(df, npartitions=4)\n for d, p in [(ddf, df), (ddf.A, df.A)]:\n res = d.shift(2, freq=data_freq)\n assert_eq(res, p.shift(2, freq=data_freq))\n assert res.known_divisions 
== divs\n # PeriodIndex.shift doesn't have `freq` parameter\n res = ddf.index.shift(2)\n assert_eq(res, df.index.shift(2))\n assert res.known_divisions == divs\n\n df = _compat.makeTimeDataFrame()\n with pytest.raises(ValueError):\n ddf.index.shift(2, freq=\"D\") # freq keyword not supported", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_shift_with_freq_TimedeltaIndex_test_shift_with_freq_errors.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_shift_with_freq_TimedeltaIndex_test_shift_with_freq_errors.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3603, "end_line": 3626, "span_ids": ["test_shift_with_freq_errors", "test_shift_with_freq_TimedeltaIndex"], "tokens": 288}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_shift_with_freq_TimedeltaIndex():\n df = _compat.makeTimeDataFrame()\n # TimedeltaIndex\n for data_freq in [\"T\", \"D\", \"H\"]:\n df = df.set_index(_compat.makeTimedeltaIndex(30, freq=data_freq))\n ddf = dd.from_pandas(df, npartitions=4)\n for freq in [\"S\", pd.Timedelta(10, unit=\"h\")]:\n for d, p in [(ddf, df), (ddf.A, df.A), (ddf.index, df.index)]:\n res = d.shift(2, freq=freq)\n assert_eq(res, p.shift(2, freq=freq))\n assert res.known_divisions\n # Index shifts also work with freq=None\n res = ddf.index.shift(2)\n assert_eq(res, df.index.shift(2))\n assert res.known_divisions\n\n\ndef test_shift_with_freq_errors():\n # Other index types error\n df = _compat.makeDataFrame()\n ddf = dd.from_pandas(df, npartitions=4)\n pytest.raises(NotImplementedError, lambda: ddf.shift(2, freq=\"S\"))\n pytest.raises(NotImplementedError, lambda: ddf.A.shift(2, freq=\"S\"))\n pytest.raises(NotImplementedError, lambda: ddf.index.shift(2))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_first_and_last_test_first_and_last.for_freq_in_freqs_.for_offset_in_offsets_.assert_eq_f_ddf_A_offset": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_first_and_last_test_first_and_last.for_freq_in_freqs_.for_offset_in_offsets_.assert_eq_f_ddf_A_offset", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3629, "end_line": 3642, "span_ids": ["test_first_and_last"], "tokens": 203}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"method\", [\"first\", \"last\"])\ndef test_first_and_last(method):\n f = lambda x, offset: getattr(x, method)(offset)\n freqs = [\"12h\", \"D\"]\n offsets = [\"0d\", \"100h\", \"20d\", \"20B\", \"3W\", \"3M\", \"400d\", \"13M\"]\n for freq in freqs:\n index = pd.date_range(\"1/1/2000\", \"1/1/2001\", freq=freq)[::4]\n df = pd.DataFrame(\n np.random.random((len(index), 4)), index=index, columns=[\"A\", \"B\", \"C\", \"D\"]\n )\n ddf = dd.from_pandas(df, npartitions=10)\n for offset in offsets:\n assert_eq(f(ddf, offset), f(df, offset))\n assert_eq(f(ddf.A, offset), f(df.A, offset))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_hash_split_unique_test_hash_split_unique.assert_sorted_dropped_com": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_hash_split_unique_test_hash_split_unique.assert_sorted_dropped_com", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3645, "end_line": 3663, "span_ids": ["test_hash_split_unique"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"npartitions\", [1, 4, 20])\n@pytest.mark.parametrize(\"split_every\", [2, 5])\n@pytest.mark.parametrize(\"split_out\", [None, 1, 5, 20])\ndef test_hash_split_unique(npartitions, split_every, split_out):\n from string import ascii_lowercase\n\n s = pd.Series(np.random.choice(list(ascii_lowercase), 1000, replace=True))\n ds = dd.from_pandas(s, npartitions=npartitions)\n\n dropped = ds.unique(split_every=split_every, split_out=split_out)\n\n dsk = dropped.__dask_optimize__(dropped.dask, dropped.__dask_keys__())\n from dask.core import get_deps\n\n dependencies, dependents = get_deps(dsk)\n\n assert len([k for k, v in dependencies.items() if not v]) == npartitions\n assert dropped.npartitions == (split_out or 1)\n assert sorted(dropped.compute(scheduler=\"sync\")) == sorted(s.unique())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_split_out_drop_duplicates_test_split_out_drop_duplicates.for_subset_keep_in_produ.assert_eq_sol_res_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_split_out_drop_duplicates_test_split_out_drop_duplicates.for_subset_keep_in_produ.assert_eq_sol_res_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3666, "end_line": 3684, "span_ids": ["test_split_out_drop_duplicates"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"split_every\", [None, 2])\ndef test_split_out_drop_duplicates(split_every):\n x = np.concatenate([np.arange(10)] * 100)[:, None]\n y = x.copy()\n z = np.concatenate([np.arange(20)] * 50)[:, None]\n rs = np.random.RandomState(1)\n rs.shuffle(x)\n rs.shuffle(y)\n rs.shuffle(z)\n df = pd.DataFrame(np.concatenate([x, y, z], axis=1), columns=[\"x\", \"y\", \"z\"])\n ddf = dd.from_pandas(df, npartitions=20)\n\n for subset, keep in product([None, [\"x\", \"z\"]], [\"first\", \"last\"]):\n sol = df.drop_duplicates(subset=subset, keep=keep)\n res = ddf.drop_duplicates(\n subset=subset, keep=keep, split_every=split_every, split_out=10\n )\n assert res.npartitions == 10\n assert_eq(sol, res)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_split_out_value_counts_test_split_out_value_counts.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_split_out_value_counts_test_split_out_value_counts.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3687, "end_line": 3695, "span_ids": ["test_split_out_value_counts"], "tokens": 112}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"split_every\", [None, 2])\ndef test_split_out_value_counts(split_every):\n df = pd.DataFrame({\"x\": [1, 2, 3] * 100})\n ddf = dd.from_pandas(df, npartitions=5)\n\n assert ddf.x.value_counts(split_out=10, split_every=split_every).npartitions == 10\n assert_eq(\n ddf.x.value_counts(split_out=10, split_every=split_every), df.x.value_counts()\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_values_test_values.assert_eq_df_index_values": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_values_test_values.assert_eq_df_index_values", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3698, "end_line": 3710, "span_ids": ["test_values"], "tokens": 140}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_values():\n from dask.array.utils import assert_eq\n\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [2, 3, 4, 5]},\n index=pd.Index([1.0, 2.0, 3.0, 4.0], name=\"ind\"),\n )\n ddf = dd.from_pandas(df, 2)\n\n assert_eq(df.values, ddf.values)\n assert_eq(df.x.values, ddf.x.values)\n assert_eq(df.y.values, ddf.y.values)\n assert_eq(df.index.values, ddf.index.values)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_copy_test_del.assert_eq_a_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_copy_test_del.assert_eq_a_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3713, "end_line": 3738, "span_ids": ["test_copy", "test_del"], "tokens": 189}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_copy():\n df = pd.DataFrame({\"x\": [1, 2, 3]})\n\n a = dd.from_pandas(df, npartitions=2)\n b = a.copy()\n\n a[\"y\"] = a.x * 2\n\n assert_eq(b, df)\n\n df[\"y\"] = df.x * 2\n\n\ndef test_del():\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [2, 3, 4, 5]},\n index=pd.Index([1.0, 2.0, 3.0, 4.0], name=\"ind\"),\n )\n a = dd.from_pandas(df, 2)\n b = a.copy()\n\n del a[\"x\"]\n assert_eq(b, df)\n\n del df[\"x\"]\n assert_eq(a, df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_memory_usage_test_memory_usage.assert_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_memory_usage_test_memory_usage.assert_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3741, "end_line": 3754, "span_ids": 
["test_memory_usage"], "tokens": 156}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"index\", [True, False])\n@pytest.mark.parametrize(\"deep\", [True, False])\ndef test_memory_usage(index, deep):\n df = pd.DataFrame({\"x\": [1, 2, 3], \"y\": [1.0, 2.0, 3.0], \"z\": [\"a\", \"b\", \"c\"]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n assert_eq(\n df.memory_usage(index=index, deep=deep),\n ddf.memory_usage(index=index, deep=deep),\n )\n assert (\n df.x.memory_usage(index=index, deep=deep)\n == ddf.x.memory_usage(index=index, deep=deep).compute()\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_memory_usage_per_partition_test_memory_usage_per_partition.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_memory_usage_per_partition_test_memory_usage_per_partition.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3757, "end_line": 3782, "span_ids": ["test_memory_usage_per_partition"], "tokens": 252}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"index\", [True, False])\n@pytest.mark.parametrize(\"deep\", [True, False])\ndef test_memory_usage_per_partition(index, deep):\n df = pd.DataFrame(\n {\n \"x\": [1, 2, 3, 4, 5],\n \"y\": [1.0, 2.0, 3.0, 4.0, 5.0],\n \"z\": [\"a\", \"b\", \"c\", \"d\", \"e\"],\n }\n )\n ddf = dd.from_pandas(df, npartitions=2)\n\n # DataFrame.memory_usage_per_partition\n expected = pd.Series(\n part.compute().memory_usage(index=index, deep=deep).sum()\n for part in ddf.partitions\n )\n result = ddf.memory_usage_per_partition(index=index, deep=deep)\n assert_eq(expected, result)\n\n # Series.memory_usage_per_partition\n expected = pd.Series(\n part.x.compute().memory_usage(index=index, deep=deep) for part in ddf.partitions\n )\n result = ddf.x.memory_usage_per_partition(index=index, deep=deep)\n assert_eq(expected, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dataframe_reductions_arithmetic_test_dataframe_reductions_arithmetic.assert_eq_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dataframe_reductions_arithmetic_test_dataframe_reductions_arithmetic.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3785, "end_line": 3808, "span_ids": ["test_dataframe_reductions_arithmetic"], "tokens": 177}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"reduction\",\n [\n \"sum\",\n \"mean\",\n \"std\",\n \"var\",\n \"count\",\n \"min\",\n \"max\",\n \"idxmin\",\n \"idxmax\",\n \"prod\",\n \"all\",\n \"sem\",\n ],\n)\ndef test_dataframe_reductions_arithmetic(reduction):\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5], \"y\": [1.1, 2.2, 3.3, 4.4, 5.5]})\n ddf = dd.from_pandas(df, npartitions=3)\n\n assert_eq(\n ddf - (getattr(ddf, reduction)() + 1), df - (getattr(df, reduction)() + 1)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dataframe_mode_test_dataframe_mode.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dataframe_mode_test_dataframe_mode.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3811, "end_line": 3824, "span_ids": ["test_dataframe_mode"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dataframe_mode():\n data = [[\"Tom\", 10, 7], [\"Farahn\", 14, 7], [\"Julie\", 14, 5], [\"Nick\", 10, 10]]\n\n df = pd.DataFrame(data, columns=[\"Name\", \"Num\", \"Num\"])\n ddf = dd.from_pandas(df, npartitions=3)\n\n assert_eq(ddf.mode(), df.mode())\n assert_eq(ddf.Name.mode(), df.Name.mode())\n\n # test empty\n df = pd.DataFrame(columns=[\"a\", \"b\"])\n ddf = dd.from_pandas(df, npartitions=1)\n # check_index=False should be removed once https://github.com/pandas-dev/pandas/issues/33321 is resolved.\n assert_eq(ddf.mode(), df.mode(), check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_datetime_loc_open_slicing_test_datetime_loc_open_slicing.assert_eq_df_0_loc_02_0": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_datetime_loc_open_slicing_test_datetime_loc_open_slicing.assert_eq_df_0_loc_02_0", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3827, "end_line": 3834, "span_ids": ["test_datetime_loc_open_slicing"], "tokens": 184}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_datetime_loc_open_slicing():\n dtRange = pd.date_range(\"01.01.2015\", \"05.05.2015\")\n df = pd.DataFrame(np.random.random((len(dtRange), 2)), index=dtRange)\n ddf = dd.from_pandas(df, npartitions=5)\n assert_eq(df.loc[:\"02.02.2015\"], ddf.loc[:\"02.02.2015\"])\n assert_eq(df.loc[\"02.02.2015\":], ddf.loc[\"02.02.2015\":])\n assert_eq(df[0].loc[:\"02.02.2015\"], ddf[0].loc[:\"02.02.2015\"])\n assert_eq(df[0].loc[\"02.02.2015\":], ddf[0].loc[\"02.02.2015\":])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_to_datetime_test_to_datetime.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_to_datetime_test_to_datetime.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3837, "end_line": 3856, "span_ids": ["test_to_datetime"], "tokens": 199}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_datetime():\n df = pd.DataFrame({\"year\": [2015, 2016], \"month\": [2, 3], \"day\": [4, 5]})\n df.index.name = \"ix\"\n ddf = dd.from_pandas(df, npartitions=2)\n\n assert_eq(pd.to_datetime(df), dd.to_datetime(ddf))\n\n s = pd.Series([\"3/11/2000\", \"3/12/2000\", \"3/13/2000\"] * 100)\n s.index = s.values\n ds = dd.from_pandas(s, npartitions=10, sort=False)\n\n assert_eq(\n pd.to_datetime(s, infer_datetime_format=True),\n dd.to_datetime(ds, infer_datetime_format=True),\n )\n assert_eq(\n pd.to_datetime(s.index, infer_datetime_format=True),\n dd.to_datetime(ds.index, infer_datetime_format=True),\n check_divisions=False,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_to_timedelta_test_isna.assert_eq_pd_isna_s_dd_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_to_timedelta_test_isna.assert_eq_pd_isna_s_dd_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3859, "end_line": 3877, "span_ids": ["test_isna", "test_to_timedelta"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_timedelta():\n s = pd.Series(range(10))\n ds = dd.from_pandas(s, npartitions=2)\n\n assert_eq(pd.to_timedelta(s), dd.to_timedelta(ds))\n assert_eq(pd.to_timedelta(s, unit=\"h\"), dd.to_timedelta(ds, unit=\"h\"))\n\n s = pd.Series([1, 2, \"this will error\"])\n ds = dd.from_pandas(s, npartitions=2)\n assert_eq(pd.to_timedelta(s, errors=\"coerce\"), dd.to_timedelta(ds, errors=\"coerce\"))\n\n\n@pytest.mark.skipif(PANDAS_VERSION < \"0.22.0\", reason=\"No isna method\")\n@pytest.mark.parametrize(\"values\", [[np.NaN, 0], [1, 1]])\ndef test_isna(values):\n s = pd.Series(values)\n ds = dd.from_pandas(s, npartitions=2)\n\n assert_eq(pd.isna(s), dd.isna(ds))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_slice_on_filtered_boundary_test_slice_on_filtered_boundary.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_slice_on_filtered_boundary_test_slice_on_filtered_boundary.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3880, "end_line": 3891, "span_ids": ["test_slice_on_filtered_boundary"], "tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"drop\", [0, 9])\ndef test_slice_on_filtered_boundary(drop):\n # https://github.com/dask/dask/issues/2211\n x = np.arange(10)\n x[[5, 6]] -= 2\n df = pd.DataFrame({\"A\": x, \"B\": np.arange(len(x))})\n pdf = df.set_index(\"A\").query(\"B != {}\".format(drop))\n ddf = dd.from_pandas(df, 1).set_index(\"A\").query(\"B != {}\".format(drop))\n\n result = dd.concat([ddf, ddf.rename(columns={\"B\": \"C\"})], axis=1)\n expected = pd.concat([pdf, pdf.rename(columns={\"B\": \"C\"})], axis=1)\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_boundary_slice_nonmonotonic_test_boundary_slice_empty.tm_assert_frame_equal_res": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_boundary_slice_nonmonotonic_test_boundary_slice_empty.tm_assert_frame_equal_res", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3894, "end_line": 3922, "span_ids": ["test_boundary_slice_empty", "test_boundary_slice_nonmonotonic"], "tokens": 246}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_boundary_slice_nonmonotonic():\n x = np.array([-1, -2, 2, 4, 3])\n df = pd.DataFrame({\"B\": range(len(x))}, index=x)\n result = methods.boundary_slice(df, 0, 4)\n expected = df.iloc[2:]\n tm.assert_frame_equal(result, expected)\n\n result = methods.boundary_slice(df, -1, 4)\n expected = df.drop(-2)\n tm.assert_frame_equal(result, expected)\n\n result = methods.boundary_slice(df, -2, 3)\n expected = df.drop(4)\n tm.assert_frame_equal(result, expected)\n\n result = methods.boundary_slice(df, -2, 3.5)\n expected = df.drop(4)\n tm.assert_frame_equal(result, expected)\n\n result = methods.boundary_slice(df, -2, 4)\n expected = df\n tm.assert_frame_equal(result, expected)\n\n\ndef test_boundary_slice_empty():\n df = pd.DataFrame()\n result = methods.boundary_slice(df, 1, 4)\n expected = pd.DataFrame()\n tm.assert_frame_equal(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_with_boundary_test_with_boundary.tm_assert_frame_equal_res": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_with_boundary_test_with_boundary.tm_assert_frame_equal_res", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3925, "end_line": 3946, "span_ids": ["test_with_boundary"], "tokens": 277}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"start, stop, right_boundary, left_boundary, drop\",\n [\n (-1, None, False, False, [-1, -2]),\n (-1, None, False, True, [-2]),\n (None, 3, False, False, [3, 4]),\n (None, 3, True, False, [4]),\n # Missing keys\n (-0.5, None, False, False, [-1, -2]),\n (-0.5, None, False, True, [-1, -2]),\n (-1.5, None, False, True, [-2]),\n (None, 3.5, False, False, [4]),\n (None, 3.5, True, False, [4]),\n (None, 
2.5, False, False, [3, 4]),\n ],\n)\ndef test_with_boundary(start, stop, right_boundary, left_boundary, drop):\n x = np.array([-1, -2, 2, 4, 3])\n df = pd.DataFrame({\"B\": range(len(x))}, index=x)\n result = methods.boundary_slice(df, start, stop, right_boundary, left_boundary)\n expected = df.drop(drop)\n tm.assert_frame_equal(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_boundary_slice_same_test_boundary_slice_same.tm_assert_frame_equal_res": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_boundary_slice_same_test_boundary_slice_same.tm_assert_frame_equal_res", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3949, "end_line": 3969, "span_ids": ["test_boundary_slice_same"], "tokens": 281}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"index, left, right\",\n [\n (range(10), 0, 9),\n (range(10), -1, None),\n (range(10), None, 10),\n ([-1, 0, 2, 1], None, None),\n ([-1, 0, 2, 1], -1, None),\n ([-1, 0, 2, 1], None, 2),\n ([-1, 0, 2, 1], -2, 3),\n (pd.date_range(\"2017\", periods=10), None, None),\n (pd.date_range(\"2017\", periods=10), pd.Timestamp(\"2017\"), None),\n (pd.date_range(\"2017\", periods=10), None, pd.Timestamp(\"2017-01-10\")),\n (pd.date_range(\"2017\", periods=10), pd.Timestamp(\"2016\"), None),\n (pd.date_range(\"2017\", periods=10), None, pd.Timestamp(\"2018\")),\n ],\n)\ndef test_boundary_slice_same(index, left, right):\n df = pd.DataFrame({\"A\": range(len(index))}, index=index)\n result = methods.boundary_slice(df, left, right)\n tm.assert_frame_equal(result, df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_better_errors_object_reductions_test_bool.for_cond_in_conditions_.with_pytest_raises_ValueE.bool_cond_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_better_errors_object_reductions_test_bool.for_cond_in_conditions_.with_pytest_raises_ValueE.bool_cond_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3972, "end_line": 4007, "span_ids": ["test_bool", "test_better_errors_object_reductions", "test_sample_empty_partitions", "test_coerce"], "tokens": 338}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", 
"tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_better_errors_object_reductions():\n # GH2452\n s = pd.Series([\"a\", \"b\", \"c\", \"d\"])\n ds = dd.from_pandas(s, npartitions=2)\n with pytest.raises(ValueError) as err:\n ds.mean()\n assert str(err.value) == \"`mean` not supported with object series\"\n\n\ndef test_sample_empty_partitions():\n @dask.delayed\n def make_df(n):\n return pd.DataFrame(np.zeros((n, 4)), columns=list(\"abcd\"))\n\n ddf = dd.from_delayed([make_df(0), make_df(100), make_df(0)])\n ddf2 = ddf.sample(frac=0.2)\n # smoke test sample on empty partitions\n res = ddf2.compute()\n assert res.dtypes.equals(ddf2.dtypes)\n\n\ndef test_coerce():\n df = pd.DataFrame(np.arange(100).reshape((10, 10)))\n ddf = dd.from_pandas(df, npartitions=2)\n funcs = (int, float, complex)\n for d, t in product(funcs, (ddf, ddf[0])):\n pytest.raises(TypeError, lambda: t(d))\n\n\ndef test_bool():\n df = pd.DataFrame(np.arange(100).reshape((10, 10)))\n ddf = dd.from_pandas(df, npartitions=2)\n conditions = [ddf, ddf[0], ddf == ddf, ddf[0] == ddf[0]]\n for cond in conditions:\n with pytest.raises(ValueError):\n bool(cond)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_cumulative_multiple_columns_test_cumulative_multiple_columns.assert_eq_ddf_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_cumulative_multiple_columns_test_cumulative_multiple_columns.assert_eq_ddf_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 4010, "end_line": 4022, "span_ids": ["test_cumulative_multiple_columns"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_cumulative_multiple_columns():\n # GH 3037\n df = pd.DataFrame(np.random.randn(100, 5), columns=list(\"abcde\"))\n ddf = dd.from_pandas(df, 5)\n\n for d in [ddf, df]:\n for c in df.columns:\n d[c + \"cs\"] = d[c].cumsum()\n d[c + \"cmin\"] = d[c].cummin()\n d[c + \"cmax\"] = d[c].cummax()\n d[c + \"cp\"] = d[c].cumprod()\n\n assert_eq(ddf, df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_partition_array_test_map_partition_array.for_pre_in_lambda_a_a_.assert_x_chunks_0_np": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_partition_array_test_map_partition_array.for_pre_in_lambda_a_a_.assert_x_chunks_0_np", "embedding": null, 
"metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 4025, "end_line": 4045, "span_ids": ["test_map_partition_array"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"func\", [np.asarray, M.to_records])\ndef test_map_partition_array(func):\n from dask.array.utils import assert_eq\n\n df = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 5], \"y\": [6.0, 7.0, 8.0, 9.0, 10.0]},\n index=[\"a\", \"b\", \"c\", \"d\", \"e\"],\n )\n ddf = dd.from_pandas(df, npartitions=2)\n\n for pre in [lambda a: a, lambda a: a.x, lambda a: a.y, lambda a: a.index]:\n\n try:\n expected = func(pre(df))\n except Exception:\n continue\n x = pre(ddf).map_partitions(func)\n assert_eq(x, expected)\n\n assert isinstance(x, da.Array)\n assert x.chunks[0] == (np.nan, np.nan)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_partition_sparse_test_map_partition_sparse.for_pre_in_lambda_a_a_.assert_computed_coords_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_partition_sparse_test_map_partition_sparse.for_pre_in_lambda_a_a_.assert_computed_coords_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 4060, "end_line": 4081, "span_ids": ["test_map_partition_sparse"], "tokens": 228}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(_numpy_120, reason=\"sparse-383\")\ndef test_map_partition_sparse():\n sparse = pytest.importorskip(\"sparse\")\n # Avoid searchsorted failure.\n pytest.importorskip(\"numba\", minversion=\"0.40.0\")\n\n df = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 5], \"y\": [6.0, 7.0, 8.0, 9.0, 10.0]},\n index=[\"a\", \"b\", \"c\", \"d\", \"e\"],\n )\n ddf = dd.from_pandas(df, npartitions=2)\n\n def f(d):\n return sparse.COO(np.array(d))\n\n for pre in [lambda a: a, lambda a: a.x]:\n expected = f(pre(df))\n result = pre(ddf).map_partitions(f)\n assert isinstance(result, da.Array)\n computed = result.compute()\n assert (computed.data == expected.data).all()\n assert (computed.coords == expected.coords).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_mixed_dask_array_operations_test_mixed_dask_array_operations.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_mixed_dask_array_operations_test_mixed_dask_array_operations.None_4", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 4072, "end_line": 4082, "span_ids": ["test_mixed_dask_array_operations"], "tokens": 149}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_mixed_dask_array_operations():\n df = pd.DataFrame({\"x\": [1, 2, 3]}, index=[4, 5, 6])\n ddf = dd.from_pandas(df, npartitions=2)\n\n assert_eq(df.x + df.x.values, ddf.x + ddf.x.values)\n assert_eq(df.x.values + df.x, ddf.x.values + ddf.x)\n\n assert_eq(df.x + df.index.values, ddf.x + ddf.index.values)\n assert_eq(df.index.values + df.x, ddf.index.values + ddf.x)\n\n assert_eq(df.x + df.x.values.sum(), ddf.x + ddf.x.values.sum())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_mixed_dask_array_operations_errors_test_mixed_dask_array_operations_errors.assert_add_in_str_info_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_mixed_dask_array_operations_errors_test_mixed_dask_array_operations_errors.assert_add_in_str_info_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 4085, "end_line": 4099, "span_ids": ["test_mixed_dask_array_operations_errors"], "tokens": 154}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_mixed_dask_array_operations_errors():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5]}, index=[4, 5, 6, 7, 8])\n ddf = dd.from_pandas(df, npartitions=2)\n\n x = da.arange(5, chunks=((1, 4),))\n x._chunks = ((np.nan, np.nan),)\n\n with pytest.raises(ValueError):\n (ddf.x + x).compute()\n\n x = da.arange(5, chunks=((2, 2, 1),))\n with pytest.raises(ValueError) as info:\n ddf.x + x\n\n assert \"add\" in str(info.value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_mixed_dask_array_multi_dimensional_test_mixed_dask_array_multi_dimensional.assert_eq_ddf_y_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_mixed_dask_array_multi_dimensional_test_mixed_dask_array_multi_dimensional.assert_eq_ddf_y_x_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 4102, "end_line": 4113, "span_ids": ["test_mixed_dask_array_multi_dimensional"], "tokens": 191}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_mixed_dask_array_multi_dimensional():\n df = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 5], \"y\": [5.0, 6.0, 7.0, 8.0, 9.0]}, columns=[\"x\", \"y\"]\n )\n ddf = dd.from_pandas(df, npartitions=2)\n\n x = (df.values + 1).astype(float)\n dx = (ddf.values + 1).astype(float)\n\n assert_eq(ddf + dx + 1, df + x + 1)\n assert_eq(ddf + dx.rechunk((None, 1)) + 1, df + x + 1)\n assert_eq(ddf[[\"y\", \"x\"]] + dx + 1, df[[\"y\", \"x\"]] + x + 1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_meta_raises_test_meta_raises.assert_meta_not_in_str": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_meta_raises_test_meta_raises.assert_meta_not_in_str", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 4116, "end_line": 4132, "span_ids": ["test_meta_raises"], "tokens": 162}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_meta_raises():\n # Raise when we use a user defined function\n s = pd.Series([\"abcd\", \"abcd\"])\n ds = dd.from_pandas(s, npartitions=2)\n try:\n ds.map(lambda x: x[3])\n except ValueError as e:\n assert \"meta=\" in str(e)\n\n # But not otherwise\n df = pd.DataFrame({\"a\": [\"x\", \"y\", \"y\"], \"b\": [\"x\", \"y\", \"z\"], \"c\": [1, 2, 3]})\n ddf = dd.from_pandas(df, npartitions=1)\n\n with pytest.raises(Exception) as info:\n ddf.a + ddf.c\n\n assert \"meta=\" not in str(info.value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dask_dataframe_holds_scipy_sparse_containers_test_dask_dataframe_holds_scipy_sparse_containers.assert_all_isinstance_v_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dask_dataframe_holds_scipy_sparse_containers_test_dask_dataframe_holds_scipy_sparse_containers.assert_all_isinstance_v_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 4135, "end_line": 4147, "span_ids": ["test_dask_dataframe_holds_scipy_sparse_containers"], "tokens": 143}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dask_dataframe_holds_scipy_sparse_containers():\n sparse = pytest.importorskip(\"scipy.sparse\")\n da = pytest.importorskip(\"dask.array\")\n x = da.random.random((1000, 10), chunks=(100, 10))\n x[x < 0.9] = 0\n df = dd.from_dask_array(x)\n y = df.map_partitions(sparse.csr_matrix)\n\n assert isinstance(y, da.Array)\n\n vs = y.to_delayed().flatten().tolist()\n values = dask.compute(*vs, scheduler=\"single-threaded\")\n assert all(isinstance(v, sparse.csr_matrix) for v in values)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_partitions_delays_large_inputs_test_map_partitions_delays_large_inputs.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_partitions_delays_large_inputs_test_map_partitions_delays_large_inputs.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 4150, "end_line": 4160, "span_ids": ["test_map_partitions_delays_large_inputs"], "tokens": 117}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_partitions_delays_large_inputs():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n big = np.ones(1000000)\n\n b = ddf.map_partitions(lambda x, y: x, y=big)\n assert any(big is v for v in b.dask.values())\n\n a = ddf.map_partitions(lambda x, y: x, big)\n assert any(big is v for v in a.dask.values())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_partitions_indexer_test_partitions_indexer.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_partitions_indexer_test_partitions_indexer.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 4163, "end_line": 4174, "span_ids": ["test_partitions_indexer"], "tokens": 152}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_partitions_indexer():\n df = pd.DataFrame({\"x\": range(10)})\n ddf = dd.from_pandas(df, npartitions=5)\n\n assert_eq(ddf.partitions[0], ddf.get_partition(0))\n assert_eq(ddf.partitions[3], ddf.get_partition(3))\n assert_eq(ddf.partitions[-1], ddf.get_partition(4))\n\n assert ddf.partitions[:3].npartitions == 3\n assert ddf.x.partitions[:3].npartitions == 3\n\n assert ddf.x.partitions[::2].compute().tolist() == [0, 1, 4, 5, 8, 9]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_mod_eq_test_meta_error_message.assert_pandas_in_str_in": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_mod_eq_test_meta_error_message.assert_pandas_in_str_in", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 4189, "end_line": 4249, "span_ids": ["test_meta_error_message", "test_mod_eq", "test_scalar_with_array", "test_has_parallel_type", "test_setitem_with_numeric_column_name_raises_not_implemented", "test_setitem_with_bool_dataframe_as_key", "test_broadcast", "test_setitem"], "tokens": 580}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_mod_eq():\n df = pd.DataFrame({\"a\": [1, 2, 3]})\n ddf = dd.from_pandas(df, npartitions=1)\n assert_eq(df, ddf)\n assert_eq(df.a, ddf.a)\n assert_eq(df.a + 2, ddf.a + 2)\n assert_eq(df.a + 2 == 0, ddf.a + 2 == 0)\n\n\ndef test_setitem():\n df = pd.DataFrame({\"A\": [1, 2], \"B\": [3, 4]})\n ddf = dd.from_pandas(df.copy(), 2)\n df[df.columns] = 1\n ddf[ddf.columns] = 1\n assert_eq(df, ddf)\n\n\ndef test_setitem_with_bool_dataframe_as_key():\n df = pd.DataFrame({\"A\": [1, 4], \"B\": [3, 2]})\n ddf = dd.from_pandas(df.copy(), 2)\n df[df > 2] = 5\n ddf[ddf > 2] = 5\n assert_eq(df, ddf)\n\n\ndef test_setitem_with_numeric_column_name_raises_not_implemented():\n df = pd.DataFrame({0: [1, 4], 1: [3, 2]})\n ddf = 
dd.from_pandas(df.copy(), 2)\n # works for pandas\n df[0] = 5\n # raises error for dask\n with pytest.raises(NotImplementedError, match=\"not supported\"):\n ddf[0] = 5\n\n\ndef test_broadcast():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5]})\n ddf = dd.from_pandas(df, npartitions=2)\n assert_eq(ddf - (ddf.sum() + 1), df - (df.sum() + 1))\n\n\ndef test_scalar_with_array():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n da.utils.assert_eq(df.x.values + df.x.mean(), ddf.x.values + ddf.x.mean())\n\n\ndef test_has_parallel_type():\n assert has_parallel_type(pd.DataFrame())\n assert has_parallel_type(pd.Series(dtype=float))\n assert not has_parallel_type(123)\n\n\ndef test_meta_error_message():\n with pytest.raises(TypeError) as info:\n dd.DataFrame({(\"x\", 1): 123}, \"x\", pd.Series(dtype=float), [None, None])\n\n assert \"Series\" in str(info.value)\n assert \"DataFrame\" in str(info.value)\n assert \"pandas\" in str(info.value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_index_test_map_index.assert_applied_divisions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_index_test_map_index.assert_applied_divisions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 4222, "end_line": 4232, "span_ids": ["test_map_index"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_index():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5]})\n ddf = dd.from_pandas(df, npartitions=2)\n assert ddf.known_divisions is True\n\n cleared = ddf.index.map(lambda x: x * 10)\n assert cleared.known_divisions is False\n\n applied = ddf.index.map(lambda x: x * 10, is_monotonic=True)\n assert applied.known_divisions is True\n assert applied.divisions == tuple(x * 10 for x in ddf.divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_assign_index_test_index_divisions.assert_eq_ddf_index_df": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_assign_index_test_index_divisions.assert_eq_ddf_index_df", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 4235, "end_line": 4256, "span_ids": ["test_index_divisions", "test_assign_index"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_assign_index():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n ddf_copy = ddf.copy()\n\n ddf.index = ddf.index * 10\n\n expected = df.copy()\n expected.index = expected.index * 10\n\n assert_eq(ddf, expected)\n assert_eq(ddf_copy, df)\n\n\ndef test_index_divisions():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n assert_eq(ddf.index + 1, df.index + 1)\n assert_eq(10 * ddf.index, 10 * df.index)\n assert_eq(-ddf.index, -df.index)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_replace_test_replace.assert_eq_df_x_replace_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_replace_test_replace.assert_eq_df_x_replace_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 4259, "end_line": 4266, "span_ids": ["test_replace"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_replace():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n assert_eq(df.replace(1, 10), ddf.replace(1, 10))\n assert_eq(df.replace({1: 10, 2: 20}), ddf.replace({1: 10, 2: 20}))\n assert_eq(df.x.replace(1, 10), ddf.x.replace(1, 10))\n assert_eq(df.x.replace({1: 10, 2: 20}), ddf.x.replace({1: 10, 2: 20}))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_partitions_delays_lists_test_map_partitions_delays_lists.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_map_partitions_delays_lists_test_map_partitions_delays_lists.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 4269, "end_line": 4278, "span_ids": ["test_map_partitions_delays_lists"], "tokens": 138}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def 
test_map_partitions_delays_lists():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n L = list(range(100))\n out = ddf.map_partitions(lambda x, y: x + sum(y), y=L)\n assert any(str(L) == str(v) for v in out.__dask_graph__().values())\n\n out = ddf.map_partitions(lambda x, y: x + sum(y), L)\n assert any(str(L) == str(v) for v in out.__dask_graph__().values())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dtype_cast_test_dtype_cast.None_7": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dtype_cast_test_dtype_cast.None_7", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 4281, "end_line": 4303, "span_ids": ["test_dtype_cast"], "tokens": 198}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dtype_cast():\n df = pd.DataFrame(\n {\n \"A\": np.arange(10, dtype=np.int32),\n \"B\": np.arange(10, dtype=np.int64),\n \"C\": np.arange(10, dtype=np.float32),\n }\n )\n ddf = dd.from_pandas(df, npartitions=2)\n assert ddf.A.dtype == np.int32\n assert ddf.B.dtype == np.int64\n assert ddf.C.dtype == np.float32\n\n col = pd.Series(np.arange(10, dtype=np.float32)) / 2\n assert col.dtype == np.float32\n\n ddf = ddf.assign(D=col)\n assert ddf.D.dtype == np.float32\n assert ddf.C.dtype == np.float32\n # fails\n assert ddf.B.dtype == np.int64\n # fails\n assert ddf.A.dtype == np.int32", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_series_map_test_series_map.dd_utils_assert_eq_expect": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_series_map_test_series_map.dd_utils_assert_eq_expect", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 4306, "end_line": 4328, "span_ids": ["test_series_map"], "tokens": 277}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"base_npart\", [1, 4])\n@pytest.mark.parametrize(\"map_npart\", [1, 3])\n@pytest.mark.parametrize(\"sorted_index\", [False, True])\n@pytest.mark.parametrize(\"sorted_map_index\", [False, True])\ndef 
test_series_map(base_npart, map_npart, sorted_index, sorted_map_index):\n base = pd.Series(\n [\"\".join(np.random.choice([\"a\", \"b\", \"c\"], size=3)) for x in range(100)]\n )\n if not sorted_index:\n index = np.arange(100)\n np.random.shuffle(index)\n base.index = index\n map_index = [\"\".join(x) for x in product(\"abc\", repeat=3)]\n mapper = pd.Series(np.random.randint(50, size=len(map_index)), index=map_index)\n if not sorted_map_index:\n map_index = np.array(map_index)\n np.random.shuffle(map_index)\n mapper.index = map_index\n expected = base.map(mapper)\n dask_base = dd.from_pandas(base, npartitions=base_npart, sort=False)\n dask_map = dd.from_pandas(mapper, npartitions=map_npart, sort=False)\n result = dask_base.map(dask_map)\n dd.utils.assert_eq(expected, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dataframe_explode_test_dataframe_explode.assert_eq_exploded_ddf_co": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dataframe_explode_test_dataframe_explode.assert_eq_exploded_ddf_co", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 4331, "end_line": 4340, "span_ids": ["test_dataframe_explode"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n PANDAS_VERSION < \"0.25.0\", reason=\"Explode not implemented in pandas < 0.25.0\"\n)\ndef test_dataframe_explode():\n df = pd.DataFrame({\"A\": [[1, 2, 3], \"foo\", [3, 4]], \"B\": 1})\n exploded_df = df.explode(\"A\")\n ddf = dd.from_pandas(df, npartitions=2)\n exploded_ddf = ddf.explode(\"A\")\n assert ddf.divisions == exploded_ddf.divisions\n assert_eq(exploded_ddf.compute(), exploded_df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_series_explode_test_pop.assert_eq_ddf_df_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_series_explode_test_pop.assert_eq_ddf_df_x_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 4343, "end_line": 4363, "span_ids": ["test_series_explode", "test_pop"], "tokens": 195}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n PANDAS_VERSION < \"0.25.0\", reason=\"Explode not implemented in pandas < 0.25.0\"\n)\ndef test_series_explode():\n s = pd.Series([[1, 2, 3], \"foo\", [3, 4]])\n exploded_s = s.explode()\n ds = dd.from_pandas(s, npartitions=2)\n exploded_ds = ds.explode()\n assert_eq(exploded_ds, exploded_s)\n assert ds.divisions == exploded_ds.divisions\n\n\ndef test_pop():\n df = pd.DataFrame({\"x\": range(10), \"y\": range(10)})\n\n ddf = dd.from_pandas(df, npartitions=2)\n\n s = ddf.pop(\"y\")\n assert s.name == \"y\"\n assert ddf.columns == [\"x\"]\n assert_eq(ddf, df[[\"x\"]])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_simple_map_partitions_test_simple_map_partitions.assert_v_0_M_clip_or_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_simple_map_partitions_test_simple_map_partitions.assert_v_0_M_clip_or_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 4366, "end_line": 4373, "span_ids": ["test_simple_map_partitions"], "tokens": 135}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_simple_map_partitions():\n data = {\"col_0\": [9, -3, 0, -1, 5], \"col_1\": [-2, -7, 6, 8, -5]}\n df = pd.DataFrame(data)\n ddf = dd.from_pandas(df, npartitions=2)\n ddf = ddf.clip(-4, 6)\n task = ddf.__dask_graph__()[ddf.__dask_keys__()[0]]\n [v] = task[0].dsk.values()\n assert v[0] == M.clip or v[1] == M.clip", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_iter_test_dataframe_groupby_agg_empty_partitions.assert_eq_ddf_ddf_x_5_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_iter_test_dataframe_groupby_agg_empty_partitions.assert_eq_ddf_ddf_x_5_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 4376, "end_line": 4388, "span_ids": ["test_dataframe_groupby_agg_empty_partitions", "test_iter"], "tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_iter():\n df = pd.DataFrame({\"A\": [1, 2, 3, 4], \"B\": [1, 2, 3, 4]})\n ddf = 
dd.from_pandas(df, 2)\n\n assert list(df) == list(ddf)\n for col, expected in zip(ddf, [\"A\", \"B\"]):\n assert col == expected\n\n\ndef test_dataframe_groupby_agg_empty_partitions():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5, 6, 7, 8]})\n ddf = dd.from_pandas(df, npartitions=4)\n assert_eq(ddf[ddf.x < 5].x.cumsum(), df[df.x < 5].x.cumsum())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_extensions.py_from_decimal_import_Decim_test_register_extension_type.assert_eq_df_ddf_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_extensions.py_from_decimal_import_Decim_test_register_extension_type.assert_eq_df_ddf_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_extensions.py", "file_name": "test_extensions.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 35, "span_ids": ["imports", "test_register_extension_type", "__1", "_"], "tokens": 246}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from decimal import Decimal\nimport pytest\n\nimport dask.dataframe as dd\nfrom dask.dataframe.utils import assert_eq, PANDAS_VERSION\n\npd = pytest.importorskip(\"pandas\", minversion=\"0.23.4\")\n\nfrom pandas.tests.extension.decimal.array import DecimalArray, DecimalDtype\nfrom dask.dataframe.extensions import make_array_nonempty, make_scalar\n\n\n@make_array_nonempty.register(DecimalDtype)\ndef _(dtype):\n kwargs = {}\n if PANDAS_VERSION >= \"0.24.0rc1\":\n kwargs[\"dtype\"] = dtype\n\n return DecimalArray._from_sequence([Decimal(\"0\"), Decimal(\"NaN\")], **kwargs)\n\n\n@make_scalar.register(Decimal)\ndef _(x):\n return Decimal(\"1\")\n\n\ndef test_register_extension_type():\n arr = DecimalArray._from_sequence([Decimal(\"1.0\")] * 10)\n ser = pd.Series(arr)\n dser = dd.from_pandas(ser, 2)\n assert_eq(ser, dser)\n\n df = pd.DataFrame({\"A\": ser})\n ddf = dd.from_pandas(df, 2)\n assert_eq(df, ddf)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_extensions.py_test_reduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_extensions.py_test_reduction_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_extensions.py", "file_name": "test_extensions.py", "file_type": "text/x-python", "category": "test", "start_line": 38, "end_line": 54, "span_ids": ["test_reduction", "test_scalar"], "tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_reduction():\n pytest.importorskip(\"pandas\", minversion=\"0.24.0\")\n ser = pd.Series(DecimalArray._from_sequence([Decimal(\"0\"), Decimal(\"1\")]))\n dser = dd.from_pandas(ser, 2)\n assert_eq(ser.mean(skipna=False), dser.mean(skipna=False))\n\n # It's unclear whether this can be reliably provided, at least with the current\n # implementation, which uses pandas.DataFrame.sum(), returning a (homogenous)\n # series which has potentially cast values.\n\n # assert_eq(ser.to_frame().mean(skipna=False), dser.to_frame().mean(skipna=False))\n\n\ndef test_scalar():\n result = dd.utils.make_meta(Decimal(\"1.0\"))\n assert result == Decimal(\"1.0\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_pd_test_repr_meta_mutation.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_pd_test_repr_meta_mutation.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_format.py", "file_name": "test_format.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 41, "span_ids": ["test_repr", "imports", "test_repr_meta_mutation"], "tokens": 268}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pandas as pd\nfrom textwrap import dedent\n\nimport dask.dataframe as dd\nimport dask.array as da\nimport numpy as np\n\n\nstyle = \"\"\"\n\"\"\"\n\n\ndef test_repr():\n df = pd.DataFrame({\"x\": list(range(100))})\n ddf = dd.from_pandas(df, 3)\n\n for x in [ddf, ddf.index, ddf.x]:\n assert type(x).__name__ in repr(x)\n assert str(x.npartitions) in repr(x)\n\n\ndef test_repr_meta_mutation():\n # Check that the repr changes when meta changes\n df = pd.DataFrame({\"a\": range(5), \"b\": [\"a\", \"b\", \"c\", \"d\", \"e\"]})\n ddf = dd.from_pandas(df, npartitions=2)\n s1 = repr(ddf)\n assert repr(ddf) == s1\n ddf.b = ddf.b.astype(\"category\")\n assert repr(ddf) != s1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_dataframe_format_test_dataframe_format.assert_ddf__repr_html__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_dataframe_format_test_dataframe_format.assert_ddf__repr_html__", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_format.py", "file_name": "test_format.py", "file_type": "text/x-python", "category": "test", "start_line": 44, "end_line": 134, "span_ids": ["test_dataframe_format"], "tokens": 687}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", 
"start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dataframe_format():\n df = pd.DataFrame(\n {\n \"A\": [1, 2, 3, 4, 5, 6, 7, 8],\n \"B\": list(\"ABCDEFGH\"),\n \"C\": pd.Categorical(list(\"AAABBBCC\")),\n }\n )\n ddf = dd.from_pandas(df, 3)\n exp = (\n \"Dask DataFrame Structure:\\n\"\n \" A B C\\n\"\n \"npartitions=3 \\n\"\n \"0 int64 object category[known]\\n\"\n \"3 ... ... ...\\n\"\n \"6 ... ... ...\\n\"\n \"7 ... ... ...\\n\"\n \"Dask Name: from_pandas, 3 tasks\"\n )\n assert repr(ddf) == exp\n assert str(ddf) == exp\n\n exp = (\n \" A B C\\n\"\n \"npartitions=3 \\n\"\n \"0 int64 object category[known]\\n\"\n \"3 ... ... ...\\n\"\n \"6 ... ... ...\\n\"\n \"7 ... ... ...\"\n )\n assert ddf.to_string() == exp\n\n exp_table = \"\"\"\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
ABC
npartitions=3
0int64objectcategory[known]
3.........
6.........
7.........
\"\"\"\n\n exp = \"\"\"
<div><strong>Dask DataFrame Structure:</strong></div>\n{exp_table}\n<div>Dask Name: from_pandas, 3 tasks</div>
\"\"\".format(\n exp_table=exp_table\n )\n assert ddf.to_html() == exp\n\n # table is boxed with div and has style\n exp = \"\"\"
<div><strong>Dask DataFrame Structure:</strong></div>\n<div>\n{style}{exp_table}\n</div>\n<div>Dask Name: from_pandas, 3 tasks</div>
\"\"\".format(\n style=style, exp_table=exp_table\n )\n assert ddf._repr_html_() == exp", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_dataframe_format_with_index_test_dataframe_format_with_index.assert_ddf__repr_html__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_dataframe_format_with_index_test_dataframe_format_with_index.assert_ddf__repr_html__", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_format.py", "file_name": "test_format.py", "file_type": "text/x-python", "category": "test", "start_line": 137, "end_line": 218, "span_ids": ["test_dataframe_format_with_index"], "tokens": 609}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dataframe_format_with_index():\n df = pd.DataFrame(\n {\n \"A\": [1, 2, 3, 4, 5, 6, 7, 8],\n \"B\": list(\"ABCDEFGH\"),\n \"C\": pd.Categorical(list(\"AAABBBCC\")),\n },\n index=list(\"ABCDEFGH\"),\n )\n ddf = dd.from_pandas(df, 3)\n exp = (\n \"Dask DataFrame Structure:\\n\"\n \" A B C\\n\"\n \"npartitions=3 \\n\"\n \"A int64 object category[known]\\n\"\n \"D ... ... ...\\n\"\n \"G ... ... ...\\n\"\n \"H ... ... ...\\n\"\n \"Dask Name: from_pandas, 3 tasks\"\n )\n assert repr(ddf) == exp\n assert str(ddf) == exp\n\n exp_table = \"\"\"\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
ABC
npartitions=3
Aint64objectcategory[known]
D.........
G.........
H.........
\"\"\"\n\n exp = \"\"\"
<div><strong>Dask DataFrame Structure:</strong></div>\n{exp_table}\n<div>Dask Name: from_pandas, 3 tasks</div>
\"\"\".format(\n exp_table=exp_table\n )\n assert ddf.to_html() == exp\n\n # table is boxed with div and has style\n exp = \"\"\"
<div><strong>Dask DataFrame Structure:</strong></div>\n<div>\n{style}{exp_table}\n</div>\n<div>Dask Name: from_pandas, 3 tasks</div>
\"\"\".format(\n style=style, exp_table=exp_table\n )\n assert ddf._repr_html_() == exp", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_dataframe_format_unknown_divisions_test_dataframe_format_unknown_divisions.assert_ddf__repr_html__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_dataframe_format_unknown_divisions_test_dataframe_format_unknown_divisions.assert_ddf__repr_html__", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_format.py", "file_name": "test_format.py", "file_type": "text/x-python", "category": "test", "start_line": 221, "end_line": 314, "span_ids": ["test_dataframe_format_unknown_divisions"], "tokens": 694}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dataframe_format_unknown_divisions():\n df = pd.DataFrame(\n {\n \"A\": [1, 2, 3, 4, 5, 6, 7, 8],\n \"B\": list(\"ABCDEFGH\"),\n \"C\": pd.Categorical(list(\"AAABBBCC\")),\n }\n )\n ddf = dd.from_pandas(df, 3)\n ddf = ddf.clear_divisions()\n assert not ddf.known_divisions\n\n exp = (\n \"Dask DataFrame Structure:\\n\"\n \" A B C\\n\"\n \"npartitions=3 \\n\"\n \" int64 object category[known]\\n\"\n \" ... ... ...\\n\"\n \" ... ... ...\\n\"\n \" ... ... ...\\n\"\n \"Dask Name: from_pandas, 3 tasks\"\n )\n assert repr(ddf) == exp\n assert str(ddf) == exp\n\n exp = (\n \" A B C\\n\"\n \"npartitions=3 \\n\"\n \" int64 object category[known]\\n\"\n \" ... ... ...\\n\"\n \" ... ... ...\\n\"\n \" ... ... ...\"\n )\n assert ddf.to_string() == exp\n\n exp_table = \"\"\"\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
ABC
npartitions=3
int64objectcategory[known]
.........
.........
.........
\"\"\"\n\n exp = \"\"\"
<div><strong>Dask DataFrame Structure:</strong></div>\n{exp_table}\n<div>Dask Name: from_pandas, 3 tasks</div>
\"\"\".format(\n exp_table=exp_table\n )\n assert ddf.to_html() == exp\n\n # table is boxed with div and has style\n exp = \"\"\"
<div><strong>Dask DataFrame Structure:</strong></div>\n<div>\n{style}{exp_table}\n</div>\n<div>Dask Name: from_pandas, 3 tasks</div>
\"\"\".format(\n style=style, exp_table=exp_table\n )\n assert ddf._repr_html_() == exp", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_dataframe_format_long_test_dataframe_format_long.assert_ddf__repr_html__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_dataframe_format_long_test_dataframe_format_long.assert_ddf__repr_html__", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_format.py", "file_name": "test_format.py", "file_type": "text/x-python", "category": "test", "start_line": 317, "end_line": 415, "span_ids": ["test_dataframe_format_long"], "tokens": 753}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dataframe_format_long():\n df = pd.DataFrame(\n {\n \"A\": [1, 2, 3, 4, 5, 6, 7, 8] * 10,\n \"B\": list(\"ABCDEFGH\") * 10,\n \"C\": pd.Categorical(list(\"AAABBBCC\") * 10),\n }\n )\n ddf = dd.from_pandas(df, 10)\n exp = (\n \"Dask DataFrame Structure:\\n\"\n \" A B C\\n\"\n \"npartitions=10 \\n\"\n \"0 int64 object category[known]\\n\"\n \"8 ... ... ...\\n\"\n \"... ... ... ...\\n\"\n \"72 ... ... ...\\n\"\n \"79 ... ... ...\\n\"\n \"Dask Name: from_pandas, 10 tasks\"\n )\n assert repr(ddf) == exp\n assert str(ddf) == exp\n\n exp = (\n \" A B C\\n\"\n \"npartitions=10 \\n\"\n \"0 int64 object category[known]\\n\"\n \"8 ... ... ...\\n\"\n \"... ... ... ...\\n\"\n \"72 ... ... ...\\n\"\n \"79 ... ... ...\"\n )\n assert ddf.to_string() == exp\n\n exp_table = \"\"\"\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
ABC
npartitions=10
0int64objectcategory[known]
8.........
............
72.........
79.........
\"\"\"\n\n exp = \"\"\"
<div><strong>Dask DataFrame Structure:</strong></div>\n{exp_table}\n<div>Dask Name: from_pandas, 10 tasks</div>
\"\"\".format(\n exp_table=exp_table\n )\n assert ddf.to_html() == exp\n\n # table is boxed with div\n exp = \"\"\"
<div><strong>Dask DataFrame Structure:</strong></div>\n<div>\n{style}{exp_table}\n</div>\n<div>Dask Name: from_pandas, 10 tasks</div>
\"\"\".format(\n style=style, exp_table=exp_table\n )\n assert ddf._repr_html_() == exp", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_series_format_test_series_format.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_series_format_test_series_format.None_4", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_format.py", "file_name": "test_format.py", "file_type": "text/x-python", "category": "test", "start_line": 418, "end_line": 450, "span_ids": ["test_series_format"], "tokens": 264}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_series_format():\n s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=list(\"ABCDEFGH\"))\n ds = dd.from_pandas(s, 3)\n exp = \"\"\"Dask Series Structure:\nnpartitions=3\nA int64\nD ...\nG ...\nH ...\ndtype: int64\nDask Name: from_pandas, 3 tasks\"\"\"\n assert repr(ds) == exp\n assert str(ds) == exp\n\n exp = \"\"\"npartitions=3\nA int64\nD ...\nG ...\nH ...\"\"\"\n assert ds.to_string() == exp\n\n s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=list(\"ABCDEFGH\"), name=\"XXX\")\n ds = dd.from_pandas(s, 3)\n exp = \"\"\"Dask Series Structure:\nnpartitions=3\nA int64\nD ...\nG ...\nH ...\nName: XXX, dtype: int64\nDask Name: from_pandas, 3 tasks\"\"\"\n assert repr(ds) == exp\n assert str(ds) == exp", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_series_format_long_test_series_format_long.assert_ds_to_string_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_series_format_long_test_series_format_long.assert_ds_to_string_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_format.py", "file_name": "test_format.py", "file_type": "text/x-python", "category": "test", "start_line": 453, "end_line": 465, "span_ids": ["test_series_format_long"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_series_format_long():\n s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9, 10] * 10, index=list(\"ABCDEFGHIJ\") * 10)\n ds = dd.from_pandas(s, 10)\n exp = (\n \"Dask Series Structure:\\nnpartitions=10\\nA int64\\nB ...\\n\"\n \" ... \\nJ ...\\nJ ...\\ndtype: int64\\n\"\n \"Dask Name: from_pandas, 10 tasks\"\n )\n assert repr(ds) == exp\n assert str(ds) == exp\n\n exp = \"npartitions=10\\nA int64\\nB ...\\n ... 
\\nJ ...\\nJ ...\"\n assert ds.to_string() == exp", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_index_format_test_index_format.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_index_format_test_index_format.None_3", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_format.py", "file_name": "test_format.py", "file_type": "text/x-python", "category": "test", "start_line": 468, "end_line": 499, "span_ids": ["test_index_format"], "tokens": 280}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_index_format():\n s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=list(\"ABCDEFGH\"))\n ds = dd.from_pandas(s, 3)\n exp = \"\"\"Dask Index Structure:\nnpartitions=3\nA object\nD ...\nG ...\nH ...\ndtype: object\nDask Name: from_pandas, 6 tasks\"\"\"\n assert repr(ds.index) == exp\n assert str(ds.index) == exp\n\n s = pd.Series(\n [1, 2, 3, 4, 5, 6, 7, 8],\n index=pd.CategoricalIndex([1, 2, 3, 4, 5, 6, 7, 8], name=\"YYY\"),\n )\n ds = dd.from_pandas(s, 3)\n exp = dedent(\n \"\"\"\\\n Dask Index Structure:\n npartitions=3\n 1 category[known]\n 4 ...\n 7 ...\n 8 ...\n Name: YYY, dtype: category\n Dask Name: from_pandas, 6 tasks\"\"\"\n )\n assert repr(ds.index) == exp\n assert str(ds.index) == exp", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_categorical_format_test_duplicate_columns_repr.repr_frame_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_categorical_format_test_duplicate_columns_repr.repr_frame_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_format.py", "file_name": "test_format.py", "file_type": "text/x-python", "category": "test", "start_line": 502, "end_line": 529, "span_ids": ["test_categorical_format", "test_duplicate_columns_repr"], "tokens": 240}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_categorical_format():\n s = pd.Series([\"a\", \"b\", \"c\"]).astype(\"category\")\n known = dd.from_pandas(s, npartitions=1)\n unknown = known.cat.as_unknown()\n exp = (\n \"Dask Series Structure:\\n\"\n \"npartitions=1\\n\"\n \"0 category[known]\\n\"\n \"2 ...\\n\"\n \"dtype: category\\n\"\n \"Dask Name: from_pandas, 1 tasks\"\n )\n assert repr(known) == exp\n exp = (\n \"Dask Series Structure:\\n\"\n \"npartitions=1\\n\"\n 
\"0 category[unknown]\\n\"\n \"2 ...\\n\"\n \"dtype: category\\n\"\n \"Dask Name: from_pandas, 1 tasks\"\n )\n assert repr(unknown) == exp\n\n\ndef test_duplicate_columns_repr():\n arr = da.from_array(np.arange(10).reshape(5, 2), chunks=(5, 2))\n frame = dd.from_dask_array(arr, columns=[\"a\", \"a\"])\n repr(frame)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_empty_repr_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_empty_repr_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_format.py", "file_name": "test_format.py", "file_type": "text/x-python", "category": "test", "start_line": 532, "end_line": 568, "span_ids": ["test_empty_repr"], "tokens": 257}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_empty_repr():\n df = pd.DataFrame()\n ddf = dd.from_pandas(df, npartitions=1)\n exp = (\n \"Empty Dask DataFrame Structure:\\n\"\n \"Columns: []\\n\"\n \"Divisions: [, ]\\n\"\n \"Dask Name: from_pandas, 1 tasks\"\n )\n assert repr(ddf) == exp\n exp_table = \"\"\"\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
npartitions=1
\"\"\"\n exp = \"\"\"
Dask DataFrame Structure:
\n
\n{style}{exp_table}\n
\n
Dask Name: from_pandas, 1 tasks
\"\"\".format(\n style=style, exp_table=exp_table\n )\n assert ddf._repr_html_() == exp", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_collections_agg_func.return.request_param": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_collections_agg_func.return.request_param", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 45, "span_ids": ["imports", "agg_func"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import collections\nimport warnings\nfrom packaging import version\n\nimport numpy as np\nimport pandas as pd\n\nimport pytest\n\nimport dask\nfrom dask.utils import M\nimport dask.dataframe as dd\nfrom dask.dataframe._compat import tm, PANDAS_GT_100\nfrom dask.dataframe import _compat\nfrom dask.dataframe.utils import (\n assert_eq,\n assert_dask_graph,\n assert_max_deps,\n PANDAS_VERSION,\n)\n\nAGG_FUNCS = [\n \"sum\",\n \"mean\",\n \"min\",\n \"max\",\n \"count\",\n \"size\",\n \"std\",\n \"var\",\n \"cov\",\n \"corr\",\n \"nunique\",\n \"first\",\n \"last\",\n \"prod\",\n]\n\n\n@pytest.fixture(params=AGG_FUNCS)\ndef agg_func(request):\n \"\"\"\n Aggregations supported for groups\n \"\"\"\n return request.param", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_internal_repr_xfail_test_groupby_internal_repr_xfail.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_internal_repr_xfail_test_groupby_internal_repr_xfail.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 48, "end_line": 60, "span_ids": ["test_groupby_internal_repr_xfail"], "tokens": 162}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(reason=\"uncertain how to handle. 
See issue #3481.\")\ndef test_groupby_internal_repr_xfail():\n pdf = pd.DataFrame({\"x\": [0, 1, 2, 3, 4, 6, 7, 8, 9, 10], \"y\": list(\"abcbabbcda\")})\n ddf = dd.from_pandas(pdf, 3)\n\n gp = pdf.groupby(\"y\")[\"x\"]\n dp = ddf.groupby(\"y\")[\"x\"]\n assert isinstance(dp.obj, dd.Series)\n assert_eq(dp.obj, gp.obj)\n\n gp = pdf.groupby(pdf.y)[\"x\"]\n dp = ddf.groupby(ddf.y)[\"x\"]\n assert isinstance(dp.obj, dd.Series)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_internal_repr_test_groupby_internal_repr.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_internal_repr_test_groupby_internal_repr.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 63, "end_line": 98, "span_ids": ["test_groupby_internal_repr"], "tokens": 376}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_internal_repr():\n pdf = pd.DataFrame({\"x\": [0, 1, 2, 3, 4, 6, 7, 8, 9, 10], \"y\": list(\"abcbabbcda\")})\n ddf = dd.from_pandas(pdf, 3)\n\n gp = pdf.groupby(\"y\")\n dp = ddf.groupby(\"y\")\n assert isinstance(dp, dd.groupby.DataFrameGroupBy)\n assert isinstance(dp._meta, pd.core.groupby.DataFrameGroupBy)\n assert isinstance(dp.obj, dd.DataFrame)\n assert_eq(dp.obj, gp.obj)\n\n gp = pdf.groupby(\"y\")[\"x\"]\n dp = ddf.groupby(\"y\")[\"x\"]\n assert isinstance(dp, dd.groupby.SeriesGroupBy)\n assert isinstance(dp._meta, pd.core.groupby.SeriesGroupBy)\n\n gp = pdf.groupby(\"y\")[[\"x\"]]\n dp = ddf.groupby(\"y\")[[\"x\"]]\n assert isinstance(dp, dd.groupby.DataFrameGroupBy)\n assert isinstance(dp._meta, pd.core.groupby.DataFrameGroupBy)\n # slicing should not affect to internal\n assert isinstance(dp.obj, dd.DataFrame)\n assert_eq(dp.obj, gp.obj)\n\n gp = pdf.groupby(pdf.y)[\"x\"]\n dp = ddf.groupby(ddf.y)[\"x\"]\n assert isinstance(dp, dd.groupby.SeriesGroupBy)\n assert isinstance(dp._meta, pd.core.groupby.SeriesGroupBy)\n\n gp = pdf.groupby(pdf.y)[[\"x\"]]\n dp = ddf.groupby(ddf.y)[[\"x\"]]\n assert isinstance(dp, dd.groupby.DataFrameGroupBy)\n assert isinstance(dp._meta, pd.core.groupby.DataFrameGroupBy)\n # slicing should not affect to internal\n assert isinstance(dp.obj, dd.DataFrame)\n assert_eq(dp.obj, gp.obj)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_error_test_groupby_error.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_error_test_groupby_error.None_1", "embedding": null, 
"metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 101, "end_line": 121, "span_ids": ["test_groupby_error"], "tokens": 177}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_error():\n pdf = pd.DataFrame({\"x\": [0, 1, 2, 3, 4, 6, 7, 8, 9, 10], \"y\": list(\"abcbabbcda\")})\n ddf = dd.from_pandas(pdf, 3)\n\n with pytest.raises(KeyError):\n ddf.groupby(\"A\")\n\n with pytest.raises(KeyError):\n ddf.groupby([\"x\", \"A\"])\n\n dp = ddf.groupby(\"y\")\n\n msg = \"Column not found: \"\n with pytest.raises(KeyError) as err:\n dp[\"A\"]\n assert msg in str(err.value)\n\n msg = \"Columns not found: \"\n with pytest.raises(KeyError) as err:\n dp[[\"x\", \"A\"]]\n assert msg in str(err.value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_full_groupby_test_full_groupby.with_warnings_catch_warni.assert_eq_df_groupby_a_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_full_groupby_test_full_groupby.with_warnings_catch_warni.assert_eq_df_groupby_a_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 124, "end_line": 142, "span_ids": ["test_full_groupby"], "tokens": 233}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_full_groupby():\n df = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5, 6, 7, 8, 9], \"b\": [4, 5, 6, 3, 2, 1, 0, 0, 0]},\n index=[0, 1, 3, 5, 6, 8, 9, 9, 9],\n )\n ddf = dd.from_pandas(df, npartitions=3)\n\n pytest.raises(KeyError, lambda: ddf.groupby(\"does_not_exist\"))\n pytest.raises(AttributeError, lambda: ddf.groupby(\"a\").does_not_exist)\n assert \"b\" in dir(ddf.groupby(\"a\"))\n\n def func(df):\n return df.assign(b=df.b - df.b.mean())\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n assert ddf.groupby(\"a\").apply(func)._name.startswith(\"func\")\n\n assert_eq(df.groupby(\"a\").apply(func), ddf.groupby(\"a\").apply(func))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_full_groupby_apply_multiarg_test_full_groupby_apply_multiarg.None_1": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_full_groupby_apply_multiarg_test_full_groupby_apply_multiarg.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 145, "end_line": 211, "span_ids": ["test_full_groupby_apply_multiarg"], "tokens": 626}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_full_groupby_apply_multiarg():\n df = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5, 6, 7, 8, 9], \"b\": [4, 5, 6, 3, 2, 1, 0, 0, 0]},\n index=[0, 1, 3, 5, 6, 8, 9, 9, 9],\n )\n ddf = dd.from_pandas(df, npartitions=3)\n\n def func(df, c, d=3):\n return df.assign(b=df.b - df.b.mean() + c * d)\n\n c = df.a.sum()\n d = df.b.mean()\n\n c_scalar = ddf.a.sum()\n d_scalar = ddf.b.mean()\n c_delayed = dask.delayed(lambda: c)()\n d_delayed = dask.delayed(lambda: d)()\n\n meta = df.groupby(\"a\").apply(func, c)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n assert_eq(\n df.groupby(\"a\").apply(func, c, d=d),\n ddf.groupby(\"a\").apply(func, c, d=d_scalar),\n )\n\n assert_eq(df.groupby(\"a\").apply(func, c), ddf.groupby(\"a\").apply(func, c))\n\n assert_eq(\n df.groupby(\"a\").apply(func, c, d=d), ddf.groupby(\"a\").apply(func, c, d=d)\n )\n\n assert_eq(\n df.groupby(\"a\").apply(func, c),\n ddf.groupby(\"a\").apply(func, c_scalar),\n check_dtype=False,\n )\n\n assert_eq(\n df.groupby(\"a\").apply(func, c),\n ddf.groupby(\"a\").apply(func, c_scalar, meta=meta),\n )\n\n assert_eq(\n df.groupby(\"a\").apply(func, c, d=d),\n ddf.groupby(\"a\").apply(func, c, d=d_scalar, meta=meta),\n )\n\n # Delayed arguments work, but only if metadata is provided\n with pytest.raises(ValueError) as exc:\n ddf.groupby(\"a\").apply(func, c, d=d_delayed)\n assert \"dask.delayed\" in str(exc.value) and \"meta\" in str(exc.value)\n\n with pytest.raises(ValueError) as exc:\n ddf.groupby(\"a\").apply(func, c_delayed, d=d)\n assert \"dask.delayed\" in str(exc.value) and \"meta\" in str(exc.value)\n\n assert_eq(\n df.groupby(\"a\").apply(func, c),\n ddf.groupby(\"a\").apply(func, c_delayed, meta=meta),\n )\n\n assert_eq(\n df.groupby(\"a\").apply(func, c, d=d),\n ddf.groupby(\"a\").apply(func, c, d=d_delayed, meta=meta),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_full_groupby_multilevel_test_groupby_dir.assert_b_c_d_e_not_in_d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_full_groupby_multilevel_test_groupby_dir.assert_b_c_d_e_not_in_d", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 214, "end_line": 259, "span_ids": ["test_groupby_dir", "test_full_groupby_multilevel"], "tokens": 448}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"grouper\",\n [\n lambda df: [\"a\"],\n lambda df: [\"a\", \"b\"],\n lambda df: df[\"a\"],\n lambda df: [df[\"a\"], df[\"b\"]],\n pytest.param(\n lambda df: [df[\"a\"] > 2, df[\"b\"] > 1],\n marks=pytest.mark.xfail(reason=\"not yet supported\"),\n ),\n ],\n)\n@pytest.mark.parametrize(\"reverse\", [True, False])\ndef test_full_groupby_multilevel(grouper, reverse):\n index = [0, 1, 3, 5, 6, 8, 9, 9, 9]\n if reverse:\n index = index[::-1]\n df = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 4, 5, 6, 7, 8, 9],\n \"d\": [1, 2, 3, 4, 5, 6, 7, 8, 9],\n \"b\": [4, 5, 6, 3, 2, 1, 0, 0, 0],\n },\n index=index,\n )\n ddf = dd.from_pandas(df, npartitions=3)\n\n def func(df):\n return df.assign(b=df.b - df.b.mean())\n\n # last one causes a DeprecationWarning from pandas.\n # See https://github.com/pandas-dev/pandas/issues/16481\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n assert_eq(\n df.groupby(grouper(df)).apply(func), ddf.groupby(grouper(ddf)).apply(func)\n )\n\n\ndef test_groupby_dir():\n df = pd.DataFrame({\"a\": range(10), \"b c d e\": range(10)})\n ddf = dd.from_pandas(df, npartitions=2)\n g = ddf.groupby(\"a\")\n assert \"a\" in dir(g)\n assert \"b c d e\" not in dir(g)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_on_index_test_groupby_on_index.with_dask_config_set_sche.with_pytest_warns_None_.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_on_index_test_groupby_on_index.with_dask_config_set_sche.with_pytest_warns_None_.None_4", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 262, "end_line": 304, "span_ids": ["test_groupby_on_index"], "tokens": 400}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"scheduler\", [\"sync\", \"threads\"])\ndef test_groupby_on_index(scheduler):\n pdf = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5, 6, 7, 8, 9], \"b\": [4, 5, 6, 3, 2, 1, 0, 0, 0]},\n index=[0, 1, 3, 5, 6, 8, 9, 9, 9],\n )\n ddf = dd.from_pandas(pdf, npartitions=3)\n\n ddf2 = ddf.set_index(\"a\")\n pdf2 = pdf.set_index(\"a\")\n assert_eq(ddf.groupby(\"a\").b.mean(), ddf2.groupby(ddf2.index).b.mean())\n\n def func(df):\n return df.assign(b=df.b - df.b.mean())\n\n def func2(df):\n return df[[\"b\"]] - df[[\"b\"]].mean()\n\n def func3(df):\n return df.mean()\n\n with dask.config.set(scheduler=scheduler):\n with pytest.warns(None):\n assert_eq(ddf.groupby(\"a\").apply(func), 
pdf.groupby(\"a\").apply(func))\n\n assert_eq(\n ddf.groupby(\"a\").apply(func).set_index(\"a\"),\n pdf.groupby(\"a\").apply(func).set_index(\"a\"),\n )\n\n assert_eq(\n pdf2.groupby(pdf2.index).apply(func2),\n ddf2.groupby(ddf2.index).apply(func2),\n )\n\n assert_eq(\n ddf2.b.groupby(\"a\").apply(func3), pdf2.b.groupby(\"a\").apply(func3)\n )\n\n assert_eq(\n ddf2.b.groupby(ddf2.index).apply(func3),\n pdf2.b.groupby(pdf2.index).apply(func3),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_multilevel_getitem_test_groupby_multilevel_getitem.if_agg_func_mean_.else_.assert_eq_a_b_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_multilevel_getitem_test_groupby_multilevel_getitem.if_agg_func_mean_.else_.assert_eq_a_b_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 307, "end_line": 359, "span_ids": ["test_groupby_multilevel_getitem"], "tokens": 475}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"grouper\",\n [\n lambda df: df.groupby(\"a\")[\"b\"],\n lambda df: df.groupby([\"a\", \"b\"]),\n lambda df: df.groupby([\"a\", \"b\"])[\"c\"],\n lambda df: df.groupby(df[\"a\"])[[\"b\", \"c\"]],\n lambda df: df.groupby(\"a\")[[\"b\", \"c\"]],\n lambda df: df.groupby(\"a\")[[\"b\"]],\n lambda df: df.groupby([\"a\", \"b\", \"c\"]),\n ],\n)\ndef test_groupby_multilevel_getitem(grouper, agg_func):\n # nunique is not implemented for DataFrameGroupBy\n if agg_func == \"nunique\":\n return\n\n df = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 1, 2, 3],\n \"b\": [1, 2, 1, 4, 2, 1],\n \"c\": [1, 3, 2, 1, 1, 2],\n \"d\": [1, 2, 1, 1, 2, 2],\n }\n )\n ddf = dd.from_pandas(df, 2)\n\n dask_group = grouper(ddf)\n pandas_group = grouper(df)\n\n # covariance/correlation only works with N+1 columns\n if isinstance(pandas_group, pd.core.groupby.SeriesGroupBy) and agg_func in (\n \"cov\",\n \"corr\",\n ):\n return\n\n dask_agg = getattr(dask_group, agg_func)\n pandas_agg = getattr(pandas_group, agg_func)\n\n assert isinstance(dask_group, dd.groupby._GroupBy)\n assert isinstance(pandas_group, pd.core.groupby.GroupBy)\n\n if agg_func == \"mean\":\n assert_eq(dask_agg(), pandas_agg().astype(float))\n else:\n a = dask_agg()\n with warnings.catch_warnings():\n # pandas does `.cov([[1], [1]])` which numpy warns on (all NaN).\n # Pandas does strange things with exceptions in groupby.\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n b = pandas_agg()\n assert_eq(a, b)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_multilevel_agg_test_groupby_multilevel_agg.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_multilevel_agg_test_groupby_multilevel_agg.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 362, "end_line": 383, "span_ids": ["test_groupby_multilevel_agg"], "tokens": 227}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_multilevel_agg():\n df = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 1, 2, 3],\n \"b\": [1, 2, 1, 4, 2, 1],\n \"c\": [1, 3, 2, 1, 1, 2],\n \"d\": [1, 2, 1, 1, 2, 2],\n }\n )\n ddf = dd.from_pandas(df, 2)\n\n sol = df.groupby([\"a\"]).mean()\n res = ddf.groupby([\"a\"]).mean()\n assert_eq(res, sol)\n\n sol = df.groupby([\"a\", \"c\"]).mean()\n res = ddf.groupby([\"a\", \"c\"]).mean()\n assert_eq(res, sol)\n\n sol = df.groupby([df[\"a\"], df[\"c\"]]).mean()\n res = ddf.groupby([ddf[\"a\"], ddf[\"c\"]]).mean()\n assert_eq(res, sol)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_get_group_test_groupby_get_group.for_ddkey_pdkey_in_b_.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_get_group_test_groupby_get_group.for_ddkey_pdkey_in_b_.None_3", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 386, "end_line": 404, "span_ids": ["test_groupby_get_group"], "tokens": 329}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_get_group():\n dsk = {\n (\"x\", 0): pd.DataFrame({\"a\": [1, 2, 6], \"b\": [4, 2, 7]}, index=[0, 1, 3]),\n (\"x\", 1): pd.DataFrame({\"a\": [4, 2, 6], \"b\": [3, 3, 1]}, index=[5, 6, 8]),\n (\"x\", 2): pd.DataFrame({\"a\": [4, 3, 7], \"b\": [1, 1, 3]}, index=[9, 9, 9]),\n }\n meta = dsk[(\"x\", 0)]\n d = dd.DataFrame(dsk, \"x\", meta, [0, 4, 9, 9])\n full = d.compute()\n\n for ddkey, pdkey in [(\"b\", \"b\"), (d.b, full.b), (d.b + 1, full.b + 1)]:\n ddgrouped = d.groupby(ddkey)\n pdgrouped = full.groupby(pdkey)\n # DataFrame\n assert_eq(ddgrouped.get_group(2), pdgrouped.get_group(2))\n assert_eq(ddgrouped.get_group(3), pdgrouped.get_group(3))\n # Series\n assert_eq(ddgrouped.a.get_group(3), pdgrouped.a.get_group(3))\n assert_eq(ddgrouped.a.get_group(2), pdgrouped.a.get_group(2))", 
"start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_dataframe_groupby_nunique_test_series_groupby_propagates_names.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_dataframe_groupby_nunique_test_series_groupby_propagates_names.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 407, "end_line": 432, "span_ids": ["test_dataframe_groupby_nunique_across_group_same_value", "test_dataframe_groupby_nunique", "test_series_groupby_propagates_names"], "tokens": 291}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dataframe_groupby_nunique():\n strings = list(\"aaabbccccdddeee\")\n data = np.random.randn(len(strings))\n ps = pd.DataFrame(dict(strings=strings, data=data))\n s = dd.from_pandas(ps, npartitions=3)\n expected = ps.groupby(\"strings\")[\"data\"].nunique()\n assert_eq(s.groupby(\"strings\")[\"data\"].nunique(), expected)\n\n\ndef test_dataframe_groupby_nunique_across_group_same_value():\n strings = list(\"aaabbccccdddeee\")\n data = list(map(int, \"123111223323412\"))\n ps = pd.DataFrame(dict(strings=strings, data=data))\n s = dd.from_pandas(ps, npartitions=3)\n expected = ps.groupby(\"strings\")[\"data\"].nunique()\n assert_eq(s.groupby(\"strings\")[\"data\"].nunique(), expected)\n\n\ndef test_series_groupby_propagates_names():\n df = pd.DataFrame({\"x\": [1, 2, 3], \"y\": [4, 5, 6]})\n ddf = dd.from_pandas(df, 2)\n func = lambda df: df[\"y\"].sum()\n with pytest.warns(UserWarning): # meta inference\n result = ddf.groupby(\"x\").apply(func)\n expected = df.groupby(\"x\").apply(func)\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_test_series_groupby.for_dg_pdg_in_dask_gro.assert_eq_dg_prod_pdg_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_test_series_groupby.for_dg_pdg_in_dask_gro.assert_eq_dg_prod_pdg_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 435, "end_line": 453, "span_ids": ["test_series_groupby"], "tokens": 197}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_series_groupby():\n s = pd.Series([1, 2, 2, 1, 1])\n pd_group = s.groupby(s)\n\n ss = dd.from_pandas(s, npartitions=2)\n dask_group = ss.groupby(ss)\n\n pd_group2 = s.groupby(s + 1)\n dask_group2 = ss.groupby(ss + 1)\n\n for dg, pdg in [(dask_group, pd_group), (pd_group2, dask_group2)]:\n assert_eq(dg.count(), pdg.count())\n assert_eq(dg.sum(), pdg.sum())\n assert_eq(dg.min(), pdg.min())\n assert_eq(dg.max(), pdg.max())\n assert_eq(dg.size(), pdg.size())\n assert_eq(dg.first(), pdg.first())\n assert_eq(dg.last(), pdg.last())\n assert_eq(dg.prod(), pdg.prod())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_errors_test_series_groupby_errors.None_4._dask_should_raise_the_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_errors_test_series_groupby_errors.None_4._dask_should_raise_the_s", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 456, "end_line": 476, "span_ids": ["test_series_groupby_errors"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_series_groupby_errors():\n s = pd.Series([1, 2, 2, 1, 1])\n\n ss = dd.from_pandas(s, npartitions=2)\n\n msg = \"No group keys passed!\"\n with pytest.raises(ValueError) as err:\n s.groupby([]) # pandas\n assert msg in str(err.value)\n with pytest.raises(ValueError) as err:\n ss.groupby([]) # dask should raise the same error\n assert msg in str(err.value)\n\n sss = dd.from_pandas(s, npartitions=5)\n with pytest.raises(NotImplementedError):\n ss.groupby(sss)\n\n with pytest.raises(KeyError):\n s.groupby(\"x\") # pandas\n with pytest.raises(KeyError):\n ss.groupby(\"x\") # dask should raise the same error", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_index_array_test_groupby_set_index.pytest_raises_TypeError_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_index_array_test_groupby_set_index.pytest_raises_TypeError_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 479, "end_line": 501, "span_ids": ["test_groupby_set_index", "test_groupby_index_array"], "tokens": 170}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", 
"end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_index_array():\n df = _compat.makeTimeDataFrame()\n ddf = dd.from_pandas(df, npartitions=2)\n\n # first select column, then group\n assert_eq(\n df.A.groupby(df.index.month).nunique(),\n ddf.A.groupby(ddf.index.month).nunique(),\n check_names=False,\n )\n\n # first group, then select column\n assert_eq(\n df.groupby(df.index.month).A.nunique(),\n ddf.groupby(ddf.index.month).A.nunique(),\n check_names=False,\n )\n\n\ndef test_groupby_set_index():\n df = _compat.makeTimeDataFrame()\n ddf = dd.from_pandas(df, npartitions=2)\n pytest.raises(TypeError, lambda: ddf.groupby(df.index.month, as_index=False))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_split_apply_combine_on_series_test_split_apply_combine_on_series.for_ddkey_pdkey_in_b_.None_1.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_split_apply_combine_on_series_test_split_apply_combine_on_series.for_ddkey_pdkey_in_b_.None_1.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 504, "end_line": 555, "span_ids": ["test_split_apply_combine_on_series"], "tokens": 740}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"empty\", [True, False])\n@pytest.mark.filterwarnings(\n \"ignore:0 should be:DeprecationWarning\"\n) # fixed in new pandas.\ndef test_split_apply_combine_on_series(empty):\n if empty:\n pdf = pd.DataFrame({\"a\": [1.0], \"b\": [1.0]}, index=[0]).iloc[:0]\n # There's a bug in pandas where df.groupby(...).var(ddof=0) results in\n # no columns. 
Just skip these checks for now.\n ddofs = []\n else:\n ddofs = [0, 1, 2]\n pdf = pd.DataFrame(\n {\"a\": [1, 2, 6, 4, 4, 6, 4, 3, 7], \"b\": [4, 2, 7, 3, 3, 1, 1, 1, 2]},\n index=[0, 1, 3, 5, 6, 8, 9, 9, 9],\n )\n ddf = dd.from_pandas(pdf, npartitions=3)\n\n for ddkey, pdkey in [(\"b\", \"b\"), (ddf.b, pdf.b), (ddf.b + 1, pdf.b + 1)]:\n assert_eq(ddf.groupby(ddkey).a.min(), pdf.groupby(pdkey).a.min())\n assert_eq(ddf.groupby(ddkey).a.max(), pdf.groupby(pdkey).a.max())\n assert_eq(ddf.groupby(ddkey).a.count(), pdf.groupby(pdkey).a.count())\n assert_eq(ddf.groupby(ddkey).a.mean(), pdf.groupby(pdkey).a.mean())\n assert_eq(ddf.groupby(ddkey).a.nunique(), pdf.groupby(pdkey).a.nunique())\n assert_eq(ddf.groupby(ddkey).a.size(), pdf.groupby(pdkey).a.size())\n assert_eq(ddf.groupby(ddkey).a.first(), pdf.groupby(pdkey).a.first())\n assert_eq(ddf.groupby(ddkey).a.last(), pdf.groupby(pdkey).a.last())\n for ddof in ddofs:\n assert_eq(ddf.groupby(ddkey).a.var(ddof), pdf.groupby(pdkey).a.var(ddof))\n assert_eq(ddf.groupby(ddkey).a.std(ddof), pdf.groupby(pdkey).a.std(ddof))\n\n assert_eq(ddf.groupby(ddkey).sum(), pdf.groupby(pdkey).sum())\n assert_eq(ddf.groupby(ddkey).min(), pdf.groupby(pdkey).min())\n assert_eq(ddf.groupby(ddkey).max(), pdf.groupby(pdkey).max())\n assert_eq(ddf.groupby(ddkey).count(), pdf.groupby(pdkey).count())\n assert_eq(ddf.groupby(ddkey).mean(), pdf.groupby(pdkey).mean())\n assert_eq(ddf.groupby(ddkey).size(), pdf.groupby(pdkey).size())\n assert_eq(ddf.groupby(ddkey).first(), pdf.groupby(pdkey).first())\n assert_eq(ddf.groupby(ddkey).last(), pdf.groupby(pdkey).last())\n assert_eq(ddf.groupby(ddkey).prod(), pdf.groupby(pdkey).prod())\n\n for ddof in ddofs:\n assert_eq(\n ddf.groupby(ddkey).var(ddof),\n pdf.groupby(pdkey).var(ddof),\n check_dtype=False,\n )\n assert_eq(\n ddf.groupby(ddkey).std(ddof),\n pdf.groupby(pdkey).std(ddof),\n check_dtype=False,\n )\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_split_apply_combine_on_series.for_ddkey_pdkey_in_ddf_test_split_apply_combine_on_series.for_ddkey_pdkey_in_ddf.for_ddof_in_ddofs_.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_split_apply_combine_on_series.for_ddkey_pdkey_in_ddf_test_split_apply_combine_on_series.for_ddkey_pdkey_in_ddf.for_ddof_in_ddofs_.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 557, "end_line": 591, "span_ids": ["test_split_apply_combine_on_series"], "tokens": 374}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"empty\", [True, False])\n@pytest.mark.filterwarnings(\n \"ignore:0 should be:DeprecationWarning\"\n) # fixed in new pandas.\ndef test_split_apply_combine_on_series(empty):\n # ... 
other code\n\n for ddkey, pdkey in [(ddf.b, pdf.b), (ddf.b + 1, pdf.b + 1)]:\n assert_eq(\n ddf.a.groupby(ddkey).sum(), pdf.a.groupby(pdkey).sum(), check_names=False\n )\n assert_eq(\n ddf.a.groupby(ddkey).max(), pdf.a.groupby(pdkey).max(), check_names=False\n )\n assert_eq(\n ddf.a.groupby(ddkey).count(),\n pdf.a.groupby(pdkey).count(),\n check_names=False,\n )\n assert_eq(\n ddf.a.groupby(ddkey).mean(), pdf.a.groupby(pdkey).mean(), check_names=False\n )\n assert_eq(\n ddf.a.groupby(ddkey).nunique(),\n pdf.a.groupby(pdkey).nunique(),\n check_names=False,\n )\n assert_eq(\n ddf.a.groupby(ddkey).first(),\n pdf.a.groupby(pdkey).first(),\n check_names=False,\n )\n assert_eq(\n ddf.a.groupby(ddkey).last(), pdf.a.groupby(pdkey).last(), check_names=False\n )\n assert_eq(\n ddf.a.groupby(ddkey).prod(), pdf.a.groupby(pdkey).prod(), check_names=False\n )\n\n for ddof in ddofs:\n assert_eq(ddf.a.groupby(ddkey).var(ddof), pdf.a.groupby(pdkey).var(ddof))\n assert_eq(ddf.a.groupby(ddkey).std(ddof), pdf.a.groupby(pdkey).std(ddof))\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_split_apply_combine_on_series.for_i_in_0_4_7__test_split_apply_combine_on_series.for_i_in_0_4_7_.for_ddof_in_ddofs_.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_split_apply_combine_on_series.for_i_in_0_4_7__test_split_apply_combine_on_series.for_i_in_0_4_7_.for_ddof_in_ddofs_.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 593, "end_line": 643, "span_ids": ["test_split_apply_combine_on_series"], "tokens": 1035}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"empty\", [True, False])\n@pytest.mark.filterwarnings(\n \"ignore:0 should be:DeprecationWarning\"\n) # fixed in new pandas.\ndef test_split_apply_combine_on_series(empty):\n # ... 
other code\n\n for i in [0, 4, 7]:\n assert_eq(ddf.groupby(ddf.b > i).a.sum(), pdf.groupby(pdf.b > i).a.sum())\n assert_eq(ddf.groupby(ddf.b > i).a.min(), pdf.groupby(pdf.b > i).a.min())\n assert_eq(ddf.groupby(ddf.b > i).a.max(), pdf.groupby(pdf.b > i).a.max())\n assert_eq(ddf.groupby(ddf.b > i).a.count(), pdf.groupby(pdf.b > i).a.count())\n assert_eq(ddf.groupby(ddf.b > i).a.mean(), pdf.groupby(pdf.b > i).a.mean())\n assert_eq(\n ddf.groupby(ddf.b > i).a.nunique(), pdf.groupby(pdf.b > i).a.nunique()\n )\n assert_eq(ddf.groupby(ddf.b > i).a.size(), pdf.groupby(pdf.b > i).a.size())\n assert_eq(ddf.groupby(ddf.b > i).a.first(), pdf.groupby(pdf.b > i).a.first())\n assert_eq(ddf.groupby(ddf.b > i).a.last(), pdf.groupby(pdf.b > i).a.last())\n assert_eq(ddf.groupby(ddf.b > i).a.prod(), pdf.groupby(pdf.b > i).a.prod())\n\n assert_eq(ddf.groupby(ddf.a > i).b.sum(), pdf.groupby(pdf.a > i).b.sum())\n assert_eq(ddf.groupby(ddf.a > i).b.min(), pdf.groupby(pdf.a > i).b.min())\n assert_eq(ddf.groupby(ddf.a > i).b.max(), pdf.groupby(pdf.a > i).b.max())\n assert_eq(ddf.groupby(ddf.a > i).b.count(), pdf.groupby(pdf.a > i).b.count())\n assert_eq(ddf.groupby(ddf.a > i).b.mean(), pdf.groupby(pdf.a > i).b.mean())\n assert_eq(\n ddf.groupby(ddf.a > i).b.nunique(), pdf.groupby(pdf.a > i).b.nunique()\n )\n assert_eq(ddf.groupby(ddf.b > i).b.size(), pdf.groupby(pdf.b > i).b.size())\n assert_eq(ddf.groupby(ddf.b > i).b.first(), pdf.groupby(pdf.b > i).b.first())\n assert_eq(ddf.groupby(ddf.b > i).b.last(), pdf.groupby(pdf.b > i).b.last())\n assert_eq(ddf.groupby(ddf.b > i).b.prod(), pdf.groupby(pdf.b > i).b.prod())\n\n assert_eq(ddf.groupby(ddf.b > i).sum(), pdf.groupby(pdf.b > i).sum())\n assert_eq(ddf.groupby(ddf.b > i).min(), pdf.groupby(pdf.b > i).min())\n assert_eq(ddf.groupby(ddf.b > i).max(), pdf.groupby(pdf.b > i).max())\n assert_eq(ddf.groupby(ddf.b > i).count(), pdf.groupby(pdf.b > i).count())\n assert_eq(ddf.groupby(ddf.b > i).mean(), pdf.groupby(pdf.b > i).mean())\n assert_eq(ddf.groupby(ddf.b > i).size(), pdf.groupby(pdf.b > i).size())\n assert_eq(ddf.groupby(ddf.b > i).first(), pdf.groupby(pdf.b > i).first())\n assert_eq(ddf.groupby(ddf.b > i).last(), pdf.groupby(pdf.b > i).last())\n assert_eq(ddf.groupby(ddf.b > i).prod(), pdf.groupby(pdf.b > i).prod())\n\n assert_eq(ddf.groupby(ddf.a > i).sum(), pdf.groupby(pdf.a > i).sum())\n assert_eq(ddf.groupby(ddf.a > i).min(), pdf.groupby(pdf.a > i).min())\n assert_eq(ddf.groupby(ddf.a > i).max(), pdf.groupby(pdf.a > i).max())\n assert_eq(ddf.groupby(ddf.a > i).count(), pdf.groupby(pdf.a > i).count())\n assert_eq(ddf.groupby(ddf.a > i).mean(), pdf.groupby(pdf.a > i).mean())\n assert_eq(ddf.groupby(ddf.a > i).size(), pdf.groupby(pdf.a > i).size())\n assert_eq(ddf.groupby(ddf.a > i).first(), pdf.groupby(pdf.a > i).first())\n assert_eq(ddf.groupby(ddf.a > i).last(), pdf.groupby(pdf.a > i).last())\n assert_eq(ddf.groupby(ddf.a > i).prod(), pdf.groupby(pdf.a > i).prod())\n\n for ddof in ddofs:\n assert_eq(\n ddf.groupby(ddf.b > i).std(ddof), pdf.groupby(pdf.b > i).std(ddof)\n )\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_split_apply_combine_on_series.for_ddkey_pdkey_in__test_split_apply_combine_on_series.None_7": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_split_apply_combine_on_series.for_ddkey_pdkey_in__test_split_apply_combine_on_series.None_7", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 645, "end_line": 696, "span_ids": ["test_split_apply_combine_on_series"], "tokens": 803}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"empty\", [True, False])\n@pytest.mark.filterwarnings(\n \"ignore:0 should be:DeprecationWarning\"\n) # fixed in new pandas.\ndef test_split_apply_combine_on_series(empty):\n # ... other code\n\n for ddkey, pdkey in [\n (\"a\", \"a\"),\n (ddf.a, pdf.a),\n (ddf.a + 1, pdf.a + 1),\n (ddf.a > 3, pdf.a > 3),\n ]:\n assert_eq(ddf.groupby(ddkey).b.sum(), pdf.groupby(pdkey).b.sum())\n assert_eq(ddf.groupby(ddkey).b.min(), pdf.groupby(pdkey).b.min())\n assert_eq(ddf.groupby(ddkey).b.max(), pdf.groupby(pdkey).b.max())\n assert_eq(ddf.groupby(ddkey).b.count(), pdf.groupby(pdkey).b.count())\n assert_eq(ddf.groupby(ddkey).b.mean(), pdf.groupby(pdkey).b.mean())\n assert_eq(ddf.groupby(ddkey).b.nunique(), pdf.groupby(pdkey).b.nunique())\n assert_eq(ddf.groupby(ddkey).b.size(), pdf.groupby(pdkey).b.size())\n assert_eq(ddf.groupby(ddkey).b.first(), pdf.groupby(pdkey).b.first())\n assert_eq(ddf.groupby(ddkey).last(), pdf.groupby(pdkey).last())\n assert_eq(ddf.groupby(ddkey).prod(), pdf.groupby(pdkey).prod())\n\n assert_eq(ddf.groupby(ddkey).sum(), pdf.groupby(pdkey).sum())\n assert_eq(ddf.groupby(ddkey).min(), pdf.groupby(pdkey).min())\n assert_eq(ddf.groupby(ddkey).max(), pdf.groupby(pdkey).max())\n assert_eq(ddf.groupby(ddkey).count(), pdf.groupby(pdkey).count())\n assert_eq(ddf.groupby(ddkey).mean(), pdf.groupby(pdkey).mean().astype(float))\n assert_eq(ddf.groupby(ddkey).size(), pdf.groupby(pdkey).size())\n assert_eq(ddf.groupby(ddkey).first(), pdf.groupby(pdkey).first())\n assert_eq(ddf.groupby(ddkey).last(), pdf.groupby(pdkey).last())\n assert_eq(ddf.groupby(ddkey).prod(), pdf.groupby(pdkey).prod())\n\n for ddof in ddofs:\n assert_eq(ddf.groupby(ddkey).b.std(ddof), pdf.groupby(pdkey).b.std(ddof))\n\n assert sorted(ddf.groupby(\"b\").a.sum().dask) == sorted(\n ddf.groupby(\"b\").a.sum().dask\n )\n assert sorted(ddf.groupby(ddf.a > 3).b.mean().dask) == sorted(\n ddf.groupby(ddf.a > 3).b.mean().dask\n )\n\n # test raises with incorrect key\n pytest.raises(KeyError, lambda: ddf.groupby(\"x\"))\n pytest.raises(KeyError, lambda: ddf.groupby([\"a\", \"x\"]))\n pytest.raises(KeyError, lambda: ddf.groupby(\"a\")[\"x\"])\n with warnings.catch_warnings():\n # pandas warns about using tuples before throwing the KeyError\n 
warnings.simplefilter(\"ignore\", FutureWarning)\n pytest.raises(KeyError, lambda: ddf.groupby(\"a\")[\"b\", \"x\"])\n pytest.raises(KeyError, lambda: ddf.groupby(\"a\")[[\"b\", \"x\"]])\n\n # test graph node labels\n assert_dask_graph(ddf.groupby(\"b\").a.sum(), \"series-groupby-sum\")\n assert_dask_graph(ddf.groupby(\"b\").a.min(), \"series-groupby-min\")\n assert_dask_graph(ddf.groupby(\"b\").a.max(), \"series-groupby-max\")\n assert_dask_graph(ddf.groupby(\"b\").a.count(), \"series-groupby-count\")\n # mean consists from sum and count operations\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_split_apply_combine_on_series.None_8_test_split_apply_combine_on_series.None_26": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_split_apply_combine_on_series.None_8_test_split_apply_combine_on_series.None_26", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 697, "end_line": 718, "span_ids": ["test_split_apply_combine_on_series"], "tokens": 485}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"empty\", [True, False])\n@pytest.mark.filterwarnings(\n \"ignore:0 should be:DeprecationWarning\"\n) # fixed in new pandas.\ndef test_split_apply_combine_on_series(empty):\n # ... other code\n pytest.raises(KeyError, lambda: ddf.groupby(\"a\")[[\"b\", \"x\"]])\n # ... 
other code\n assert_dask_graph(ddf.groupby(\"b\").a.var(), \"series-groupby-var\")\n assert_dask_graph(ddf.groupby(\"b\").a.cov(), \"series-groupby-cov\")\n assert_dask_graph(ddf.groupby(\"b\").a.first(), \"series-groupby-first\")\n assert_dask_graph(ddf.groupby(\"b\").a.last(), \"series-groupby-last\")\n assert_dask_graph(ddf.groupby(\"b\").a.prod(), \"series-groupby-prod\")\n # mean consists from sum and count operations\n assert_dask_graph(ddf.groupby(\"b\").a.mean(), \"series-groupby-sum\")\n assert_dask_graph(ddf.groupby(\"b\").a.mean(), \"series-groupby-count\")\n assert_dask_graph(ddf.groupby(\"b\").a.nunique(), \"series-groupby-nunique\")\n assert_dask_graph(ddf.groupby(\"b\").a.size(), \"series-groupby-size\")\n\n assert_dask_graph(ddf.groupby(\"b\").sum(), \"dataframe-groupby-sum\")\n assert_dask_graph(ddf.groupby(\"b\").min(), \"dataframe-groupby-min\")\n assert_dask_graph(ddf.groupby(\"b\").max(), \"dataframe-groupby-max\")\n assert_dask_graph(ddf.groupby(\"b\").count(), \"dataframe-groupby-count\")\n assert_dask_graph(ddf.groupby(\"b\").first(), \"dataframe-groupby-first\")\n assert_dask_graph(ddf.groupby(\"b\").last(), \"dataframe-groupby-last\")\n assert_dask_graph(ddf.groupby(\"b\").prod(), \"dataframe-groupby-prod\")\n # mean consists from sum and count operations\n assert_dask_graph(ddf.groupby(\"b\").mean(), \"dataframe-groupby-sum\")\n assert_dask_graph(ddf.groupby(\"b\").mean(), \"dataframe-groupby-count\")\n assert_dask_graph(ddf.groupby(\"b\").size(), \"dataframe-groupby-size\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_reduction_split_test_groupby_reduction_split.assert_call_ddf_a_groupby": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_reduction_split_test_groupby_reduction_split.assert_call_ddf_a_groupby", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 721, "end_line": 778, "span_ids": ["test_groupby_reduction_split"], "tokens": 714}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"keyword\", [\"split_every\", \"split_out\"])\ndef test_groupby_reduction_split(keyword):\n pdf = pd.DataFrame(\n {\"a\": [1, 2, 6, 4, 4, 6, 4, 3, 7] * 100, \"b\": [4, 2, 7, 3, 3, 1, 1, 1, 2] * 100}\n )\n ddf = dd.from_pandas(pdf, npartitions=15)\n\n def call(g, m, **kwargs):\n return getattr(g, m)(**kwargs)\n\n # DataFrame\n for m in AGG_FUNCS:\n # nunique is not implemented for DataFrameGroupBy\n # covariance/correlation is not a series aggregation\n if m in (\"nunique\", \"cov\", \"corr\"):\n continue\n res = call(ddf.groupby(\"b\"), m, **{keyword: 2})\n sol = call(pdf.groupby(\"b\"), m)\n assert_eq(res, sol)\n assert call(ddf.groupby(\"b\"), m)._name != res._name\n\n res = call(ddf.groupby(\"b\"), \"var\", ddof=2, **{keyword: 2})\n sol = 
call(pdf.groupby(\"b\"), \"var\", ddof=2)\n assert_eq(res, sol)\n assert call(ddf.groupby(\"b\"), \"var\", ddof=2)._name != res._name\n\n # Series, post select\n for m in AGG_FUNCS:\n # covariance/correlation is not a series aggregation\n if m in (\"cov\", \"corr\"):\n continue\n res = call(ddf.groupby(\"b\").a, m, **{keyword: 2})\n sol = call(pdf.groupby(\"b\").a, m)\n assert_eq(res, sol)\n assert call(ddf.groupby(\"b\").a, m)._name != res._name\n\n res = call(ddf.groupby(\"b\").a, \"var\", ddof=2, **{keyword: 2})\n sol = call(pdf.groupby(\"b\").a, \"var\", ddof=2)\n assert_eq(res, sol)\n assert call(ddf.groupby(\"b\").a, \"var\", ddof=2)._name != res._name\n\n # Series, pre select\n for m in AGG_FUNCS:\n # covariance/correlation is not a series aggregation\n if m in (\"cov\", \"corr\"):\n continue\n res = call(ddf.a.groupby(ddf.b), m, **{keyword: 2})\n sol = call(pdf.a.groupby(pdf.b), m)\n # There's a bug in pandas 0.18.0 with `pdf.a.groupby(pdf.b).count()`\n # not forwarding the series name. Skip name checks here for now.\n assert_eq(res, sol, check_names=False)\n assert call(ddf.a.groupby(ddf.b), m)._name != res._name\n\n res = call(ddf.a.groupby(ddf.b), \"var\", ddof=2, **{keyword: 2})\n sol = call(pdf.a.groupby(pdf.b), \"var\", ddof=2)\n\n assert_eq(res, sol)\n assert call(ddf.a.groupby(ddf.b), \"var\", ddof=2)._name != res._name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_apply_or_transform_shuffle_test_apply_or_transform_shuffle.with_pytest_warns_UserWar.assert_eq_func_grouped_pd": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_apply_or_transform_shuffle_test_apply_or_transform_shuffle.with_pytest_warns_UserWar.assert_eq_func_grouped_pd", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 781, "end_line": 820, "span_ids": ["test_apply_or_transform_shuffle"], "tokens": 348}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"grouped\",\n [\n lambda df: df.groupby(\"A\"),\n lambda df: df.groupby(df[\"A\"]),\n lambda df: df.groupby(df[\"A\"] + 1),\n lambda df: df.groupby(\"A\")[\"B\"],\n # SeriesGroupBy:\n lambda df: df.groupby(\"A\")[\"B\"],\n lambda df: df.groupby(df[\"A\"])[\"B\"],\n lambda df: df.groupby(df[\"A\"] + 1)[\"B\"],\n # Series.groupby():\n lambda df: df.B.groupby(df[\"A\"]),\n lambda df: df.B.groupby(df[\"A\"] + 1),\n # DataFrameGroupBy with column slice:\n lambda df: df.groupby(\"A\")[[\"B\", \"C\"]],\n lambda df: df.groupby(df[\"A\"])[[\"B\", \"C\"]],\n lambda df: df.groupby(df[\"A\"] + 1)[[\"B\", \"C\"]],\n ],\n)\n@pytest.mark.parametrize(\n \"func\",\n [\n lambda grp: grp.apply(lambda x: x.sum()),\n lambda grp: grp.transform(lambda x: x.sum()),\n ],\n)\ndef test_apply_or_transform_shuffle(grouped, func):\n pdf = pd.DataFrame(\n {\n \"A\": [1, 2, 3, 4] * 
5,\n \"B\": np.random.randn(20),\n \"C\": np.random.randn(20),\n \"D\": np.random.randn(20),\n }\n )\n ddf = dd.from_pandas(pdf, 3)\n\n with pytest.warns(UserWarning): # meta inference\n assert_eq(func(grouped(pdf)), func(grouped(ddf)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_apply_or_transform_shuffle_multilevel_test_apply_or_transform_shuffle_multilevel.with_pytest_warns_UserWar.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_apply_or_transform_shuffle_multilevel_test_apply_or_transform_shuffle_multilevel.with_pytest_warns_UserWar.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 823, "end_line": 869, "span_ids": ["test_apply_or_transform_shuffle_multilevel"], "tokens": 371}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"grouper\",\n [\n lambda df: \"AA\",\n lambda df: [\"AA\", \"AB\"],\n lambda df: df[\"AA\"],\n lambda df: [df[\"AA\"], df[\"AB\"]],\n lambda df: df[\"AA\"] + 1,\n pytest.param(\n lambda df: [df[\"AA\"] + 1, df[\"AB\"] + 1],\n marks=pytest.mark.xfail(\"NotImplemented\"),\n ),\n ],\n)\n@pytest.mark.parametrize(\n \"func\",\n [\n lambda grouped: grouped.apply(lambda x: x.sum()),\n lambda grouped: grouped.transform(lambda x: x.sum()),\n ],\n)\ndef test_apply_or_transform_shuffle_multilevel(grouper, func):\n pdf = pd.DataFrame(\n {\n \"AB\": [1, 2, 3, 4] * 5,\n \"AA\": [1, 2, 3, 4] * 5,\n \"B\": np.random.randn(20),\n \"C\": np.random.randn(20),\n \"D\": np.random.randn(20),\n }\n )\n ddf = dd.from_pandas(pdf, 3)\n\n with pytest.warns(UserWarning):\n # DataFrameGroupBy\n assert_eq(func(ddf.groupby(grouper(ddf))), func(pdf.groupby(grouper(pdf))))\n\n # SeriesGroupBy\n assert_eq(\n func(ddf.groupby(grouper(ddf))[\"B\"]), func(pdf.groupby(grouper(pdf))[\"B\"])\n )\n\n # DataFrameGroupBy with column slice\n assert_eq(\n func(ddf.groupby(grouper(ddf))[[\"B\", \"C\"]]),\n func(pdf.groupby(grouper(pdf))[[\"B\", \"C\"]]),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_numeric_column_names_test_numeric_column_names.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_numeric_column_names_test_numeric_column_names.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 872, "end_line": 882, "span_ids": 
["test_numeric_column_names"], "tokens": 197}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_numeric_column_names():\n # df.groupby(0)[df.columns] fails if all columns are numbers (pandas bug)\n # This ensures that we cast all column iterables to list beforehand.\n df = pd.DataFrame({0: [0, 1, 0, 1], 1: [1, 2, 3, 4], 2: [0, 1, 0, 1]})\n ddf = dd.from_pandas(df, npartitions=2)\n assert_eq(ddf.groupby(0).sum(), df.groupby(0).sum())\n assert_eq(ddf.groupby([0, 2]).sum(), df.groupby([0, 2]).sum())\n assert_eq(\n ddf.groupby(0).apply(lambda x: x, meta={0: int, 1: int, 2: int}),\n df.groupby(0).apply(lambda x: x),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_apply_tasks_test_groupby_apply_tasks.with_dask_config_set_shuf.for_ind_in_lambda_x_A_.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_apply_tasks_test_groupby_apply_tasks.with_dask_config_set_shuf.for_ind_in_lambda_x_A_.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 885, "end_line": 903, "span_ids": ["test_groupby_apply_tasks"], "tokens": 210}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_apply_tasks():\n df = _compat.makeTimeDataFrame()\n df[\"A\"] = df.A // 0.1\n df[\"B\"] = df.B // 0.1\n ddf = dd.from_pandas(df, npartitions=10)\n\n with dask.config.set(shuffle=\"tasks\"):\n for ind in [lambda x: \"A\", lambda x: x.A]:\n a = df.groupby(ind(df)).apply(len)\n with pytest.warns(UserWarning):\n b = ddf.groupby(ind(ddf)).apply(len)\n assert_eq(a, b.compute())\n assert not any(\"partd\" in k[0] for k in b.dask)\n\n a = df.groupby(ind(df)).B.apply(len)\n with pytest.warns(UserWarning):\n b = ddf.groupby(ind(ddf)).B.apply(len)\n assert_eq(a, b.compute())\n assert not any(\"partd\" in k[0] for k in b.dask)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_multiprocessing_test_groupby_multiprocessing.with_dask_config_set_sche.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_multiprocessing_test_groupby_multiprocessing.with_dask_config_set_sche.assert_eq_", "embedding": null, "metadata": {"file_path": 
"dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 906, "end_line": 913, "span_ids": ["test_groupby_multiprocessing"], "tokens": 119}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_multiprocessing():\n df = pd.DataFrame({\"A\": [1, 2, 3, 4, 5], \"B\": [\"1\", \"1\", \"a\", \"a\", \"a\"]})\n ddf = dd.from_pandas(df, npartitions=3)\n with dask.config.set(scheduler=\"processes\"):\n assert_eq(\n ddf.groupby(\"B\").apply(lambda x: x, meta={\"A\": int, \"B\": object}),\n df.groupby(\"B\").apply(lambda x: x),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_normalize_index_test_groupby_normalize_index.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_normalize_index_test_groupby_normalize_index.None_5", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 916, "end_line": 929, "span_ids": ["test_groupby_normalize_index"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_normalize_index():\n full = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5, 6, 7, 8, 9], \"b\": [4, 5, 6, 3, 2, 1, 0, 0, 0]},\n index=[0, 1, 3, 5, 6, 8, 9, 9, 9],\n )\n d = dd.from_pandas(full, npartitions=3)\n\n assert d.groupby(\"a\").index == \"a\"\n assert d.groupby(d[\"a\"]).index == \"a\"\n assert d.groupby(d[\"a\"] > 2).index._name == (d[\"a\"] > 2)._name\n assert d.groupby([\"a\", \"b\"]).index == [\"a\", \"b\"]\n\n assert d.groupby([d[\"a\"], d[\"b\"]]).index == [\"a\", \"b\"]\n assert d.groupby([d[\"a\"], \"b\"]).index == [\"a\", \"b\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_aggregate__examples_test_aggregate__examples.if_not_PANDAS_GT_100_.with_pytest_warns_None_.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_aggregate__examples_test_aggregate__examples.if_not_PANDAS_GT_100_.with_pytest_warns_None_.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 932, "end_line": 988, 
"span_ids": ["test_aggregate__examples"], "tokens": 560}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"spec\",\n [\n {\"b\": {\"c\": \"mean\"}, \"c\": {\"a\": \"max\", \"b\": \"min\"}},\n {\"b\": \"mean\", \"c\": [\"min\", \"max\"]},\n {\"b\": np.sum, \"c\": [\"min\", np.max, np.std, np.var]},\n [\n \"sum\",\n \"mean\",\n \"min\",\n \"max\",\n \"count\",\n \"size\",\n \"std\",\n \"var\",\n \"first\",\n \"last\",\n \"prod\",\n ],\n \"var\",\n {\"b\": \"mean\", \"c\": \"first\", \"d\": \"last\", \"a\": [\"first\", \"last\"]},\n {\"b\": {\"c\": \"mean\"}, \"c\": {\"a\": \"first\", \"b\": \"last\"}},\n ],\n)\n@pytest.mark.parametrize(\"split_every\", [False, None])\n@pytest.mark.parametrize(\n \"grouper\",\n [\n lambda df: \"a\",\n lambda df: [\"a\", \"d\"],\n lambda df: [df[\"a\"], df[\"d\"]],\n lambda df: df[\"a\"],\n lambda df: df[\"a\"] > 2,\n ],\n)\ndef test_aggregate__examples(spec, split_every, grouper):\n pdf = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 1, 1, 2, 4, 3, 7] * 10,\n \"b\": [4, 2, 7, 3, 3, 1, 1, 1, 2] * 10,\n \"c\": [0, 1, 2, 3, 4, 5, 6, 7, 8] * 10,\n \"d\": [3, 2, 1, 3, 2, 1, 2, 6, 4] * 10,\n },\n columns=[\"c\", \"b\", \"a\", \"d\"],\n )\n ddf = dd.from_pandas(pdf, npartitions=10)\n\n # Warning from pandas deprecation .agg(dict[dict])\n # it's from pandas, so no reason to assert the deprecation warning,\n # but we should still test it for now\n if not PANDAS_GT_100:\n # removed in pandas 1.0\n with pytest.warns(None):\n assert_eq(\n pdf.groupby(grouper(pdf)).agg(spec),\n ddf.groupby(grouper(ddf)).agg(spec, split_every=split_every),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_aggregate__examples_test_series_aggregate__examples.if_not_PANDAS_GT_100_.with_pytest_warns_None_.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_aggregate__examples_test_series_aggregate__examples.if_not_PANDAS_GT_100_.with_pytest_warns_None_.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 991, "end_line": 1029, "span_ids": ["test_series_aggregate__examples"], "tokens": 446}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"spec\",\n [\n {\"b\": \"sum\", \"c\": \"min\", \"d\": \"max\"},\n [\"sum\"],\n [\"sum\", \"mean\", \"min\", \"max\", \"count\", \"size\", \"std\", \"var\", \"first\", \"last\"],\n \"sum\",\n \"size\",\n ],\n)\n@pytest.mark.parametrize(\"split_every\", [False, None])\n@pytest.mark.parametrize(\n \"grouper\",\n [lambda 
df: [df[\"a\"], df[\"d\"]], lambda df: df[\"a\"], lambda df: df[\"a\"] > 2],\n)\ndef test_series_aggregate__examples(spec, split_every, grouper):\n pdf = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 1, 1, 2, 4, 3, 7] * 10,\n \"b\": [4, 2, 7, 3, 3, 1, 1, 1, 2] * 10,\n \"c\": [0, 1, 2, 3, 4, 5, 6, 7, 8] * 10,\n \"d\": [3, 2, 1, 3, 2, 1, 2, 6, 4] * 10,\n },\n columns=[\"c\", \"b\", \"a\", \"d\"],\n )\n ps = pdf[\"c\"]\n\n ddf = dd.from_pandas(pdf, npartitions=10)\n ds = ddf[\"c\"]\n # Warning from pandas deprecation .agg(dict[dict])\n # it's from pandas, so no reason to assert the deprecation warning,\n # but we should still test it for now\n if not PANDAS_GT_100:\n # removed in pandas 1.0\n with pytest.warns(None):\n assert_eq(\n ps.groupby(grouper(pdf)).agg(spec),\n ds.groupby(grouper(ddf)).agg(spec, split_every=split_every),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_aggregate__single_element_groups_test_aggregate__single_element_groups.assert_eq_expected_ddf_g": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_aggregate__single_element_groups_test_aggregate__single_element_groups.assert_eq_expected_ddf_g", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1032, "end_line": 1051, "span_ids": ["test_aggregate__single_element_groups"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_aggregate__single_element_groups(agg_func):\n spec = agg_func\n\n # nunique/cov is not supported in specs\n if spec in (\"nunique\", \"cov\", \"corr\"):\n return\n\n pdf = pd.DataFrame(\n {\"a\": [1, 1, 3, 3], \"b\": [4, 4, 16, 16], \"c\": [1, 1, 4, 4], \"d\": [1, 1, 3, 3]},\n columns=[\"c\", \"b\", \"a\", \"d\"],\n )\n ddf = dd.from_pandas(pdf, npartitions=3)\n\n expected = pdf.groupby([\"a\", \"d\"]).agg(spec)\n\n # NOTE: for std the result is not recast ot the original dtype\n if spec in {\"mean\", \"var\"}:\n expected = expected.astype(float)\n\n assert_eq(expected, ddf.groupby([\"a\", \"d\"]).agg(spec))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_aggregate_build_agg_args__reuse_of_intermediates_test_aggregate_build_agg_args__reuse_of_intermediates.assert_len_with_mean_fina": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_aggregate_build_agg_args__reuse_of_intermediates_test_aggregate_build_agg_args__reuse_of_intermediates.assert_len_with_mean_fina", "embedding": null, "metadata": {"file_path": 
"dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1054, "end_line": 1078, "span_ids": ["test_aggregate_build_agg_args__reuse_of_intermediates"], "tokens": 236}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_aggregate_build_agg_args__reuse_of_intermediates():\n \"\"\"Aggregate reuses intermediates. For example, with sum, count, and mean\n the sums and counts are only calculated once across the graph and reused to\n compute the mean.\n \"\"\"\n from dask.dataframe.groupby import _build_agg_args\n\n no_mean_spec = [(\"foo\", \"sum\", \"input\"), (\"bar\", \"count\", \"input\")]\n\n with_mean_spec = [\n (\"foo\", \"sum\", \"input\"),\n (\"bar\", \"count\", \"input\"),\n (\"baz\", \"mean\", \"input\"),\n ]\n\n no_mean_chunks, no_mean_aggs, no_mean_finalizers = _build_agg_args(no_mean_spec)\n with_mean_chunks, with_mean_aggs, with_mean_finalizers = _build_agg_args(\n with_mean_spec\n )\n\n assert len(no_mean_chunks) == len(with_mean_chunks)\n assert len(no_mean_aggs) == len(with_mean_aggs)\n\n assert len(no_mean_finalizers) == len(no_mean_spec)\n assert len(with_mean_finalizers) == len(with_mean_spec)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_aggregate__dask_test_aggregate__dask.for_spec_in_specs_.for_other_spec_in_specs_.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_aggregate__dask_test_aggregate__dask.for_spec_in_specs_.for_other_spec_in_specs_.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1081, "end_line": 1146, "span_ids": ["test_aggregate__dask"], "tokens": 615}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_aggregate__dask():\n dask_holder = collections.namedtuple(\"dask_holder\", [\"dask\"])\n get_agg_dask = lambda obj: dask_holder(\n {k: v for (k, v) in obj.dask.items() if k[0].startswith(\"aggregate\")}\n )\n\n specs = [\n {\"b\": {\"c\": \"mean\"}, \"c\": {\"a\": \"max\", \"b\": \"min\"}},\n {\"b\": \"mean\", \"c\": [\"min\", \"max\"]},\n [\n \"sum\",\n \"mean\",\n \"min\",\n \"max\",\n \"count\",\n \"size\",\n \"std\",\n \"var\",\n \"first\",\n \"last\",\n \"prod\",\n ],\n \"sum\",\n \"mean\",\n \"min\",\n \"max\",\n \"count\",\n \"std\",\n \"var\",\n \"first\",\n \"last\",\n \"prod\"\n # NOTE: the 'size' spec is special since it bypasses aggregate\n # 'size'\n ]\n\n pdf = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 1, 1, 2, 4, 3, 7] * 100,\n \"b\": [4, 2, 7, 3, 3, 1, 1, 1, 2] * 100,\n \"c\": [0, 1, 
2, 3, 4, 5, 6, 7, 8] * 100,\n \"d\": [3, 2, 1, 3, 2, 1, 2, 6, 4] * 100,\n },\n columns=[\"c\", \"b\", \"a\", \"d\"],\n )\n ddf = dd.from_pandas(pdf, npartitions=100)\n\n for spec in specs:\n result1 = ddf.groupby([\"a\", \"b\"]).agg(spec, split_every=2)\n result2 = ddf.groupby([\"a\", \"b\"]).agg(spec, split_every=2)\n\n agg_dask1 = get_agg_dask(result1)\n agg_dask2 = get_agg_dask(result2)\n\n # check that the number of partitions used is fixed by split_every\n assert_max_deps(agg_dask1, 2)\n assert_max_deps(agg_dask2, 2)\n\n # check for deterministic key names and values\n assert agg_dask1 == agg_dask2\n\n # the length of the dask does not depend on the passed spec\n for other_spec in specs:\n other = ddf.groupby([\"a\", \"b\"]).agg(other_spec, split_every=2)\n assert len(other.dask) == len(result1.dask)\n assert len(other.dask) == len(result2.dask)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_dataframe_aggregations_multilevel_test_dataframe_aggregations_multilevel.if_agg_func_nunique_.if_agg_func_in_cov_c.else_.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_dataframe_aggregations_multilevel_test_dataframe_aggregations_multilevel.if_agg_func_nunique_.if_agg_func_in_cov_c.else_.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1149, "end_line": 1203, "span_ids": ["test_dataframe_aggregations_multilevel"], "tokens": 582}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"grouper\",\n [\n lambda df: [\"a\"],\n lambda df: [\"a\", \"b\"],\n lambda df: df[\"a\"],\n lambda df: [df[\"a\"], df[\"b\"]],\n lambda df: [df[\"a\"] > 2, df[\"b\"] > 1],\n ],\n)\ndef test_dataframe_aggregations_multilevel(grouper, agg_func):\n def call(g, m, **kwargs):\n return getattr(g, m)(**kwargs)\n\n pdf = pd.DataFrame(\n {\n \"a\": [1, 2, 6, 4, 4, 6, 4, 3, 7] * 10,\n \"b\": [4, 2, 7, 3, 3, 1, 1, 1, 2] * 10,\n \"d\": [0, 1, 2, 3, 4, 5, 6, 7, 8] * 10,\n \"c\": [0, 1, 2, 3, 4, 5, 6, 7, 8] * 10,\n },\n columns=[\"c\", \"b\", \"a\", \"d\"],\n )\n\n ddf = dd.from_pandas(pdf, npartitions=10)\n\n # covariance only works with N+1 columns\n if agg_func not in (\"cov\", \"corr\"):\n assert_eq(\n call(pdf.groupby(grouper(pdf))[\"c\"], agg_func),\n call(ddf.groupby(grouper(ddf))[\"c\"], agg_func, split_every=2),\n )\n\n # not supported by pandas\n if agg_func != \"nunique\":\n assert_eq(\n call(pdf.groupby(grouper(pdf))[[\"c\", \"d\"]], agg_func),\n call(ddf.groupby(grouper(ddf))[[\"c\", \"d\"]], agg_func, split_every=2),\n )\n\n if agg_func in (\"cov\", \"corr\"):\n # there are sorting issues between pandas and chunk cov w/dask\n df = call(pdf.groupby(grouper(pdf)), agg_func).sort_index()\n cols = sorted(list(df.columns))\n df = df[cols]\n dddf = call(ddf.groupby(grouper(ddf)), agg_func, 
split_every=2).compute()\n dddf = dddf.sort_index()\n cols = sorted(list(dddf.columns))\n dddf = dddf[cols]\n assert_eq(df, dddf)\n else:\n assert_eq(\n call(pdf.groupby(grouper(pdf)), agg_func),\n call(ddf.groupby(grouper(ddf)), agg_func, split_every=2),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_aggregations_multilevel_test_series_aggregations_multilevel.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_aggregations_multilevel_test_series_aggregations_multilevel.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1206, "end_line": 1244, "span_ids": ["test_series_aggregations_multilevel"], "tokens": 377}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"grouper\",\n [\n lambda df: df[\"a\"],\n lambda df: [df[\"a\"], df[\"b\"]],\n lambda df: [df[\"a\"] > 2, df[\"b\"] > 1],\n ],\n)\ndef test_series_aggregations_multilevel(grouper, agg_func):\n \"\"\"\n similar to ``test_dataframe_aggregations_multilevel``, but series do not\n support all groupby args.\n \"\"\"\n\n def call(g, m, **kwargs):\n return getattr(g, m)(**kwargs)\n\n # covariance/correlation is not a series aggregation\n if agg_func in (\"cov\", \"corr\"):\n return\n\n pdf = pd.DataFrame(\n {\n \"a\": [1, 2, 6, 4, 4, 6, 4, 3, 7] * 10,\n \"b\": [4, 2, 7, 3, 3, 1, 1, 1, 2] * 10,\n \"c\": [0, 1, 2, 3, 4, 5, 6, 7, 8] * 10,\n },\n columns=[\"c\", \"b\", \"a\"],\n )\n\n ddf = dd.from_pandas(pdf, npartitions=10)\n\n assert_eq(\n call(pdf[\"c\"].groupby(grouper(pdf)), agg_func),\n call(ddf[\"c\"].groupby(grouper(ddf)), agg_func, split_every=2),\n # for pandas ~ 0.18, the name is not properly propagated for\n # the mean aggregation\n check_names=(agg_func not in {\"mean\", \"nunique\"}),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_meta_content_test_groupby_meta_content.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_meta_content_test_groupby_meta_content.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1247, "end_line": 1287, "span_ids": ["test_groupby_meta_content"], "tokens": 403}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", 
"tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"grouper\",\n [\n lambda df: df[\"a\"],\n lambda df: df[\"a\"] > 2,\n lambda df: [df[\"a\"], df[\"b\"]],\n lambda df: [df[\"a\"] > 2],\n pytest.param(\n lambda df: [df[\"a\"] > 2, df[\"b\"] > 1],\n marks=pytest.mark.xfail(\n reason=\"index dtype does not coincide: boolean != empty\"\n ),\n ),\n ],\n)\n@pytest.mark.parametrize(\n \"group_and_slice\",\n [\n lambda df, grouper: df.groupby(grouper(df)),\n lambda df, grouper: df[\"c\"].groupby(grouper(df)),\n lambda df, grouper: df.groupby(grouper(df))[\"c\"],\n ],\n)\ndef test_groupby_meta_content(group_and_slice, grouper):\n pdf = pd.DataFrame(\n {\n \"a\": [1, 2, 6, 4, 4, 6, 4, 3, 7] * 10,\n \"b\": [4, 2, 7, 3, 3, 1, 1, 1, 2] * 10,\n \"c\": [0, 1, 2, 3, 4, 5, 6, 7, 8] * 10,\n },\n columns=[\"c\", \"b\", \"a\"],\n )\n\n ddf = dd.from_pandas(pdf, npartitions=10)\n\n expected = group_and_slice(pdf, grouper).first().head(0)\n meta = group_and_slice(ddf, grouper)._meta.first()\n meta_nonempty = group_and_slice(ddf, grouper)._meta_nonempty.first().head(0)\n\n assert_eq(expected, meta)\n assert_eq(expected, meta_nonempty)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupy_non_aligned_index_test_groupy_non_aligned_index.None_4.ddf3_groupby_ddf7_a_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupy_non_aligned_index_test_groupy_non_aligned_index.None_4.ddf3_groupby_ddf7_a_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1290, "end_line": 1321, "span_ids": ["test_groupy_non_aligned_index"], "tokens": 334}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupy_non_aligned_index():\n pdf = pd.DataFrame(\n {\n \"a\": [1, 2, 6, 4, 4, 6, 4, 3, 7] * 10,\n \"b\": [4, 2, 7, 3, 3, 1, 1, 1, 2] * 10,\n \"c\": [0, 1, 2, 3, 4, 5, 6, 7, 8] * 10,\n },\n columns=[\"c\", \"b\", \"a\"],\n )\n\n ddf3 = dd.from_pandas(pdf, npartitions=3)\n ddf7 = dd.from_pandas(pdf, npartitions=7)\n\n # working examples\n ddf3.groupby([\"a\", \"b\"])\n ddf3.groupby([ddf3[\"a\"], ddf3[\"b\"]])\n\n # misaligned divisions\n with pytest.raises(NotImplementedError):\n ddf3.groupby(ddf7[\"a\"])\n\n with pytest.raises(NotImplementedError):\n ddf3.groupby([ddf7[\"a\"], ddf7[\"b\"]])\n\n with pytest.raises(NotImplementedError):\n ddf3.groupby([ddf7[\"a\"], ddf3[\"b\"]])\n\n with pytest.raises(NotImplementedError):\n ddf3.groupby([ddf3[\"a\"], ddf7[\"b\"]])\n\n with pytest.raises(NotImplementedError):\n ddf3.groupby([ddf7[\"a\"], \"b\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", 
"class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupy_series_wrong_grouper_test_groupy_series_wrong_grouper.None_3.s_groupby_s_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupy_series_wrong_grouper_test_groupy_series_wrong_grouper.None_3.s_groupby_s_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1324, "end_line": 1352, "span_ids": ["test_groupy_series_wrong_grouper"], "tokens": 240}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupy_series_wrong_grouper():\n df = pd.DataFrame(\n {\n \"a\": [1, 2, 6, 4, 4, 6, 4, 3, 7] * 10,\n \"b\": [4, 2, 7, 3, 3, 1, 1, 1, 2] * 10,\n \"c\": [0, 1, 2, 3, 4, 5, 6, 7, 8] * 10,\n },\n columns=[\"c\", \"b\", \"a\"],\n )\n\n df = dd.from_pandas(df, npartitions=3)\n s = df[\"a\"]\n\n # working index values\n s.groupby(s)\n s.groupby([s, s])\n\n # non working index values\n with pytest.raises(KeyError):\n s.groupby(\"foo\")\n\n with pytest.raises(KeyError):\n s.groupby([s, \"foo\"])\n\n with pytest.raises(ValueError):\n s.groupby(df)\n\n with pytest.raises(ValueError):\n s.groupby([s, df])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_hash_groupby_aggregate_test_hash_groupby_aggregate.assert_eq_result_df_grou": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_hash_groupby_aggregate_test_hash_groupby_aggregate.assert_eq_result_df_grou", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1355, "end_line": 1372, "span_ids": ["test_hash_groupby_aggregate"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"npartitions\", [1, 4, 20])\n@pytest.mark.parametrize(\"split_every\", [2, 5])\n@pytest.mark.parametrize(\"split_out\", [None, 1, 5, 20])\ndef test_hash_groupby_aggregate(npartitions, split_every, split_out):\n df = pd.DataFrame({\"x\": np.arange(100) % 10, \"y\": np.ones(100)})\n ddf = dd.from_pandas(df, npartitions)\n\n result = ddf.groupby(\"x\").y.var(split_every=split_every, split_out=split_out)\n\n dsk = result.__dask_optimize__(result.dask, result.__dask_keys__())\n from dask.core import get_deps\n\n dependencies, dependents = get_deps(dsk)\n\n assert result.npartitions == (split_out or 
1)\n assert len([k for k, v in dependencies.items() if not v]) == npartitions\n\n assert_eq(result, df.groupby(\"x\").y.var())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_split_out_multi_column_groupby_test_split_out_multi_column_groupby.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_split_out_multi_column_groupby_test_split_out_multi_column_groupby.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1375, "end_line": 1385, "span_ids": ["test_split_out_multi_column_groupby"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_split_out_multi_column_groupby():\n df = pd.DataFrame(\n {\"x\": np.arange(100) % 10, \"y\": np.ones(100), \"z\": [1, 2, 3, 4, 5] * 20}\n )\n\n ddf = dd.from_pandas(df, npartitions=10)\n\n result = ddf.groupby([\"x\", \"y\"]).z.mean(split_out=4)\n expected = df.groupby([\"x\", \"y\"]).z.mean()\n\n assert_eq(result, expected, check_dtype=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_split_out_num_test_groupby_split_out_num.with_pytest_raises_TypeEr.ddf_groupby_A_split_ou": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_split_out_num_test_groupby_split_out_num.with_pytest_raises_TypeEr.ddf_groupby_A_split_ou", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1388, "end_line": 1399, "span_ids": ["test_groupby_split_out_num"], "tokens": 154}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_split_out_num():\n # GH 1841\n ddf = dd.from_pandas(\n pd.DataFrame({\"A\": [1, 1, 2, 2], \"B\": [1, 2, 3, 4]}), npartitions=2\n )\n assert ddf.groupby(\"A\").sum().npartitions == 1\n assert ddf.groupby(\"A\").sum(split_out=2).npartitions == 2\n assert ddf.groupby(\"A\").sum(split_out=3).npartitions == 3\n\n with pytest.raises(TypeError):\n # groupby doesn't accept split_out\n ddf.groupby(\"A\", split_out=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_not_supported_test_groupby_numeric_column.assert_eq_ddf_groupby_ddf": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_not_supported_test_groupby_numeric_column.assert_eq_ddf_groupby_ddf", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1402, "end_line": 1420, "span_ids": ["test_groupby_numeric_column", "test_groupby_not_supported"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_not_supported():\n ddf = dd.from_pandas(\n pd.DataFrame({\"A\": [1, 1, 2, 2], \"B\": [1, 2, 3, 4]}), npartitions=2\n )\n with pytest.raises(TypeError):\n ddf.groupby(\"A\", axis=1)\n with pytest.raises(TypeError):\n ddf.groupby(\"A\", level=1)\n with pytest.raises(TypeError):\n ddf.groupby(\"A\", as_index=False)\n with pytest.raises(TypeError):\n ddf.groupby(\"A\", squeeze=True)\n\n\ndef test_groupby_numeric_column():\n df = pd.DataFrame({\"A\": [\"foo\", \"foo\", \"bar\"], 0: [1, 2, 3]})\n ddf = dd.from_pandas(df, npartitions=3)\n\n assert_eq(ddf.groupby(ddf.A)[0].sum(), df.groupby(df.A)[0].sum())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_cumulative_test_cumulative.assert_eq_getattr_g_func": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_cumulative_test_cumulative.assert_eq_getattr_g_func", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1423, "end_line": 1440, "span_ids": ["test_cumulative"], "tokens": 253}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"sel\", [\"c\", \"d\", [\"c\", \"d\"]])\n@pytest.mark.parametrize(\"key\", [\"a\", [\"a\", \"b\"]])\n@pytest.mark.parametrize(\"func\", [\"cumsum\", \"cumprod\", \"cumcount\"])\ndef test_cumulative(func, key, sel):\n df = pd.DataFrame(\n {\n \"a\": [1, 2, 6, 4, 4, 6, 4, 3, 7] * 6,\n \"b\": [4, 2, 7, 3, 3, 1, 1, 1, 2] * 6,\n \"c\": np.random.randn(54),\n \"d\": np.random.randn(54),\n },\n columns=[\"a\", \"b\", \"c\", \"d\"],\n )\n df.iloc[[-18, -12, -6], -1] = np.nan\n ddf = dd.from_pandas(df, npartitions=10)\n\n g, dg = [d.groupby(key)[sel] for d in (df, ddf)]\n 
assert_eq(getattr(g, func)(), getattr(dg, func)())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_cumulative_axis1_test_cumulative_axis1.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_cumulative_axis1_test_cumulative_axis1.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1443, "end_line": 1456, "span_ids": ["test_cumulative_axis1"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"func\", [\"cumsum\", \"cumprod\"])\ndef test_cumulative_axis1(func):\n df = pd.DataFrame(\n {\n \"a\": [1, 2, 6, 4, 4, 6, 4, 3, 7] * 2,\n \"b\": np.random.randn(18),\n \"c\": np.random.randn(18),\n }\n )\n df.iloc[-6, -1] = np.nan\n ddf = dd.from_pandas(df, npartitions=4)\n assert_eq(\n getattr(df.groupby(\"a\"), func)(axis=1), getattr(ddf.groupby(\"a\"), func)(axis=1)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_unaligned_index_test_groupby_unaligned_index.for_res_sol_in_good_.assert_eq_res_sol_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_unaligned_index_test_groupby_unaligned_index.for_res_sol_in_good_.assert_eq_res_sol_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1459, "end_line": 1499, "span_ids": ["test_groupby_unaligned_index"], "tokens": 282}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_unaligned_index():\n df = pd.DataFrame(\n {\n \"a\": np.random.randint(0, 10, 50),\n \"b\": np.random.randn(50),\n \"c\": np.random.randn(50),\n }\n )\n ddf = dd.from_pandas(df, npartitions=5)\n filtered = df[df.b < 0.5]\n dfiltered = ddf[ddf.b < 0.5]\n\n ddf_group = dfiltered.groupby(ddf.a)\n ds_group = dfiltered.b.groupby(ddf.a)\n\n bad = [\n ddf_group.mean(),\n ddf_group.var(),\n ddf_group.b.nunique(),\n ddf_group.get_group(0),\n ds_group.mean(),\n ds_group.var(),\n ds_group.nunique(),\n ds_group.get_group(0),\n ]\n\n for obj in bad:\n with pytest.raises(ValueError):\n obj.compute()\n\n def add1(x):\n return x + 1\n\n df_group = 
filtered.groupby(df.a)\n good = [\n (ddf_group.apply(add1, meta=ddf), df_group.apply(add1)),\n (ddf_group.b.apply(add1, meta=ddf.b), df_group.b.apply(add1)),\n ]\n\n for (res, sol) in good:\n assert_eq(res, sol)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_string_label_test_groupby_string_label.tm_assert_frame_equal_res": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_string_label_test_groupby_string_label.tm_assert_frame_equal_res", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1502, "end_line": 1512, "span_ids": ["test_groupby_string_label"], "tokens": 144}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_string_label():\n df = pd.DataFrame({\"foo\": [1, 1, 4], \"B\": [2, 3, 4], \"C\": [5, 6, 7]})\n ddf = dd.from_pandas(pd.DataFrame(df), npartitions=1)\n ddf_group = ddf.groupby(\"foo\")\n result = ddf_group.get_group(1).compute()\n\n expected = pd.DataFrame(\n {\"foo\": [1, 1], \"B\": [2, 3], \"C\": [5, 6]}, index=pd.Index([0, 1])\n )\n\n tm.assert_frame_equal(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_dataframe_cum_caching_test_groupby_dataframe_cum_caching.for_op_in_ops_.assert_res1_a_equals_res1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_dataframe_cum_caching_test_groupby_dataframe_cum_caching.for_op_in_ops_.assert_res1_a_equals_res1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1515, "end_line": 1540, "span_ids": ["test_groupby_dataframe_cum_caching"], "tokens": 239}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_dataframe_cum_caching():\n \"\"\"Test caching behavior of cumulative operations on grouped dataframes.\n\n Relates to #3756.\n \"\"\"\n df = pd.DataFrame(\n dict(a=list(\"aabbcc\")), index=pd.date_range(start=\"20100101\", periods=6)\n )\n df[\"ones\"] = 1\n df[\"twos\"] = 2\n\n ddf = dd.from_pandas(df, npartitions=3)\n\n ops = [\"cumsum\", \"cumprod\"]\n\n for op in ops:\n ddf0 = 
getattr(ddf.groupby([\"a\"]), op)()\n ddf1 = ddf.rename(columns={\"ones\": \"foo\", \"twos\": \"bar\"})\n ddf1 = getattr(ddf1.groupby([\"a\"]), op)()\n\n # _a and _b dataframe should be equal\n res0_a, res1_a = dask.compute(ddf0, ddf1)\n res0_b, res1_b = ddf0.compute(), ddf1.compute()\n\n assert res0_a.equals(res0_b)\n assert res1_a.equals(res1_b)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_series_cum_caching_test_groupby_series_cum_caching.for_op_in_ops_.assert_res1_a_equals_res1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_series_cum_caching_test_groupby_series_cum_caching.for_op_in_ops_.assert_res1_a_equals_res1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1543, "end_line": 1565, "span_ids": ["test_groupby_series_cum_caching"], "tokens": 216}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_series_cum_caching():\n \"\"\"Test caching behavior of cumulative operations on grouped Series\n\n Relates to #3755\n \"\"\"\n df = pd.DataFrame(\n dict(a=list(\"aabbcc\")), index=pd.date_range(start=\"20100101\", periods=6)\n )\n df[\"ones\"] = 1\n df[\"twos\"] = 2\n\n ops = [\"cumsum\", \"cumprod\"]\n for op in ops:\n ddf = dd.from_pandas(df, npartitions=3)\n dcum = ddf.groupby([\"a\"])\n res0_a, res1_a = dask.compute(\n getattr(dcum[\"ones\"], op)(), getattr(dcum[\"twos\"], op)()\n )\n cum = df.groupby([\"a\"])\n res0_b, res1_b = (getattr(cum[\"ones\"], op)(), getattr(cum[\"twos\"], op)())\n\n assert res0_a.equals(res0_b)\n assert res1_a.equals(res1_b)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_slice_agg_reduces_test_groupby_agg_grouper_single.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_slice_agg_reduces_test_groupby_agg_grouper_single.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1568, "end_line": 1583, "span_ids": ["test_groupby_agg_grouper_single", "test_groupby_slice_agg_reduces"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "def test_groupby_slice_agg_reduces():\n d = pd.DataFrame({\"a\": [1, 2, 3, 4], \"b\": [2, 3, 4, 5]})\n a = dd.from_pandas(d, npartitions=2)\n result = a.groupby(\"a\")[\"b\"].agg([\"min\", \"max\"])\n expected = d.groupby(\"a\")[\"b\"].agg([\"min\", \"max\"])\n assert_eq(result, expected)\n\n\ndef test_groupby_agg_grouper_single():\n # https://github.com/dask/dask/issues/2255\n d = pd.DataFrame({\"a\": [1, 2, 3, 4]})\n a = dd.from_pandas(d, npartitions=2)\n\n result = a.groupby(\"a\")[\"a\"].agg([\"min\", \"max\"])\n expected = d.groupby(\"a\")[\"a\"].agg([\"min\", \"max\"])\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_agg_grouper_multiple_test_groupby_agg_grouper_multiple.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_agg_grouper_multiple_test_groupby_agg_grouper_multiple.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1586, "end_line": 1594, "span_ids": ["test_groupby_agg_grouper_multiple"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"slice_\", [\"a\", [\"a\"], [\"a\", \"b\"], [\"b\"]])\ndef test_groupby_agg_grouper_multiple(slice_):\n # https://github.com/dask/dask/issues/2255\n d = pd.DataFrame({\"a\": [1, 2, 3, 4], \"b\": [1, 2, 3, 4]})\n a = dd.from_pandas(d, npartitions=2)\n\n result = a.groupby(\"a\")[slice_].agg([\"min\", \"max\"])\n expected = d.groupby(\"a\")[slice_].agg([\"min\", \"max\"])\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_column_and_index_agg_funcs_test_groupby_column_and_index_agg_funcs.None_5.assert_eq_expected_resul": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_column_and_index_agg_funcs_test_groupby_column_and_index_agg_funcs.None_5.assert_eq_expected_resul", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1597, "end_line": 1687, "span_ids": ["test_groupby_column_and_index_agg_funcs"], "tokens": 682}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"agg_func\",\n [\n \"cumprod\",\n \"cumcount\",\n \"cumsum\",\n \"var\",\n \"sum\",\n \"mean\",\n \"count\",\n \"size\",\n \"std\",\n \"min\",\n \"max\",\n \"first\",\n \"last\",\n \"prod\",\n ],\n)\ndef test_groupby_column_and_index_agg_funcs(agg_func):\n def call(g, m, **kwargs):\n return getattr(g, m)(**kwargs)\n\n df = pd.DataFrame(\n {\n \"idx\": [1, 1, 1, 2, 2, 2],\n \"a\": [1, 2, 1, 2, 1, 2],\n \"b\": np.arange(6),\n \"c\": [1, 1, 1, 2, 2, 2],\n }\n ).set_index(\"idx\")\n\n ddf = dd.from_pandas(df, npartitions=df.index.nunique())\n ddf_no_divs = dd.from_pandas(df, npartitions=df.index.nunique(), sort=False)\n\n # Index and then column\n\n # Compute expected result\n expected = call(df.groupby([\"idx\", \"a\"]), agg_func)\n if agg_func in {\"mean\", \"var\"}:\n expected = expected.astype(float)\n\n result = call(ddf.groupby([\"idx\", \"a\"]), agg_func)\n assert_eq(expected, result)\n\n result = call(ddf_no_divs.groupby([\"idx\", \"a\"]), agg_func)\n assert_eq(expected, result)\n\n # apply-combine-apply aggregation functions\n aca_agg = {\"sum\", \"mean\", \"var\", \"size\", \"std\", \"count\", \"first\", \"last\", \"prod\"}\n\n # Test aggregate strings\n if agg_func in aca_agg:\n result = ddf_no_divs.groupby([\"idx\", \"a\"]).agg(agg_func)\n assert_eq(expected, result)\n\n # Column and then index\n\n # Compute expected result\n expected = call(df.groupby([\"a\", \"idx\"]), agg_func)\n if agg_func in {\"mean\", \"var\"}:\n expected = expected.astype(float)\n\n result = call(ddf.groupby([\"a\", \"idx\"]), agg_func)\n assert_eq(expected, result)\n\n result = call(ddf_no_divs.groupby([\"a\", \"idx\"]), agg_func)\n assert_eq(expected, result)\n\n # Test aggregate strings\n if agg_func in aca_agg:\n result = ddf_no_divs.groupby([\"a\", \"idx\"]).agg(agg_func)\n assert_eq(expected, result)\n\n # Index only\n\n # Compute expected result\n expected = call(df.groupby(\"idx\"), agg_func)\n if agg_func in {\"mean\", \"var\"}:\n expected = expected.astype(float)\n\n result = call(ddf.groupby(\"idx\"), agg_func)\n assert_eq(expected, result)\n\n result = call(ddf_no_divs.groupby(\"idx\"), agg_func)\n assert_eq(expected, result)\n\n # Test aggregate strings\n if agg_func in aca_agg:\n result = ddf_no_divs.groupby(\"idx\").agg(agg_func)\n assert_eq(expected, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_column_and_index_apply_test_groupby_column_and_index_apply.with_warnings_catch_warni.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_column_and_index_apply_test_groupby_column_and_index_apply.with_warnings_catch_warni.None_3", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1690, "end_line": 1728, "span_ids": ["test_groupby_column_and_index_apply"], "tokens": 440}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"group_args\", [[\"idx\", \"a\"], [\"a\", \"idx\"], [\"idx\"], \"idx\"])\n@pytest.mark.parametrize(\n \"apply_func\", [np.min, np.mean, lambda s: np.max(s) - np.mean(s)]\n)\ndef test_groupby_column_and_index_apply(group_args, apply_func):\n df = pd.DataFrame(\n {\"idx\": [1, 1, 1, 2, 2, 2], \"a\": [1, 2, 1, 2, 1, 2], \"b\": np.arange(6)}\n ).set_index(\"idx\")\n\n ddf = dd.from_pandas(df, npartitions=df.index.nunique())\n ddf_no_divs = dd.from_pandas(df, npartitions=df.index.nunique(), sort=False)\n\n # Expected result\n expected = df.groupby(group_args).apply(apply_func)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n\n # Compute on dask DataFrame with divisions (no shuffling)\n result = ddf.groupby(group_args).apply(apply_func)\n assert_eq(expected, result, check_divisions=False)\n\n # Check that partitioning is preserved\n assert ddf.divisions == result.divisions\n\n # Check that no shuffling occurred.\n # The groupby operation should add only 1 task per partition\n assert len(result.dask) == (len(ddf.dask) + ddf.npartitions)\n\n # Compute on dask DataFrame without divisions (requires shuffling)\n result = ddf_no_divs.groupby(group_args).apply(apply_func)\n assert_eq(expected, result, check_divisions=False)\n\n # Check that divisions were preserved (all None in this case)\n assert ddf_no_divs.divisions == result.divisions\n\n # Crude check to see if shuffling was performed.\n # The groupby operation should add only more than 1 task per partition\n assert len(result.dask) > (len(ddf_no_divs.dask) + ddf_no_divs.npartitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_custom_mean_test_dataframe_groupby_agg_custom_sum.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_custom_mean_test_dataframe_groupby_agg_custom_sum.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1731, "end_line": 1757, "span_ids": ["impl:3", "test_dataframe_groupby_agg_custom_sum"], "tokens": 284}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "custom_mean = dd.Aggregation(\n \"mean\",\n lambda s: (s.count(), s.sum()),\n lambda s0, s1: (s0.sum(), s1.sum()),\n lambda s0, s1: s1 / s0,\n)\n\ncustom_sum = dd.Aggregation(\"sum\", lambda s: s.sum(), lambda s0: s0.sum())\n\n\n@pytest.mark.parametrize(\n \"pandas_spec, dask_spec, check_dtype\",\n [\n ({\"b\": \"mean\"}, {\"b\": custom_mean}, False),\n ({\"b\": \"sum\"}, {\"b\": custom_sum}, True),\n ([\"mean\", \"sum\"], [custom_mean, custom_sum], False),\n ({\"b\": [\"mean\", \"sum\"]}, {\"b\": [custom_mean, custom_sum]}, False),\n ],\n)\ndef 
test_dataframe_groupby_agg_custom_sum(pandas_spec, dask_spec, check_dtype):\n df = pd.DataFrame({\"g\": [0, 0, 1] * 3, \"b\": [1, 2, 3] * 3})\n ddf = dd.from_pandas(df, npartitions=2)\n\n expected = df.groupby(\"g\").aggregate(pandas_spec)\n result = ddf.groupby(\"g\").aggregate(dask_spec)\n\n assert_eq(result, expected, check_dtype=check_dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_agg_custom_mean_test_series_groupby_agg_custom_mean.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_agg_custom_mean_test_series_groupby_agg_custom_mean.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1760, "end_line": 1775, "span_ids": ["test_series_groupby_agg_custom_mean"], "tokens": 165}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"pandas_spec, dask_spec\",\n [\n (\"mean\", custom_mean),\n ([\"mean\"], [custom_mean]),\n ([\"mean\", \"sum\"], [custom_mean, custom_sum]),\n ],\n)\ndef test_series_groupby_agg_custom_mean(pandas_spec, dask_spec):\n d = pd.DataFrame({\"g\": [0, 0, 1] * 3, \"b\": [1, 2, 3] * 3})\n a = dd.from_pandas(d, npartitions=2)\n\n expected = d[\"b\"].groupby(d[\"g\"]).aggregate(pandas_spec)\n result = a[\"b\"].groupby(a[\"g\"]).aggregate(dask_spec)\n\n assert_eq(result, expected, check_dtype=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_agg_custom__name_clash_with_internal_same_column_test_groupby_agg_custom__name_clash_with_internal_same_column.with_pytest_raises_ValueE.a_groupby_g_aggregate_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_agg_custom__name_clash_with_internal_same_column_test_groupby_agg_custom__name_clash_with_internal_same_column.with_pytest_raises_ValueE.a_groupby_g_aggregate_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1778, "end_line": 1786, "span_ids": ["test_groupby_agg_custom__name_clash_with_internal_same_column"], "tokens": 128}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "def test_groupby_agg_custom__name_clash_with_internal_same_column():\n \"\"\"for a single input column only unique names are allowed\"\"\"\n d = pd.DataFrame({\"g\": [0, 0, 1] * 3, \"b\": [1, 2, 3] * 3})\n a = dd.from_pandas(d, npartitions=2)\n\n agg_func = dd.Aggregation(\"sum\", lambda s: s.sum(), lambda s0: s0.sum())\n\n with pytest.raises(ValueError):\n a.groupby(\"g\").aggregate({\"b\": [agg_func, \"sum\"]})", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_agg_custom__name_clash_with_internal_different_column_test_groupby_agg_custom__name_clash_with_internal_different_column.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_agg_custom__name_clash_with_internal_different_column_test_groupby_agg_custom__name_clash_with_internal_different_column.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1789, "end_line": 1807, "span_ids": ["test_groupby_agg_custom__name_clash_with_internal_different_column"], "tokens": 248}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_agg_custom__name_clash_with_internal_different_column():\n \"\"\"custom aggregation functions can share the name of a builtin function\"\"\"\n d = pd.DataFrame({\"g\": [0, 0, 1] * 3, \"b\": [1, 2, 3] * 3, \"c\": [4, 5, 6] * 3})\n a = dd.from_pandas(d, npartitions=2)\n\n # NOTE: this function is purposefully misnamed\n agg_func = dd.Aggregation(\n \"sum\",\n lambda s: (s.count(), s.sum()),\n lambda s0, s1: (s0.sum(), s1.sum()),\n lambda s0, s1: s1 / s0,\n )\n\n # NOTE: the name of agg-func is suppressed in the output,\n # since only a single agg func per column was specified\n result = a.groupby(\"g\").aggregate({\"b\": agg_func, \"c\": \"sum\"})\n expected = d.groupby(\"g\").aggregate({\"b\": \"mean\", \"c\": \"sum\"})\n\n assert_eq(result, expected, check_dtype=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_agg_custom__mode_test_groupby_agg_custom__mode.assert_eq_actual_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_agg_custom__mode_test_groupby_agg_custom__mode.assert_eq_actual_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1810, "end_line": 1846, "span_ids": 
["test_groupby_agg_custom__mode"], "tokens": 345}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_agg_custom__mode():\n # mode function passing intermediates as pure python objects around. to protect\n # results from pandas in apply use return results as single-item lists\n def agg_mode(s):\n def impl(s):\n (res,) = s.iloc[0]\n\n for (i,) in s.iloc[1:]:\n res = res.add(i, fill_value=0)\n\n return [res]\n\n return s.apply(impl)\n\n agg_func = dd.Aggregation(\n \"custom_mode\",\n lambda s: s.apply(lambda s: [s.value_counts()]),\n agg_mode,\n lambda s: s.map(lambda i: i[0].idxmax()),\n )\n\n d = pd.DataFrame(\n {\n \"g0\": [0, 0, 0, 1, 1] * 3,\n \"g1\": [0, 0, 0, 1, 1] * 3,\n \"cc\": [4, 5, 4, 6, 6] * 3,\n }\n )\n a = dd.from_pandas(d, npartitions=5)\n\n actual = a[\"cc\"].groupby([a[\"g0\"], a[\"g1\"]]).agg(agg_func)\n\n # cheat to get the correct index\n expected = pd.DataFrame({\"g0\": [0, 1], \"g1\": [0, 1], \"cc\": [4, 6]})\n expected = expected[\"cc\"].groupby([expected[\"g0\"], expected[\"g1\"]]).agg(\"sum\")\n\n assert_eq(actual, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_select_column_agg_test_groupby_select_column_agg.assert_eq_actual_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_select_column_agg_test_groupby_select_column_agg.assert_eq_actual_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1849, "end_line": 1860, "span_ids": ["test_groupby_select_column_agg"], "tokens": 164}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"func\", [\"var\", list])\ndef test_groupby_select_column_agg(func):\n pdf = pd.DataFrame(\n {\n \"A\": [1, 2, 3, 1, 2, 3, 1, 2, 4],\n \"B\": [-0.776, -0.4, -0.873, 0.054, 1.419, -0.948, -0.967, -1.714, -0.666],\n }\n )\n ddf = dd.from_pandas(pdf, npartitions=4)\n actual = ddf.groupby(\"A\")[\"B\"].agg(func)\n expected = pdf.groupby(\"A\")[\"B\"].agg(func)\n assert_eq(actual, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_std_object_dtype_test_std_object_dtype.assert_eq_func_df_func_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_std_object_dtype_test_std_object_dtype.assert_eq_func_df_func_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1862, "end_line": 1877, "span_ids": ["test_std_object_dtype"], "tokens": 156}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"func\",\n [\n lambda x: x.std(),\n lambda x: x.groupby(\"x\").std(),\n lambda x: x.groupby(\"x\").var(),\n lambda x: x.groupby(\"x\").mean(),\n lambda x: x.groupby(\"x\").sum(),\n lambda x: x.groupby(\"x\").z.std(),\n ],\n)\ndef test_std_object_dtype(func):\n df = pd.DataFrame({\"x\": [1, 2, 1], \"y\": [\"a\", \"b\", \"c\"], \"z\": [11.0, 22.0, 33.0]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n assert_eq(func(df), func(ddf))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_std_columns_int_test_timeseries.assert_eq_df_groupby_nam": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_std_columns_int_test_timeseries.assert_eq_df_groupby_nam", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1880, "end_line": 1892, "span_ids": ["test_timeseries", "test_std_columns_int"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_std_columns_int():\n # Make sure std() works when index_by is a df with integer column names\n # Non regression test for issue #3560\n\n df = pd.DataFrame({0: [5], 1: [5]})\n ddf = dd.from_pandas(df, npartitions=2)\n by = dask.array.from_array([0, 1]).to_dask_dataframe()\n ddf.groupby(by).std()\n\n\ndef test_timeseries():\n df = dask.datasets.timeseries().partitions[:2]\n assert_eq(df.groupby(\"name\").std(), df.groupby(\"name\").std())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_with_min_count_test_with_min_count.for_df_ddf_in_zip_dfs_d.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_with_min_count_test_with_min_count.for_df_ddf_in_zip_dfs_d.None_1", "embedding": null, "metadata": {"file_path": 
"dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1895, "end_line": 1930, "span_ids": ["test_with_min_count"], "tokens": 307}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n PANDAS_VERSION < \"0.22.0\",\n reason=\"Parameter min_count not implemented in \"\n \"DataFrame.groupby().sum() and DataFrame.groupby().prod()\",\n)\n@pytest.mark.parametrize(\"min_count\", [0, 1, 2, 3])\ndef test_with_min_count(min_count):\n dfs = [\n pd.DataFrame(\n {\n \"group\": [\"A\", \"A\", \"B\"],\n \"val1\": [np.nan, 2, 3],\n \"val2\": [np.nan, 5, 6],\n \"val3\": [5, 4, 9],\n }\n ),\n pd.DataFrame(\n {\n \"group\": [\"A\", \"A\", \"B\"],\n \"val1\": [2, np.nan, np.nan],\n \"val2\": [np.nan, 5, 6],\n \"val3\": [5, 4, 9],\n }\n ),\n ]\n ddfs = [dd.from_pandas(df, npartitions=4) for df in dfs]\n\n for df, ddf in zip(dfs, ddfs):\n assert_eq(\n df.groupby(\"group\").sum(min_count=min_count),\n ddf.groupby(\"group\").sum(min_count=min_count),\n )\n assert_eq(\n df.groupby(\"group\").prod(min_count=min_count),\n ddf.groupby(\"group\").prod(min_count=min_count),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_group_keys_test_groupby_group_keys.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_group_keys_test_groupby_group_keys.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1933, "end_line": 1943, "span_ids": ["test_groupby_group_keys"], "tokens": 147}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_group_keys():\n df = pd.DataFrame({\"a\": [1, 2, 2, 3], \"b\": [2, 3, 4, 5]})\n ddf = dd.from_pandas(df, npartitions=2).set_index(\"a\")\n pdf = df.set_index(\"a\")\n\n func = lambda g: g.copy()\n expected = pdf.groupby(\"a\").apply(func)\n assert_eq(expected, ddf.groupby(\"a\").apply(func, meta=expected))\n\n expected = pdf.groupby(\"a\", group_keys=False).apply(func)\n assert_eq(expected, ddf.groupby(\"a\", group_keys=False).apply(func, meta=expected))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_cov_test_groupby_cov.if_isinstance_columns_np.else_.assert_eq_expected_resul": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_cov_test_groupby_cov.if_isinstance_columns_np.else_.assert_eq_expected_resul", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1947, "end_line": 1971, "span_ids": ["test_groupby_cov"], "tokens": 263}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"columns\",\n [[\"a\", \"b\", \"c\"], np.array([1.0, 2.0, 3.0]), [\"1\", \"2\", \"3\"], [\"\", \"a\", \"b\"]],\n)\ndef test_groupby_cov(columns):\n rows = 20\n cols = 3\n data = np.random.randn(rows, cols)\n df = pd.DataFrame(data, columns=columns)\n df[\"key\"] = [0] * 10 + [1] * 5 + [2] * 5\n ddf = dd.from_pandas(df, npartitions=3)\n\n expected = df.groupby(\"key\").cov()\n result = ddf.groupby(\"key\").cov()\n # when using numerical values for columns\n # the column mapping and stacking leads to a float typed\n # MultiIndex. Pandas will normally create a object typed\n # MultiIndex\n if isinstance(columns, np.ndarray):\n result = result.compute()\n # don't bother checking index -- MultiIndex levels are in a frozenlist\n result.columns = result.columns.astype(np.dtype(\"O\"))\n assert_eq(expected, result, check_index=False)\n else:\n assert_eq(expected, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_df_groupby_idxmin_test_df_groupby_idxmin.assert_eq_expected_resul": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_df_groupby_idxmin_test_df_groupby_idxmin.assert_eq_expected_resul", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1973, "end_line": 1986, "span_ids": ["test_df_groupby_idxmin"], "tokens": 147}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_df_groupby_idxmin():\n pdf = pd.DataFrame(\n {\"idx\": list(range(4)), \"group\": [1, 1, 2, 2], \"value\": [10, 20, 20, 10]}\n ).set_index(\"idx\")\n\n ddf = dd.from_pandas(pdf, npartitions=3)\n\n expected = pd.DataFrame({\"group\": [1, 2], \"value\": [0, 3]}).set_index(\"group\")\n\n result_pd = pdf.groupby(\"group\").idxmin()\n result_dd = ddf.groupby(\"group\").idxmin()\n\n assert_eq(result_pd, result_dd)\n assert_eq(expected, result_dd)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, 
"__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_df_groupby_idxmin_skipna_test_df_groupby_idxmin_skipna.assert_eq_result_pd_resu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_df_groupby_idxmin_skipna_test_df_groupby_idxmin_skipna.assert_eq_result_pd_resu", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1989, "end_line": 2004, "span_ids": ["test_df_groupby_idxmin_skipna"], "tokens": 147}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"skipna\", [True, False])\ndef test_df_groupby_idxmin_skipna(skipna):\n pdf = pd.DataFrame(\n {\n \"idx\": list(range(4)),\n \"group\": [1, 1, 2, 2],\n \"value\": [np.nan, 20.1, np.nan, 10.1],\n }\n ).set_index(\"idx\")\n\n ddf = dd.from_pandas(pdf, npartitions=3)\n\n result_pd = pdf.groupby(\"group\").idxmin(skipna=skipna)\n result_dd = ddf.groupby(\"group\").idxmin(skipna=skipna)\n\n assert_eq(result_pd, result_dd)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_df_groupby_idxmax_test_df_groupby_idxmax.assert_eq_expected_resul": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_df_groupby_idxmax_test_df_groupby_idxmax.assert_eq_expected_resul", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2007, "end_line": 2020, "span_ids": ["test_df_groupby_idxmax"], "tokens": 147}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_df_groupby_idxmax():\n pdf = pd.DataFrame(\n {\"idx\": list(range(4)), \"group\": [1, 1, 2, 2], \"value\": [10, 20, 20, 10]}\n ).set_index(\"idx\")\n\n ddf = dd.from_pandas(pdf, npartitions=3)\n\n expected = pd.DataFrame({\"group\": [1, 2], \"value\": [1, 2]}).set_index(\"group\")\n\n result_pd = pdf.groupby(\"group\").idxmax()\n result_dd = ddf.groupby(\"group\").idxmax()\n\n assert_eq(result_pd, result_dd)\n assert_eq(expected, result_dd)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_df_groupby_idxmax_skipna_test_df_groupby_idxmax_skipna.assert_eq_result_pd_resu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_df_groupby_idxmax_skipna_test_df_groupby_idxmax_skipna.assert_eq_result_pd_resu", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2023, "end_line": 2038, "span_ids": ["test_df_groupby_idxmax_skipna"], "tokens": 147}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"skipna\", [True, False])\ndef test_df_groupby_idxmax_skipna(skipna):\n pdf = pd.DataFrame(\n {\n \"idx\": list(range(4)),\n \"group\": [1, 1, 2, 2],\n \"value\": [np.nan, 20.1, np.nan, 10.1],\n }\n ).set_index(\"idx\")\n\n ddf = dd.from_pandas(pdf, npartitions=3)\n\n result_pd = pdf.groupby(\"group\").idxmax(skipna=skipna)\n result_dd = ddf.groupby(\"group\").idxmax(skipna=skipna)\n\n assert_eq(result_pd, result_dd)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_idxmin_test_series_groupby_idxmin.assert_eq_expected_resul": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_idxmin_test_series_groupby_idxmin.assert_eq_expected_resul", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2041, "end_line": 2056, "span_ids": ["test_series_groupby_idxmin"], "tokens": 159}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_series_groupby_idxmin():\n pdf = pd.DataFrame(\n {\"idx\": list(range(4)), \"group\": [1, 1, 2, 2], \"value\": [10, 20, 20, 10]}\n ).set_index(\"idx\")\n\n ddf = dd.from_pandas(pdf, npartitions=3)\n\n expected = (\n pd.DataFrame({\"group\": [1, 2], \"value\": [0, 3]}).set_index(\"group\").squeeze()\n )\n\n result_pd = pdf.groupby(\"group\")[\"value\"].idxmin()\n result_dd = ddf.groupby(\"group\")[\"value\"].idxmin()\n\n assert_eq(result_pd, result_dd)\n assert_eq(expected, result_dd)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_idxmin_skipna_test_series_groupby_idxmin_skipna.assert_eq_result_pd_resu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_idxmin_skipna_test_series_groupby_idxmin_skipna.assert_eq_result_pd_resu", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2059, "end_line": 2074, "span_ids": ["test_series_groupby_idxmin_skipna"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"skipna\", [True, False])\ndef test_series_groupby_idxmin_skipna(skipna):\n pdf = pd.DataFrame(\n {\n \"idx\": list(range(4)),\n \"group\": [1, 1, 2, 2],\n \"value\": [np.nan, 20.1, np.nan, 10.1],\n }\n ).set_index(\"idx\")\n\n ddf = dd.from_pandas(pdf, npartitions=3)\n\n result_pd = pdf.groupby(\"group\")[\"value\"].idxmin(skipna=skipna)\n result_dd = ddf.groupby(\"group\")[\"value\"].idxmin(skipna=skipna)\n\n assert_eq(result_pd, result_dd)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_idxmax_test_series_groupby_idxmax.assert_eq_expected_resul": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_idxmax_test_series_groupby_idxmax.assert_eq_expected_resul", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2077, "end_line": 2092, "span_ids": ["test_series_groupby_idxmax"], "tokens": 159}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_series_groupby_idxmax():\n pdf = pd.DataFrame(\n {\"idx\": list(range(4)), \"group\": [1, 1, 2, 2], \"value\": [10, 20, 20, 10]}\n ).set_index(\"idx\")\n\n ddf = dd.from_pandas(pdf, npartitions=3)\n\n expected = (\n pd.DataFrame({\"group\": [1, 2], \"value\": [1, 2]}).set_index(\"group\").squeeze()\n )\n\n result_pd = pdf.groupby(\"group\")[\"value\"].idxmax()\n result_dd = ddf.groupby(\"group\")[\"value\"].idxmax()\n\n assert_eq(result_pd, result_dd)\n assert_eq(expected, result_dd)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_idxmax_skipna_test_series_groupby_idxmax_skipna.assert_eq_result_pd_resu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_idxmax_skipna_test_series_groupby_idxmax_skipna.assert_eq_result_pd_resu", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2095, "end_line": 2110, "span_ids": ["test_series_groupby_idxmax_skipna"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"skipna\", [True, False])\ndef test_series_groupby_idxmax_skipna(skipna):\n pdf = pd.DataFrame(\n {\n \"idx\": list(range(4)),\n \"group\": [1, 1, 2, 2],\n \"value\": [np.nan, 20.1, np.nan, 10.1],\n }\n ).set_index(\"idx\")\n\n ddf = dd.from_pandas(pdf, npartitions=3)\n\n result_pd = pdf.groupby(\"group\")[\"value\"].idxmax(skipna=skipna)\n result_dd = ddf.groupby(\"group\")[\"value\"].idxmax(skipna=skipna)\n\n assert_eq(result_pd, result_dd)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_unique_test_groupby_value_counts.assert_eq_dd_gb_pd_gb_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_unique_test_groupby_value_counts.assert_eq_dd_gb_pd_gb_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2113, "end_line": 2140, "span_ids": ["test_groupby_value_counts", "test_groupby_unique"], "tokens": 260}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n version.parse(pd.__version__) < version.parse(\"0.25.0\"),\n reason=\"'explode' is not implemented\",\n)\ndef test_groupby_unique():\n rng = np.random.RandomState(42)\n df = pd.DataFrame(\n {\"foo\": rng.randint(3, size=100), \"bar\": rng.randint(10, size=100)}\n )\n ddf = dd.from_pandas(df, npartitions=10)\n\n pd_gb = df.groupby(\"foo\")[\"bar\"].unique()\n dd_gb = ddf.groupby(\"foo\")[\"bar\"].unique()\n\n # Use explode because each DataFrame row is a list; equality fails\n assert_eq(dd_gb.explode(), pd_gb.explode())\n\n\ndef test_groupby_value_counts():\n rng = np.random.RandomState(42)\n df = pd.DataFrame(\n {\"foo\": rng.randint(3, size=100), \"bar\": rng.randint(4, size=100)}\n )\n ddf = dd.from_pandas(df, npartitions=2)\n\n pd_gb = 
df.groupby(\"foo\")[\"bar\"].value_counts()\n dd_gb = ddf.groupby(\"foo\")[\"bar\"].value_counts()\n assert_eq(dd_gb, pd_gb)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_transform_funcs_test_groupby_transform_funcs.with_pytest_warns_UserWar.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_transform_funcs_test_groupby_transform_funcs.with_pytest_warns_UserWar.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2143, "end_line": 2168, "span_ids": ["test_groupby_transform_funcs"], "tokens": 187}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"transformation\", [lambda x: x.sum(), np.sum, \"sum\", pd.Series.rank]\n)\ndef test_groupby_transform_funcs(transformation):\n pdf = pd.DataFrame(\n {\n \"A\": [1, 2, 3, 4] * 5,\n \"B\": np.random.randn(20),\n \"C\": np.random.randn(20),\n \"D\": np.random.randn(20),\n }\n )\n ddf = dd.from_pandas(pdf, 3)\n\n with pytest.warns(UserWarning):\n # DataFrame\n assert_eq(\n pdf.groupby(\"A\").transform(transformation),\n ddf.groupby(\"A\").transform(transformation),\n )\n\n # Series\n assert_eq(\n pdf.groupby(\"A\")[\"B\"].transform(transformation),\n ddf.groupby(\"A\")[\"B\"].transform(transformation),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_transform_ufunc_partitioning_test_groupby_transform_ufunc_partitioning.with_pytest_warns_UserWar.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_transform_ufunc_partitioning_test_groupby_transform_ufunc_partitioning.with_pytest_warns_UserWar.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2171, "end_line": 2196, "span_ids": ["test_groupby_transform_ufunc_partitioning"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"npartitions\", list(range(1, 10)))\n@pytest.mark.parametrize(\"indexed\", [True, False])\ndef test_groupby_transform_ufunc_partitioning(npartitions, indexed):\n pdf = pd.DataFrame({\"group\": [1, 
2, 3, 4, 5] * 20, \"value\": np.random.randn(100)})\n\n if indexed:\n pdf = pdf.set_index(\"group\")\n\n ddf = dd.from_pandas(pdf, npartitions)\n\n with pytest.warns(UserWarning):\n # DataFrame\n assert_eq(\n pdf.groupby(\"group\").transform(lambda series: series - series.mean()),\n ddf.groupby(\"group\").transform(lambda series: series - series.mean()),\n )\n\n # Series\n assert_eq(\n pdf.groupby(\"group\")[\"value\"].transform(\n lambda series: series - series.mean()\n ),\n ddf.groupby(\"group\")[\"value\"].transform(\n lambda series: series - series.mean()\n ),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_aggregate_categoricals_test_groupby_aggregate_categoricals.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_aggregate_categoricals_test_groupby_aggregate_categoricals.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2199, "end_line": 2238, "span_ids": ["test_groupby_aggregate_categoricals"], "tokens": 318}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"grouping,agg\",\n [\n (\n lambda df: df.drop(columns=\"category_2\").groupby(\"category_1\"),\n lambda grp: grp.mean(),\n ),\n (\n lambda df: df.drop(columns=\"category_2\").groupby(\"category_1\"),\n lambda grp: grp.agg(\"mean\"),\n ),\n (lambda df: df.groupby([\"category_1\", \"category_2\"]), lambda grp: grp.mean()),\n pytest.param(\n lambda df: df.groupby([\"category_1\", \"category_2\"]),\n lambda grp: grp.agg(\"mean\"),\n marks=pytest.mark.xfail(\n not dask.dataframe.utils.PANDAS_GT_100,\n reason=(\n \"Should work starting from pandas 1.0.0: \"\n \"https://github.com/dask/dask/pull/5423\"\n ),\n ),\n ),\n ],\n)\ndef test_groupby_aggregate_categoricals(grouping, agg):\n pdf = pd.DataFrame(\n {\n \"category_1\": pd.Categorical(list(\"AABBCC\")),\n \"category_2\": pd.Categorical(list(\"ABCABC\")),\n \"value\": np.random.uniform(size=6),\n }\n )\n ddf = dd.from_pandas(pdf, 2)\n\n # DataFrameGroupBy\n assert_eq(agg(grouping(pdf)), agg(grouping(ddf)))\n\n # SeriesGroupBy\n assert_eq(agg(grouping(pdf)[\"value\"]), agg(grouping(ddf)[\"value\"]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_dropna_pandas_test_groupby_dropna_pandas.assert_eq_dask_result_pd": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_dropna_pandas_test_groupby_dropna_pandas.assert_eq_dask_result_pd", "embedding": null, 
"metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2241, "end_line": 2260, "span_ids": ["test_groupby_dropna_pandas"], "tokens": 252}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(reason=\"dropna kwarg not supported in pandas groupby.\")\n@pytest.mark.parametrize(\"dropna\", [False, True])\ndef test_groupby_dropna_pandas(dropna):\n\n # The `dropna` arg is not currently supported by pandas\n # (See #https://github.com/pandas-dev/pandas/pull/21669)\n # Dask supports the argument for the cudf backend,\n # but passing it to the pandas backend will fail.\n\n # TODO: Expand test when `dropna` is supported in pandas.\n # (See: `test_groupby_dropna_cudf`)\n\n df = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, None, None, 7, 8], \"e\": [4, 5, 6, 3, 2, 1, 0, 0]}\n )\n ddf = dd.from_pandas(df, npartitions=3)\n\n dask_result = ddf.groupby(\"a\", dropna=dropna)\n pd_result = df.groupby(\"a\", dropna=dropna)\n assert_eq(dask_result, pd_result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_dropna_cudf_test_groupby_dropna_cudf.assert_eq_dask_result_cu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_dropna_cudf_test_groupby_dropna_cudf.assert_eq_dask_result_cu", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2263, "end_line": 2295, "span_ids": ["test_groupby_dropna_cudf"], "tokens": 390}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"dropna\", [False, True, None])\n@pytest.mark.parametrize(\"by\", [\"a\", \"c\", \"d\", [\"a\", \"b\"], [\"a\", \"c\"], [\"a\", \"d\"]])\ndef test_groupby_dropna_cudf(dropna, by):\n\n # NOTE: This test requires cudf/dask_cudf, and will\n # be skipped by non-GPU CI\n\n cudf = pytest.importorskip(\"cudf\")\n dask_cudf = pytest.importorskip(\"dask_cudf\")\n\n df = cudf.DataFrame(\n {\n \"a\": [1, 2, 3, 4, None, None, 7, 8],\n \"b\": [1, 0] * 4,\n \"c\": [\"a\", \"b\", None, None, \"e\", \"f\", \"g\", \"h\"],\n \"e\": [4, 5, 6, 3, 2, 1, 0, 0],\n }\n )\n df[\"d\"] = df[\"c\"].astype(\"category\")\n ddf = dask_cudf.from_cudf(df, npartitions=3)\n\n if dropna is None:\n dask_result = ddf.groupby(by).e.sum()\n cudf_result = df.groupby(by).e.sum()\n else:\n dask_result = ddf.groupby(by, dropna=dropna).e.sum()\n cudf_result = df.groupby(by, dropna=dropna).e.sum()\n if by in [\"c\", \"d\"]:\n # Lose string/category index name in cudf...\n dask_result = 
dask_result.compute()\n dask_result.index.name = cudf_result.index.name\n\n assert_eq(dask_result, cudf_result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_rounding_negative_var_test_groupby_split_out_multiindex.assert_eq_ddf_result_ddf": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_rounding_negative_var_test_groupby_split_out_multiindex.assert_eq_ddf_result_ddf", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2298, "end_line": 2330, "span_ids": ["test_rounding_negative_var", "test_groupby_split_out_multiindex"], "tokens": 351}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rounding_negative_var():\n x = [-0.00179999999 for _ in range(10)]\n ids = [1 for _ in range(5)] + [2 for _ in range(5)]\n\n df = pd.DataFrame({\"ids\": ids, \"x\": x})\n\n ddf = dd.from_pandas(df, npartitions=2)\n assert_eq(ddf.groupby(\"ids\").x.std(), df.groupby(\"ids\").x.std())\n\n\n@pytest.mark.parametrize(\"split_out\", [2, 3])\n@pytest.mark.parametrize(\"column\", [[\"b\", \"c\"], [\"b\", \"d\"], [\"b\", \"e\"]])\ndef test_groupby_split_out_multiindex(split_out, column):\n df = pd.DataFrame(\n {\n \"a\": np.arange(8),\n \"b\": [1, 0, 0, 2, 1, 1, 2, 0],\n \"c\": [0, 1] * 4,\n \"d\": [\"dog\", \"cat\", \"cat\", \"dog\", \"dog\", \"dog\", \"cat\", \"bird\"],\n }\n ).fillna(0)\n df[\"e\"] = df[\"d\"].astype(\"category\")\n ddf = dd.from_pandas(df, npartitions=3)\n\n ddf_result_so1 = (\n ddf.groupby(column).a.mean(split_out=1).compute().sort_values().dropna()\n )\n\n ddf_result = (\n ddf.groupby(column).a.mean(split_out=split_out).compute().sort_values().dropna()\n )\n\n assert_eq(ddf_result, ddf_result_so1, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_large_ints_exception_test_groupby_large_ints_exception.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_large_ints_exception_test_groupby_large_ints_exception.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2333, "end_line": 2351, "span_ids": ["test_groupby_large_ints_exception"], "tokens": 214}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"backend\", [\"cudf\", \"pandas\"])\ndef test_groupby_large_ints_exception(backend):\n data_source = pytest.importorskip(backend)\n if backend == \"cudf\":\n dask_cudf = pytest.importorskip(\"dask_cudf\")\n data_frame = dask_cudf.from_cudf\n else:\n data_frame = dd.from_pandas\n max = np.iinfo(np.uint64).max\n sqrt = max ** 0.5\n series = data_source.Series(\n np.concatenate([sqrt * np.arange(5), np.arange(35)])\n ).astype(\"int64\")\n df = data_source.DataFrame({\"x\": series, \"z\": np.arange(40), \"y\": np.arange(40)})\n ddf = data_frame(df, npartitions=1)\n assert_eq(\n df.groupby(\"x\").std(),\n ddf.groupby(\"x\").std().compute(scheduler=\"single-threaded\"),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_sort_argument_test_groupby_sort_argument.if_agg_mean_.else_.assert_eq_result_3_resul": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_sort_argument_test_groupby_sort_argument.if_agg_mean_.else_.assert_eq_result_3_resul", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2354, "end_line": 2391, "span_ids": ["test_groupby_sort_argument"], "tokens": 404}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"by\", [\"a\", \"b\", \"c\", [\"a\", \"b\"], [\"a\", \"c\"]])\n@pytest.mark.parametrize(\"agg\", [\"count\", \"mean\", \"std\"])\n@pytest.mark.parametrize(\"sort\", [True, False])\ndef test_groupby_sort_argument(by, agg, sort):\n\n df = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 4, None, None, 7, 8],\n \"b\": [1, 0] * 4,\n \"c\": [\"a\", \"b\", None, None, \"e\", \"f\", \"g\", \"h\"],\n \"e\": [4, 5, 6, 3, 2, 1, 0, 0],\n }\n )\n ddf = dd.from_pandas(df, npartitions=3)\n\n gb = ddf.groupby(by, sort=sort)\n gb_pd = df.groupby(by, sort=sort)\n\n # Basic groupby aggregation\n result_1 = getattr(gb, agg)\n result_1_pd = getattr(gb_pd, agg)\n\n # Choose single column\n result_2 = getattr(gb.e, agg)\n result_2_pd = getattr(gb_pd.e, agg)\n\n # Use `agg()` api\n result_3 = gb.agg({\"e\": agg})\n result_3_pd = gb_pd.agg({\"e\": agg})\n\n if agg == \"mean\":\n assert_eq(result_1(), result_1_pd().astype(\"float\"))\n assert_eq(result_2(), result_2_pd().astype(\"float\"))\n assert_eq(result_3, result_3_pd.astype(\"float\"))\n else:\n assert_eq(result_1(), result_1_pd())\n assert_eq(result_2(), result_2_pd())\n assert_eq(result_3, result_3_pd)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_sort_argument_agg_test_groupby_sort_argument_agg.if_sort_.assert_eq_result_index_r": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_sort_argument_agg_test_groupby_sort_argument_agg.if_sort_.assert_eq_result_index_r", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2395, "end_line": 2408, "span_ids": ["test_groupby_sort_argument_agg"], "tokens": 177}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"agg\", [M.sum, M.prod, M.max, M.min])\n@pytest.mark.parametrize(\"sort\", [True, False])\ndef test_groupby_sort_argument_agg(agg, sort):\n df = pd.DataFrame({\"x\": [4, 2, 1, 2, 3, 1], \"y\": [1, 2, 3, 4, 5, 6]})\n ddf = dd.from_pandas(df, npartitions=3)\n\n result = agg(ddf.groupby(\"x\", sort=sort))\n result_pd = agg(df.groupby(\"x\", sort=sort))\n\n assert_eq(result, result_pd)\n if sort:\n # Check order of index if sort==True\n # (no guarantee that order will match otherwise)\n assert_eq(result.index, result_pd.index)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_sort_true_split_out_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_sort_true_split_out_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2410, "end_line": 2422, "span_ids": ["test_groupby_sort_true_split_out"], "tokens": 176}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_sort_true_split_out():\n df = pd.DataFrame({\"x\": [4, 2, 1, 2, 3, 1], \"y\": [1, 2, 3, 4, 5, 6]})\n ddf = dd.from_pandas(df, npartitions=3)\n\n # Works fine for split_out==1 or sort=False/None\n M.sum(ddf.groupby(\"x\", sort=True), split_out=1)\n M.sum(ddf.groupby(\"x\", sort=False), split_out=2)\n M.sum(ddf.groupby(\"x\"), split_out=2)\n\n with pytest.raises(NotImplementedError):\n # Cannot use sort=True with split_out>1 (for now)\n M.sum(ddf.groupby(\"x\", sort=True), split_out=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_hashing.py_np_test_hash_pandas_object.if_isinstance_a_np_ndarr.else_.assert_eq_a_b_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_hashing.py_np_test_hash_pandas_object.if_isinstance_a_np_ndarr.else_.assert_eq_a_b_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_hashing.py", "file_name": "test_hashing.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 37, "span_ids": ["imports", "test_hash_pandas_object"], "tokens": 298}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\nimport pandas as pd\nimport dask.dataframe as dd\nfrom dask.dataframe import _compat\nfrom dask.dataframe._compat import tm\nfrom pandas.util import hash_pandas_object\n\nimport pytest\n\nfrom dask.dataframe.utils import assert_eq\n\n\n@pytest.mark.parametrize(\n \"obj\",\n [\n pd.Series([1, 2, 3]),\n pd.Series([1.0, 1.5, 3.2]),\n pd.Series([1.0, 1.5, 3.2], index=[1.5, 1.1, 3.3]),\n pd.Series([\"a\", \"b\", \"c\"]),\n pd.Series([True, False, True]),\n pd.Index([1, 2, 3]),\n pd.Index([True, False, True]),\n pd.DataFrame({\"x\": [\"a\", \"b\", \"c\"], \"y\": [1, 2, 3]}),\n _compat.makeMissingDataframe(),\n _compat.makeMixedDataFrame(),\n _compat.makeTimeDataFrame(),\n _compat.makeTimeSeries(),\n _compat.makeTimedeltaIndex(),\n ],\n)\ndef test_hash_pandas_object(obj):\n a = hash_pandas_object(obj)\n b = hash_pandas_object(obj)\n if isinstance(a, np.ndarray):\n np.testing.assert_equal(a, b)\n else:\n assert_eq(a, b)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_hashing.py_test_categorical_consistency_test_categorical_consistency.for_s1_in_.for_categorize_in_True_.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_hashing.py_test_categorical_consistency_test_categorical_consistency.for_s1_in_.for_categorize_in_True_.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_hashing.py", "file_name": "test_hashing.py", "file_type": "text/x-python", "category": "test", "start_line": 40, "end_line": 56, "span_ids": ["test_categorical_consistency"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_categorical_consistency():\n # Check that categoricals hash consistent with their values, not codes\n # This should work for categoricals of any dtype\n for s1 in [\n pd.Series([\"a\", \"b\", \"c\", \"d\"]),\n pd.Series([1000, 2000, 3000, 4000]),\n pd.Series(pd.date_range(0, periods=4)),\n ]:\n s2 = 
s1.astype(\"category\").cat.set_categories(s1)\n s3 = s2.cat.set_categories(list(reversed(s1)))\n for categorize in [True, False]:\n # These should all hash identically\n h1 = hash_pandas_object(s1, categorize=categorize)\n h2 = hash_pandas_object(s2, categorize=categorize)\n h3 = hash_pandas_object(s3, categorize=categorize)\n tm.assert_series_equal(h1, h2)\n tm.assert_series_equal(h1, h3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_hashing.py_test_object_missing_values_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_hashing.py_test_object_missing_values_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_hashing.py", "file_name": "test_hashing.py", "file_type": "text/x-python", "category": "test", "start_line": 59, "end_line": 84, "span_ids": ["test_hash_object_dispatch", "test_object_missing_values"], "tokens": 280}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_object_missing_values():\n # Check that the presence of missing values doesn't change how object dtype\n # is hashed.\n s = pd.Series([\"a\", \"b\", \"c\", None])\n h1 = hash_pandas_object(s).iloc[:3]\n h2 = hash_pandas_object(s.iloc[:3])\n tm.assert_series_equal(h1, h2)\n\n\n@pytest.mark.parametrize(\n \"obj\",\n [\n pd.Index([1, 2, 3]),\n pd.Index([True, False, True]),\n pd.Series([1, 2, 3]),\n pd.Series([1.0, 1.5, 3.2]),\n pd.Series([1.0, 1.5, 3.2], index=[1.5, 1.1, 3.3]),\n pd.DataFrame({\"x\": [\"a\", \"b\", \"c\"], \"y\": [1, 2, 3]}),\n pd.DataFrame({\"x\": [\"a\", \"b\", \"c\"], \"y\": [1, 2, 3]}, index=[\"a\", \"z\", \"x\"]),\n ],\n)\ndef test_hash_object_dispatch(obj):\n result = dd.utils.hash_object_dispatch(obj)\n expected = pd.util.hash_pandas_object(obj)\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_hyperloglog.py_dd_test_basic.assert_abs_approx_exact": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_hyperloglog.py_dd_test_basic.assert_abs_approx_exact", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_hyperloglog.py", "file_name": "test_hyperloglog.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 77, "span_ids": ["imports", "test_basic"], "tokens": 679}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import dask.dataframe as dd\n\nimport pandas as 
pd\nimport numpy as np\nimport pytest\n\n\nrs = np.random.RandomState(96)\n\n\n@pytest.mark.parametrize(\n \"df\",\n [\n pd.DataFrame(\n {\n \"x\": [1, 2, 3] * 3,\n \"y\": [1.2, 3.4, 5.6] * 3,\n \"z\": -(np.arange(9, dtype=np.int8)),\n }\n ),\n pd.DataFrame(\n {\n \"x\": rs.randint(0, 1000000, (10000,)),\n \"y\": rs.randn(10000),\n \"z\": rs.uniform(0, 9999999, (10000,)),\n }\n ),\n pd.DataFrame(\n {\n \"x\": np.repeat(rs.randint(0, 1000000, (1000,)), 3),\n \"y\": np.repeat(rs.randn(1000), 3),\n \"z\": np.repeat(rs.uniform(0, 9999999, (1000,)), 3),\n }\n ),\n pd.DataFrame({\"x\": rs.randint(0, 1000000, (10000,))}),\n pd.DataFrame(\n {\n \"x\": rs.randint(0, 1000000, (7,)),\n \"y\": [\"a\", \"bet\", \"is\", \"a\", \"tax\", \"on\", \"bs\"],\n }\n ),\n pd.DataFrame(\n {\n \"w\": np.zeros((20000,)),\n \"x\": np.zeros((20000,)),\n \"y\": np.zeros((20000,)) + 4803592,\n \"z\": np.zeros((20000,)),\n }\n ),\n pd.DataFrame({\"x\": [1, 2, 3] * 1000}),\n pd.DataFrame({\"x\": np.random.random(1000)}),\n pytest.param(\n pd.DataFrame(\n {\n \"a\": [1, 2, 3] * 3,\n \"b\": [1.2, 3.4, 5.6] * 3,\n \"c\": [1 + 2j, 3 + 4j, 5 + 6j] * 3,\n \"d\": -(np.arange(9, dtype=np.int8)),\n }\n ),\n marks=[\n pytest.mark.filterwarnings(\"ignore::numpy.core.numeric.ComplexWarning\")\n ],\n ),\n pd.Series([1, 2, 3] * 1000),\n pd.Series(np.random.random(1000)),\n pd.Series(np.random.random(1000), index=np.ones(1000)),\n pd.Series(np.random.random(1000), index=np.random.random(1000)),\n ],\n)\n@pytest.mark.parametrize(\"npartitions\", [2, 20])\ndef test_basic(df, npartitions):\n ddf = dd.from_pandas(df, npartitions=npartitions)\n\n approx = ddf.nunique_approx().compute(scheduler=\"sync\")\n exact = len(df.drop_duplicates())\n assert abs(approx - exact) <= 2 or abs(approx - exact) / exact < 0.05", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_hyperloglog.py_test_split_every_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_hyperloglog.py_test_split_every_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_hyperloglog.py", "file_name": "test_hyperloglog.py", "file_type": "text/x-python", "category": "test", "start_line": 80, "end_line": 101, "span_ids": ["test_split_every", "test_larger_data"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"split_every\", [None, 2, 10])\n@pytest.mark.parametrize(\"npartitions\", [2, 20])\ndef test_split_every(split_every, npartitions):\n df = pd.Series([1, 2, 3] * 1000)\n ddf = dd.from_pandas(df, npartitions=npartitions)\n\n approx = ddf.nunique_approx(split_every=split_every).compute(scheduler=\"sync\")\n exact = len(df.drop_duplicates())\n assert abs(approx - exact) <= 2 or abs(approx - exact) / exact < 0.05\n\n\ndef test_larger_data():\n df = dd.demo.make_timeseries(\n \"2000-01-01\",\n \"2000-04-01\",\n {\"value\": float, \"id\": int},\n freq=\"10s\",\n partition_freq=\"1D\",\n seed=1,\n )\n assert 
df.nunique_approx().compute() > 1000", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_pd_if_dd__compat_PANDAS_GT_1.CHECK_FREQ_check_freq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_pd_if_dd__compat_PANDAS_GT_1.CHECK_FREQ_check_freq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 24, "span_ids": ["imports"], "tokens": 287}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pandas as pd\nimport numpy as np\n\nimport pytest\n\nimport dask\nimport dask.dataframe as dd\n\nfrom dask.dataframe._compat import tm, PANDAS_GT_100\nfrom dask.dataframe.indexing import _coerce_loc_index\nfrom dask.dataframe.utils import assert_eq, make_meta, PANDAS_VERSION\n\n\ndsk = {\n (\"x\", 0): pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]}, index=[0, 1, 3]),\n (\"x\", 1): pd.DataFrame({\"a\": [4, 5, 6], \"b\": [3, 2, 1]}, index=[5, 6, 8]),\n (\"x\", 2): pd.DataFrame({\"a\": [7, 8, 9], \"b\": [0, 0, 0]}, index=[9, 9, 9]),\n}\nmeta = make_meta({\"a\": \"i8\", \"b\": \"i8\"}, index=pd.Index([], \"i8\"))\nd = dd.DataFrame(dsk, \"x\", meta, [0, 5, 9, 9])\nfull = d.compute()\nCHECK_FREQ = {}\nif dd._compat.PANDAS_GT_110:\n CHECK_FREQ[\"check_freq\"] = False", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_test_loc.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_test_loc.None_4", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 27, "end_line": 73, "span_ids": ["test_loc"], "tokens": 634}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_loc():\n assert d.loc[3:8].divisions[0] == 3\n assert d.loc[3:8].divisions[-1] == 8\n\n assert d.loc[5].divisions == (5, 5)\n\n assert_eq(d.loc[5], full.loc[5:5])\n assert_eq(d.loc[3:8], full.loc[3:8])\n assert_eq(d.loc[:8], full.loc[:8])\n assert_eq(d.loc[3:], full.loc[3:])\n assert_eq(d.loc[[5]], full.loc[[5]])\n\n expected_warning = FutureWarning\n\n if not PANDAS_GT_100:\n # removed in pandas 1.0\n with pytest.warns(expected_warning):\n assert_eq(d.loc[[3, 4, 1, 8]], 
full.loc[[3, 4, 1, 8]])\n with pytest.warns(expected_warning):\n assert_eq(d.loc[[3, 4, 1, 9]], full.loc[[3, 4, 1, 9]])\n with pytest.warns(expected_warning):\n assert_eq(d.loc[np.array([3, 4, 1, 9])], full.loc[np.array([3, 4, 1, 9])])\n\n assert_eq(d.a.loc[5], full.a.loc[5:5])\n assert_eq(d.a.loc[3:8], full.a.loc[3:8])\n assert_eq(d.a.loc[:8], full.a.loc[:8])\n assert_eq(d.a.loc[3:], full.a.loc[3:])\n assert_eq(d.a.loc[[5]], full.a.loc[[5]])\n if not PANDAS_GT_100:\n # removed in pandas 1.0\n with pytest.warns(expected_warning):\n assert_eq(d.a.loc[[3, 4, 1, 8]], full.a.loc[[3, 4, 1, 8]])\n with pytest.warns(expected_warning):\n assert_eq(d.a.loc[[3, 4, 1, 9]], full.a.loc[[3, 4, 1, 9]])\n with pytest.warns(expected_warning):\n assert_eq(\n d.a.loc[np.array([3, 4, 1, 9])], full.a.loc[np.array([3, 4, 1, 9])]\n )\n assert_eq(d.a.loc[[]], full.a.loc[[]])\n assert_eq(d.a.loc[np.array([])], full.a.loc[np.array([])])\n\n pytest.raises(KeyError, lambda: d.loc[1000])\n assert_eq(d.loc[1000:], full.loc[1000:])\n assert_eq(d.loc[-2000:-1000], full.loc[-2000:-1000])\n\n assert sorted(d.loc[5].dask) == sorted(d.loc[5].dask)\n assert sorted(d.loc[5].dask) != sorted(d.loc[6].dask)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_non_informative_index_test_loc_non_informative_index.assert_eq_ddf_loc_20_df": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_non_informative_index_test_loc_non_informative_index.assert_eq_ddf_loc_20_df", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 76, "end_line": 88, "span_ids": ["test_loc_non_informative_index"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_loc_non_informative_index():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4]}, index=[10, 20, 30, 40])\n ddf = dd.from_pandas(df, npartitions=2, sort=True)\n ddf.divisions = (None,) * 3\n assert not ddf.known_divisions\n\n ddf.loc[20:30].compute(scheduler=\"sync\")\n\n assert_eq(ddf.loc[20:30], df.loc[20:30])\n\n df = pd.DataFrame({\"x\": [1, 2, 3, 4]}, index=[10, 20, 20, 40])\n ddf = dd.from_pandas(df, npartitions=2, sort=True)\n assert_eq(ddf.loc[20], df.loc[20:20])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_with_text_dates_test_loc_with_text_dates.assert_len_s_loc_2000_01": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_with_text_dates_test_loc_with_text_dates.assert_len_s_loc_2000_01", "embedding": null, "metadata": 
{"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 91, "end_line": 103, "span_ids": ["test_loc_with_text_dates"], "tokens": 139}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_loc_with_text_dates():\n A = dd._compat.makeTimeSeries().iloc[:5]\n B = dd._compat.makeTimeSeries().iloc[5:]\n s = dd.Series(\n {(\"df\", 0): A, (\"df\", 1): B},\n \"df\",\n A,\n [A.index.min(), B.index.min(), B.index.max()],\n )\n\n assert s.loc[\"2000\":\"2010\"].divisions == s.divisions\n assert_eq(s.loc[\"2000\":\"2010\"], s)\n assert len(s.loc[\"2000-01-03\":\"2000-01-05\"].compute()) == 3", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_with_series_test_loc_with_function.assert_eq_d_loc__col_l": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_with_series_test_loc_with_function.assert_eq_d_loc__col_l", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 106, "end_line": 126, "span_ids": ["test_loc_with_series", "test_loc_with_array", "test_loc_with_function"], "tokens": 267}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_loc_with_series():\n assert_eq(d.loc[d.a % 2 == 0], full.loc[full.a % 2 == 0])\n\n assert sorted(d.loc[d.a % 2].dask) == sorted(d.loc[d.a % 2].dask)\n assert sorted(d.loc[d.a % 2].dask) != sorted(d.loc[d.a % 3].dask)\n\n\ndef test_loc_with_array():\n assert_eq(d.loc[(d.a % 2 == 0).values], full.loc[(full.a % 2 == 0).values])\n\n assert sorted(d.loc[(d.a % 2).values].dask) == sorted(d.loc[(d.a % 2).values].dask)\n assert sorted(d.loc[(d.a % 2).values].dask) != sorted(d.loc[(d.a % 3).values].dask)\n\n\ndef test_loc_with_function():\n assert_eq(d.loc[lambda df: df[\"a\"] > 3, :], full.loc[lambda df: df[\"a\"] > 3, :])\n\n def _col_loc_fun(_df):\n return _df.columns.str.contains(\"b\")\n\n assert_eq(d.loc[:, _col_loc_fun], full.loc[:, _col_loc_fun])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_with_array_different_partition_test_loc_with_array_different_partition.with_pytest_raises_ValueE.ddf_loc_ddf_A_0_repar": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_with_array_different_partition_test_loc_with_array_different_partition.with_pytest_raises_ValueE.ddf_loc_ddf_A_0_repar", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 129, "end_line": 139, "span_ids": ["test_loc_with_array_different_partition"], "tokens": 114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_loc_with_array_different_partition():\n df = pd.DataFrame(\n np.random.randn(20, 5),\n index=list(\"abcdefghijklmnopqrst\"),\n columns=list(\"ABCDE\"),\n )\n ddf = dd.from_pandas(df, 3)\n\n assert_eq(ddf.loc[(ddf.A > 0).values], df.loc[(df.A > 0).values])\n with pytest.raises(ValueError):\n ddf.loc[(ddf.A > 0).repartition([\"a\", \"g\", \"k\", \"o\", \"t\"]).values]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_with_series_different_partition_test_loc_with_series_different_partition.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_with_series_different_partition_test_loc_with_series_different_partition.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 142, "end_line": 153, "span_ids": ["test_loc_with_series_different_partition"], "tokens": 116}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_loc_with_series_different_partition():\n df = pd.DataFrame(\n np.random.randn(20, 5),\n index=list(\"abcdefghijklmnopqrst\"),\n columns=list(\"ABCDE\"),\n )\n ddf = dd.from_pandas(df, 3)\n\n assert_eq(ddf.loc[ddf.A > 0], df.loc[df.A > 0])\n assert_eq(\n ddf.loc[(ddf.A > 0).repartition([\"a\", \"g\", \"k\", \"o\", \"t\"])], df.loc[df.A > 0]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc2d_test_loc2d.None_3.d_a_loc_d_a_2_0_3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc2d_test_loc2d.None_3.d_a_loc_d_a_2_0_3_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", 
"file_type": "text/x-python", "category": "test", "start_line": 156, "end_line": 184, "span_ids": ["test_loc2d"], "tokens": 359}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_loc2d():\n # index indexer is always regarded as slice for duplicated values\n assert_eq(d.loc[5, \"a\"], full.loc[5:5, \"a\"])\n # assert_eq(d.loc[[5], 'a'], full.loc[[5], 'a'])\n assert_eq(d.loc[5, [\"a\"]], full.loc[5:5, [\"a\"]])\n # assert_eq(d.loc[[5], ['a']], full.loc[[5], ['a']])\n\n assert_eq(d.loc[3:8, \"a\"], full.loc[3:8, \"a\"])\n assert_eq(d.loc[:8, \"a\"], full.loc[:8, \"a\"])\n assert_eq(d.loc[3:, \"a\"], full.loc[3:, \"a\"])\n assert_eq(d.loc[[8], \"a\"], full.loc[[8], \"a\"])\n\n assert_eq(d.loc[3:8, [\"a\"]], full.loc[3:8, [\"a\"]])\n assert_eq(d.loc[:8, [\"a\"]], full.loc[:8, [\"a\"]])\n assert_eq(d.loc[3:, [\"a\"]], full.loc[3:, [\"a\"]])\n\n # 3d\n with pytest.raises(pd.core.indexing.IndexingError):\n d.loc[3, 3, 3]\n\n # Series should raise\n with pytest.raises(pd.core.indexing.IndexingError):\n d.a.loc[3, 3]\n\n with pytest.raises(pd.core.indexing.IndexingError):\n d.a.loc[3:, 3]\n\n with pytest.raises(pd.core.indexing.IndexingError):\n d.a.loc[d.a % 2 == 0, 3]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc2d_some_missing_test_loc2d_with_known_divisions.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc2d_some_missing_test_loc2d_with_known_divisions.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 187, "end_line": 211, "span_ids": ["test_loc2d_some_missing", "test_loc2d_with_known_divisions"], "tokens": 359}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skip(PANDAS_GT_100, reason=\"Removed in pandas 1.0\")\ndef test_loc2d_some_missing():\n with pytest.warns(FutureWarning):\n assert_eq(d.loc[[3, 4, 3], [\"a\"]], full.loc[[3, 4, 3], [\"a\"]])\n\n\ndef test_loc2d_with_known_divisions():\n df = pd.DataFrame(\n np.random.randn(20, 5),\n index=list(\"abcdefghijklmnopqrst\"),\n columns=list(\"ABCDE\"),\n )\n ddf = dd.from_pandas(df, 3)\n\n assert_eq(ddf.loc[\"a\", \"A\"], df.loc[[\"a\"], \"A\"])\n assert_eq(ddf.loc[\"a\", [\"A\"]], df.loc[[\"a\"], [\"A\"]])\n assert_eq(ddf.loc[\"a\":\"o\", \"A\"], df.loc[\"a\":\"o\", \"A\"])\n assert_eq(ddf.loc[\"a\":\"o\", [\"A\"]], df.loc[\"a\":\"o\", [\"A\"]])\n assert_eq(ddf.loc[[\"n\"], [\"A\"]], df.loc[[\"n\"], [\"A\"]])\n assert_eq(ddf.loc[[\"a\", \"c\", \"n\"], [\"A\"]], df.loc[[\"a\", \"c\", \"n\"], [\"A\"]])\n assert_eq(ddf.loc[[\"t\", 
\"b\"], [\"A\"]], df.loc[[\"t\", \"b\"], [\"A\"]])\n assert_eq(\n ddf.loc[[\"r\", \"r\", \"c\", \"g\", \"h\"], [\"A\"]],\n df.loc[[\"r\", \"r\", \"c\", \"g\", \"h\"], [\"A\"]],\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc2d_with_unknown_divisions_test_loc2d_with_unknown_divisions.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc2d_with_unknown_divisions_test_loc2d_with_unknown_divisions.None_3", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 214, "end_line": 228, "span_ids": ["test_loc2d_with_unknown_divisions"], "tokens": 175}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_loc2d_with_unknown_divisions():\n df = pd.DataFrame(\n np.random.randn(20, 5),\n index=list(\"abcdefghijklmnopqrst\"),\n columns=list(\"ABCDE\"),\n )\n ddf = dd.from_pandas(df, 3)\n\n ddf.divisions = (None,) * len(ddf.divisions)\n assert ddf.known_divisions is False\n\n assert_eq(ddf.loc[\"a\", \"A\"], df.loc[[\"a\"], \"A\"])\n assert_eq(ddf.loc[\"a\", [\"A\"]], df.loc[[\"a\"], [\"A\"]])\n assert_eq(ddf.loc[\"a\":\"o\", \"A\"], df.loc[\"a\":\"o\", \"A\"])\n assert_eq(ddf.loc[\"a\":\"o\", [\"A\"]], df.loc[\"a\":\"o\", [\"A\"]])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc2d_duplicated_columns_test_loc2d_duplicated_columns.None_13": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc2d_duplicated_columns_test_loc2d_duplicated_columns.None_13", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 231, "end_line": 255, "span_ids": ["test_loc2d_duplicated_columns"], "tokens": 417}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_loc2d_duplicated_columns():\n df = pd.DataFrame(\n np.random.randn(20, 5),\n index=list(\"abcdefghijklmnopqrst\"),\n columns=list(\"AABCD\"),\n )\n ddf = dd.from_pandas(df, 3)\n\n assert_eq(ddf.loc[\"a\", \"A\"], df.loc[[\"a\"], \"A\"])\n assert_eq(ddf.loc[\"a\", [\"A\"]], df.loc[[\"a\"], [\"A\"]])\n assert_eq(ddf.loc[\"j\", \"B\"], df.loc[[\"j\"], \"B\"])\n assert_eq(ddf.loc[\"j\", 
[\"B\"]], df.loc[[\"j\"], [\"B\"]])\n\n assert_eq(ddf.loc[\"a\":\"o\", \"A\"], df.loc[\"a\":\"o\", \"A\"])\n assert_eq(ddf.loc[\"a\":\"o\", [\"A\"]], df.loc[\"a\":\"o\", [\"A\"]])\n assert_eq(ddf.loc[\"j\":\"q\", \"B\"], df.loc[\"j\":\"q\", \"B\"])\n assert_eq(ddf.loc[\"j\":\"q\", [\"B\"]], df.loc[\"j\":\"q\", [\"B\"]])\n\n assert_eq(ddf.loc[\"a\":\"o\", \"B\":\"D\"], df.loc[\"a\":\"o\", \"B\":\"D\"])\n assert_eq(ddf.loc[\"a\":\"o\", \"B\":\"D\"], df.loc[\"a\":\"o\", \"B\":\"D\"])\n assert_eq(ddf.loc[\"j\":\"q\", \"B\":\"A\"], df.loc[\"j\":\"q\", \"B\":\"A\"])\n assert_eq(ddf.loc[\"j\":\"q\", \"B\":\"A\"], df.loc[\"j\":\"q\", \"B\":\"A\"])\n\n assert_eq(ddf.loc[ddf.B > 0, \"B\"], df.loc[df.B > 0, \"B\"])\n assert_eq(ddf.loc[ddf.B > 0, [\"A\", \"C\"]], df.loc[df.B > 0, [\"A\", \"C\"]])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_getitem_test_getitem.None_13": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_getitem_test_getitem.None_13", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 258, "end_line": 291, "span_ids": ["test_getitem"], "tokens": 390}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_getitem():\n df = pd.DataFrame(\n {\n \"A\": [1, 2, 3, 4, 5, 6, 7, 8, 9],\n \"B\": [9, 8, 7, 6, 5, 4, 3, 2, 1],\n \"C\": [True, False, True] * 3,\n },\n columns=list(\"ABC\"),\n )\n ddf = dd.from_pandas(df, 2)\n assert_eq(ddf[\"A\"], df[\"A\"])\n # check cache consistency\n tm.assert_series_equal(ddf[\"A\"]._meta, ddf._meta[\"A\"])\n\n assert_eq(ddf[[\"A\", \"B\"]], df[[\"A\", \"B\"]])\n tm.assert_frame_equal(ddf[[\"A\", \"B\"]]._meta, ddf._meta[[\"A\", \"B\"]])\n\n assert_eq(ddf[ddf.C], df[df.C])\n tm.assert_series_equal(ddf.C._meta, ddf._meta.C)\n\n assert_eq(ddf[ddf.C.repartition([0, 2, 5, 8])], df[df.C])\n\n pytest.raises(KeyError, lambda: df[\"X\"])\n pytest.raises(KeyError, lambda: df[[\"A\", \"X\"]])\n pytest.raises(AttributeError, lambda: df.X)\n\n # not str/unicode\n df = pd.DataFrame(np.random.randn(10, 5))\n ddf = dd.from_pandas(df, 2)\n assert_eq(ddf[0], df[0])\n assert_eq(ddf[[1, 2]], df[[1, 2]])\n\n pytest.raises(KeyError, lambda: df[8])\n pytest.raises(KeyError, lambda: df[[1, 8]])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_getitem_slice_test_getitem_slice.assert_eq_ddf_f_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_getitem_slice_test_getitem_slice.assert_eq_ddf_f_df_", "embedding": null, 
"metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 294, "end_line": 306, "span_ids": ["test_getitem_slice"], "tokens": 161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_getitem_slice():\n df = pd.DataFrame(\n {\n \"A\": [1, 2, 3, 4, 5, 6, 7, 8, 9],\n \"B\": [9, 8, 7, 6, 5, 4, 3, 2, 1],\n \"C\": [True, False, True] * 3,\n },\n index=list(\"abcdefghi\"),\n )\n ddf = dd.from_pandas(df, 3)\n assert_eq(ddf[\"a\":\"e\"], df[\"a\":\"e\"])\n assert_eq(ddf[\"a\":\"b\"], df[\"a\":\"b\"])\n assert_eq(ddf[\"f\":], df[\"f\":])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_getitem_integer_slice_test_getitem_integer_slice.assert_eq_ddf_8_df_8_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_getitem_integer_slice_test_getitem_integer_slice.assert_eq_ddf_8_df_8_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 309, "end_line": 321, "span_ids": ["test_getitem_integer_slice"], "tokens": 162}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_getitem_integer_slice():\n df = pd.DataFrame({\"A\": range(6)})\n ddf = dd.from_pandas(df, 2)\n # integer slicing is iloc based\n with pytest.raises(NotImplementedError):\n ddf[1:3]\n\n df = pd.DataFrame({\"A\": range(6)}, index=[1.0, 2.0, 3.0, 5.0, 10.0, 11.0])\n ddf = dd.from_pandas(df, 2)\n # except for float dtype indexes\n assert_eq(ddf[2:8], df[2:8])\n assert_eq(ddf[2:], df[2:])\n assert_eq(ddf[:8], df[:8])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_on_numpy_datetimes_test_loc_on_pandas_datetimes.assert_eq_a_loc_2014_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_on_numpy_datetimes_test_loc_on_pandas_datetimes.assert_eq_a_loc_2014_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 324, "end_line": 341, "span_ids": ["test_loc_on_numpy_datetimes", "test_loc_on_pandas_datetimes"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_loc_on_numpy_datetimes():\n df = pd.DataFrame(\n {\"x\": [1, 2, 3]}, index=list(map(np.datetime64, [\"2014\", \"2015\", \"2016\"]))\n )\n a = dd.from_pandas(df, 2)\n a.divisions = list(map(np.datetime64, a.divisions))\n\n assert_eq(a.loc[\"2014\":\"2015\"], a.loc[\"2014\":\"2015\"])\n\n\ndef test_loc_on_pandas_datetimes():\n df = pd.DataFrame(\n {\"x\": [1, 2, 3]}, index=list(map(pd.Timestamp, [\"2014\", \"2015\", \"2016\"]))\n )\n a = dd.from_pandas(df, 2)\n a.divisions = list(map(pd.Timestamp, a.divisions))\n\n assert_eq(a.loc[\"2014\":\"2015\"], a.loc[\"2014\":\"2015\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_datetime_no_freq_test_coerce_loc_index.for_t_in_pd_Timestamp_n.assert_isinstance__coerce": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_datetime_no_freq_test_coerce_loc_index.for_t_in_pd_Timestamp_n.assert_isinstance__coerce", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 344, "end_line": 360, "span_ids": ["test_coerce_loc_index", "test_loc_datetime_no_freq"], "tokens": 184}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_loc_datetime_no_freq():\n # https://github.com/dask/dask/issues/2389\n\n datetime_index = pd.date_range(\"2016-01-01\", \"2016-01-31\", freq=\"12h\")\n datetime_index.freq = None # FORGET FREQUENCY\n df = pd.DataFrame({\"num\": range(len(datetime_index))}, index=datetime_index)\n\n ddf = dd.from_pandas(df, npartitions=1)\n slice_ = slice(\"2016-01-03\", \"2016-01-05\")\n result = ddf.loc[slice_, :]\n expected = df.loc[slice_, :]\n assert_eq(result, expected)\n\n\ndef test_coerce_loc_index():\n for t in [pd.Timestamp, np.datetime64]:\n assert isinstance(_coerce_loc_index([t(\"2014\")], \"2014\"), t)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_timestamp_str_test_loc_timestamp_str.None_15": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_timestamp_str_test_loc_timestamp_str.None_15", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 363, 
"end_line": 422, "span_ids": ["test_loc_timestamp_str"], "tokens": 707}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_loc_timestamp_str():\n\n df = pd.DataFrame(\n {\"A\": np.random.randn(100), \"B\": np.random.randn(100)},\n index=pd.date_range(\"2011-01-01\", freq=\"H\", periods=100),\n )\n ddf = dd.from_pandas(df, 10)\n\n # partial string slice\n assert_eq(df.loc[\"2011-01-02\"], ddf.loc[\"2011-01-02\"])\n assert_eq(df.loc[\"2011-01-02\":\"2011-01-10\"], ddf.loc[\"2011-01-02\":\"2011-01-10\"])\n # same reso, dask result is always DataFrame\n assert_eq(\n df.loc[\"2011-01-02 10:00\"].to_frame().T,\n ddf.loc[\"2011-01-02 10:00\"],\n **CHECK_FREQ\n )\n\n # series\n assert_eq(df.A.loc[\"2011-01-02\"], ddf.A.loc[\"2011-01-02\"], **CHECK_FREQ)\n assert_eq(\n df.A.loc[\"2011-01-02\":\"2011-01-10\"],\n ddf.A.loc[\"2011-01-02\":\"2011-01-10\"],\n **CHECK_FREQ\n )\n\n # slice with timestamp (dask result must be DataFrame)\n assert_eq(\n df.loc[pd.Timestamp(\"2011-01-02\")].to_frame().T,\n ddf.loc[pd.Timestamp(\"2011-01-02\")],\n **CHECK_FREQ\n )\n assert_eq(\n df.loc[pd.Timestamp(\"2011-01-02\") : pd.Timestamp(\"2011-01-10\")],\n ddf.loc[pd.Timestamp(\"2011-01-02\") : pd.Timestamp(\"2011-01-10\")],\n **CHECK_FREQ\n )\n assert_eq(\n df.loc[pd.Timestamp(\"2011-01-02 10:00\")].to_frame().T,\n ddf.loc[pd.Timestamp(\"2011-01-02 10:00\")],\n **CHECK_FREQ\n )\n\n df = pd.DataFrame(\n {\"A\": np.random.randn(100), \"B\": np.random.randn(100)},\n index=pd.date_range(\"2011-01-01\", freq=\"M\", periods=100),\n )\n ddf = dd.from_pandas(df, 50)\n assert_eq(df.loc[\"2011-01\"], ddf.loc[\"2011-01\"])\n assert_eq(df.loc[\"2011\"], ddf.loc[\"2011\"])\n\n assert_eq(df.loc[\"2011-01\":\"2012-05\"], ddf.loc[\"2011-01\":\"2012-05\"])\n assert_eq(df.loc[\"2011\":\"2015\"], ddf.loc[\"2011\":\"2015\"])\n\n # series\n assert_eq(df.B.loc[\"2011-01\"], ddf.B.loc[\"2011-01\"])\n assert_eq(df.B.loc[\"2011\"], ddf.B.loc[\"2011\"])\n\n assert_eq(df.B.loc[\"2011-01\":\"2012-05\"], ddf.B.loc[\"2011-01\":\"2012-05\"])\n assert_eq(df.B.loc[\"2011\":\"2015\"], ddf.B.loc[\"2011\":\"2015\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_getitem_timestamp_str_test_loc_period_str.pass": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_getitem_timestamp_str_test_loc_period_str.pass", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 425, "end_line": 452, "span_ids": ["test_getitem_timestamp_str", "test_loc_period_str"], "tokens": 310}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": "def test_getitem_timestamp_str():\n\n df = pd.DataFrame(\n {\"A\": np.random.randn(100), \"B\": np.random.randn(100)},\n index=pd.date_range(\"2011-01-01\", freq=\"H\", periods=100),\n )\n ddf = dd.from_pandas(df, 10)\n\n # partial string slice\n assert_eq(df[\"2011-01-02\"], ddf[\"2011-01-02\"])\n assert_eq(df[\"2011-01-02\":\"2011-01-10\"], df[\"2011-01-02\":\"2011-01-10\"])\n\n df = pd.DataFrame(\n {\"A\": np.random.randn(100), \"B\": np.random.randn(100)},\n index=pd.date_range(\"2011-01-01\", freq=\"D\", periods=100),\n )\n ddf = dd.from_pandas(df, 50)\n assert_eq(df[\"2011-01\"], ddf[\"2011-01\"])\n assert_eq(df[\"2011\"], ddf[\"2011\"])\n\n assert_eq(df[\"2011-01\":\"2012-05\"], ddf[\"2011-01\":\"2012-05\"])\n assert_eq(df[\"2011\":\"2015\"], ddf[\"2011\":\"2015\"])\n\n\ndef test_loc_period_str():\n # .loc with PeriodIndex doesn't support partial string indexing\n # https://github.com/pydata/pandas/issues/13429\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_getitem_period_str_test_getitem_period_str.assert_eq_df_2011_2015": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_getitem_period_str_test_getitem_period_str.assert_eq_df_2011_2015", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 455, "end_line": 477, "span_ids": ["test_getitem_period_str"], "tokens": 285}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_getitem_period_str():\n\n df = pd.DataFrame(\n {\"A\": np.random.randn(100), \"B\": np.random.randn(100)},\n index=pd.period_range(\"2011-01-01\", freq=\"H\", periods=100),\n )\n ddf = dd.from_pandas(df, 10)\n\n # partial string slice\n assert_eq(df[\"2011-01-02\"], ddf[\"2011-01-02\"])\n assert_eq(df[\"2011-01-02\":\"2011-01-10\"], df[\"2011-01-02\":\"2011-01-10\"])\n # same reso, dask result is always DataFrame\n\n df = pd.DataFrame(\n {\"A\": np.random.randn(100), \"B\": np.random.randn(100)},\n index=pd.period_range(\"2011-01-01\", freq=\"D\", periods=100),\n )\n ddf = dd.from_pandas(df, 50)\n assert_eq(df[\"2011-01\"], ddf[\"2011-01\"])\n assert_eq(df[\"2011\"], ddf[\"2011\"])\n\n assert_eq(df[\"2011-01\":\"2012-05\"], ddf[\"2011-01\":\"2012-05\"])\n assert_eq(df[\"2011\":\"2015\"], ddf[\"2011\":\"2015\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_to_series_test_to_series.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_to_series_test_to_series.None_1", 
"embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 480, "end_line": 495, "span_ids": ["test_to_series"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_series():\n\n # Test for time index\n df = pd.DataFrame(\n {\"A\": np.random.randn(100)},\n index=pd.date_range(\"2011-01-01\", freq=\"H\", periods=100),\n )\n ddf = dd.from_pandas(df, 10)\n\n assert_eq(df.index.to_series(), ddf.index.to_series())\n\n # Test for numerical index\n df = pd.DataFrame({\"A\": np.random.randn(100)}, index=range(100))\n ddf = dd.from_pandas(df, 10)\n\n assert_eq(df.index.to_series(), ddf.index.to_series())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_to_frame_test_to_frame.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_to_frame_test_to_frame.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 498, "end_line": 513, "span_ids": ["test_to_frame"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_frame():\n\n # Test for time index\n df = pd.DataFrame(\n {\"A\": np.random.randn(100)},\n index=pd.date_range(\"2011-01-01\", freq=\"H\", periods=100),\n )\n ddf = dd.from_pandas(df, 10)\n\n assert_eq(df.index.to_frame(), ddf.index.to_frame())\n\n # Test for numerical index\n df = pd.DataFrame({\"A\": np.random.randn(100)}, index=range(100))\n ddf = dd.from_pandas(df, 10)\n\n assert_eq(df.index.to_frame(), ddf.index.to_frame())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_to_frame_name_test_to_frame_name.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_to_frame_name_test_to_frame_name.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 516, "end_line": 531, "span_ids": ["test_to_frame_name"], "tokens": 168}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(PANDAS_VERSION < \"0.24.0\", reason=\"No renaming for index\")\ndef test_to_frame_name():\n # Test for time index\n df = pd.DataFrame(\n {\"A\": np.random.randn(100)},\n index=pd.date_range(\"2011-01-01\", freq=\"H\", periods=100),\n )\n ddf = dd.from_pandas(df, 10)\n\n assert_eq(df.index.to_frame(name=\"foo\"), ddf.index.to_frame(name=\"foo\"))\n\n # Test for numerical index\n df = pd.DataFrame({\"A\": np.random.randn(100)}, index=range(100))\n ddf = dd.from_pandas(df, 10)\n\n assert_eq(df.index.to_frame(name=\"bar\"), ddf.index.to_frame(name=\"bar\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_iloc_test_iloc_series.with_pytest_raises_Attrib.ds_iloc_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_iloc_test_iloc_series.with_pytest_raises_Attrib.ds_iloc_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 534, "end_line": 549, "span_ids": ["test_iloc_series", "test_iloc"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"indexer\", [0, [0], [0, 1], [1, 0], [False, True, True]])\ndef test_iloc(indexer):\n df = pd.DataFrame({\"A\": [1, 2], \"B\": [3, 4], \"C\": [5, 6]})\n ddf = dd.from_pandas(df, 2)\n\n result = ddf.iloc[:, indexer]\n expected = df.iloc[:, indexer]\n\n assert_eq(result, expected)\n\n\ndef test_iloc_series():\n s = pd.Series([1, 2, 3])\n ds = dd.from_pandas(s, 2)\n with pytest.raises(AttributeError):\n ds.iloc[:]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_iloc_raises_test_iloc_raises.with_pytest_raises_IndexE.ddf_iloc_5_6_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_iloc_raises_test_iloc_raises.with_pytest_raises_IndexE.ddf_iloc_5_6_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 552, "end_line": 566, "span_ids": ["test_iloc_raises"], "tokens": 143}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_iloc_raises():\n df = pd.DataFrame({\"A\": [1, 2], \"B\": [3, 4], \"C\": [5, 6]})\n ddf = dd.from_pandas(df, 2)\n\n with pytest.raises(NotImplementedError):\n ddf.iloc[[0, 1], :]\n\n with pytest.raises(NotImplementedError):\n ddf.iloc[[0, 1], [0, 1]]\n\n with pytest.raises(ValueError):\n ddf.iloc[[0, 1], [0, 1], [1, 2]]\n\n with pytest.raises(IndexError):\n ddf.iloc[:, [5, 6]]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_iloc_duplicate_columns_test_iloc_duplicate_columns.assert_eq_select_negative": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_iloc_duplicate_columns_test_iloc_duplicate_columns.assert_eq_select_negative", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 569, "end_line": 589, "span_ids": ["test_iloc_duplicate_columns"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_iloc_duplicate_columns():\n df = pd.DataFrame({\"A\": [1, 2], \"B\": [3, 4], \"C\": [5, 6]})\n ddf = dd.from_pandas(df, 2)\n df.columns = [\"A\", \"A\", \"C\"]\n ddf.columns = [\"A\", \"A\", \"C\"]\n\n selection = ddf.iloc[:, 2]\n # Check that `iloc` is called instead of getitem\n assert any([key.startswith(\"iloc\") for key in selection.dask.layers.keys()])\n\n select_first = ddf.iloc[:, 1]\n assert_eq(select_first, df.iloc[:, 1])\n\n select_zeroth = ddf.iloc[:, 0]\n assert_eq(select_zeroth, df.iloc[:, 0])\n\n select_list_cols = ddf.iloc[:, [0, 2]]\n assert_eq(select_list_cols, df.iloc[:, [0, 2]])\n\n select_negative = ddf.iloc[:, -1:-3:-1]\n assert_eq(select_negative, df.iloc[:, -1:-3:-1])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_iloc_dispatch_to_getitem_test_iloc_dispatch_to_getitem.assert_eq_select_negative": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_iloc_dispatch_to_getitem_test_iloc_dispatch_to_getitem.assert_eq_select_negative", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 592, "end_line": 611, "span_ids": ["test_iloc_dispatch_to_getitem"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_iloc_dispatch_to_getitem():\n df = pd.DataFrame({\"A\": [1, 2], \"B\": [3, 4], \"C\": [5, 6]})\n ddf = dd.from_pandas(df, 2)\n\n selection = ddf.iloc[:, 2]\n\n assert all([not key.startswith(\"iloc\") for key in selection.dask.layers.keys()])\n assert any([key.startswith(\"getitem\") for key in selection.dask.layers.keys()])\n\n select_first = ddf.iloc[:, 1]\n assert_eq(select_first, df.iloc[:, 1])\n\n select_zeroth = ddf.iloc[:, 0]\n assert_eq(select_zeroth, df.iloc[:, 0])\n\n select_list_cols = ddf.iloc[:, [0, 2]]\n assert_eq(select_list_cols, df.iloc[:, [0, 2]])\n\n select_negative = ddf.iloc[:, -1:-3:-1]\n assert_eq(select_negative, df.iloc[:, -1:-3:-1])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_iloc_out_of_order_selection_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_iloc_out_of_order_selection_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 614, "end_line": 631, "span_ids": ["test_iloc_out_of_order_selection"], "tokens": 183}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_iloc_out_of_order_selection():\n df = pd.DataFrame({\"A\": [1] * 100, \"B\": [2] * 100, \"C\": [3] * 100, \"D\": [4] * 100})\n ddf = dd.from_pandas(df, 2)\n ddf = ddf[[\"C\", \"A\", \"B\"]]\n a = ddf.iloc[:, 0]\n b = ddf.iloc[:, 1]\n c = ddf.iloc[:, 2]\n\n assert a.name == \"C\"\n assert b.name == \"A\"\n assert c.name == \"B\"\n\n a1, b1, c1 = dask.compute(a, b, c)\n\n assert a1.name == \"C\"\n assert b1.name == \"A\"\n assert c1.name == \"B\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_dd_df_left.return.pd_DataFrame_dict_idx_idx": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_dd_df_left.return.pd_DataFrame_dict_idx_idx", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_merge_column_and_index.py", "file_name": "test_merge_column_and_index.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 20, "span_ids": ["imports", "df_left"], "tokens": 174}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "import dask.dataframe as dd\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nfrom dask.dataframe.utils import assert_eq, PANDAS_VERSION\n\n\n# Fixtures\n# ========\n@pytest.fixture\ndef df_left():\n # Create frame with 10 partitions\n # Frame has 11 distinct idx values\n partition_sizes = np.array([3, 4, 2, 5, 3, 2, 5, 9, 4, 7, 4])\n idx = [i for i, s in enumerate(partition_sizes) for _ in range(s)]\n k = [i for s in partition_sizes for i in range(s)]\n vi = range(len(k))\n\n return pd.DataFrame(dict(idx=idx, k=k, v1=vi)).set_index([\"idx\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_df_right_df_right.return.pd_DataFrame_dict_idx_idx": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_df_right_df_right.return.pd_DataFrame_dict_idx_idx", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_merge_column_and_index.py", "file_name": "test_merge_column_and_index.py", "file_type": "text/x-python", "category": "test", "start_line": 23, "end_line": 32, "span_ids": ["df_right"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.fixture\ndef df_right():\n # Create frame with 10 partitions\n # Frame has 11 distinct idx values\n partition_sizes = np.array([4, 2, 5, 3, 2, 5, 9, 4, 7, 4, 8])\n idx = [i for i, s in enumerate(partition_sizes) for _ in range(s)]\n k = [i for s in partition_sizes for i in range(s)]\n vi = range(len(k))\n\n return pd.DataFrame(dict(idx=idx, k=k, v1=vi)).set_index([\"idx\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_ddf_left_on.return.request_param": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_ddf_left_on.return.request_param", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_merge_column_and_index.py", "file_name": "test_merge_column_and_index.py", "file_type": "text/x-python", "category": "test", "start_line": 35, "end_line": 76, "span_ids": ["ddf_right", "ddf_right_unknown", "on", "how", "ddf_left_single", "ddf_right_single", "ddf_left_unknown", "ddf_left"], "tokens": 308}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.fixture\ndef ddf_left(df_left):\n # Create frame with 10 
partitions\n # Skip division on 2 so there is one mismatch with ddf_right\n return dd.repartition(df_left, [0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11])\n\n\n@pytest.fixture\ndef ddf_left_unknown(ddf_left):\n return ddf_left.clear_divisions()\n\n\n@pytest.fixture\ndef ddf_left_single(df_left):\n return dd.from_pandas(df_left, npartitions=1, sort=False)\n\n\n@pytest.fixture\ndef ddf_right(df_right):\n # Create frame with 10 partitions\n # Skip division on 3 so there is one mismatch with ddf_left\n return dd.repartition(df_right, [0, 1, 2, 4, 5, 6, 7, 8, 9, 10, 11])\n\n\n@pytest.fixture\ndef ddf_right_unknown(ddf_right):\n return ddf_right.clear_divisions()\n\n\n@pytest.fixture\ndef ddf_right_single(df_right):\n return dd.from_pandas(df_right, npartitions=1, sort=False)\n\n\n@pytest.fixture(params=[\"inner\", \"left\", \"right\", \"outer\"])\ndef how(request):\n return request.param\n\n\n@pytest.fixture(params=[\"idx\", [\"idx\"], [\"idx\", \"k\"], [\"k\", \"idx\"]])\ndef on(request):\n return request.param", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py__Tests_test_merge_known_to_known.assert_len_result___dask_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py__Tests_test_merge_known_to_known.assert_len_result___dask_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_merge_column_and_index.py", "file_name": "test_merge_column_and_index.py", "file_type": "text/x-python", "category": "test", "start_line": 79, "end_line": 95, "span_ids": ["test_merge_known_to_known", "on"], "tokens": 155}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# Tests\n# =====\n@pytest.mark.skipif(\n PANDAS_VERSION < \"0.23.0\",\n reason=\"Need pandas col+index merge support (pandas-dev/pandas#14355)\",\n)\ndef test_merge_known_to_known(df_left, df_right, ddf_left, ddf_right, on, how):\n # Compute expected\n expected = df_left.merge(df_right, on=on, how=how)\n\n # Perform merge\n result = ddf_left.merge(ddf_right, on=on, how=how, shuffle=\"tasks\")\n\n # Assertions\n assert_eq(result, expected)\n assert_eq(result.divisions, tuple(range(12)))\n assert len(result.__dask_graph__()) < 80", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_test_merge_known_to_single_test_merge_known_to_single.assert_len_result___dask_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_test_merge_known_to_single_test_merge_known_to_single.assert_len_result___dask_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_merge_column_and_index.py", 
"file_name": "test_merge_column_and_index.py", "file_type": "text/x-python", "category": "test", "start_line": 98, "end_line": 113, "span_ids": ["test_merge_known_to_single"], "tokens": 164}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n PANDAS_VERSION < \"0.23.0\",\n reason=\"Need pandas col+index merge support (pandas-dev/pandas#14355)\",\n)\n@pytest.mark.parametrize(\"how\", [\"inner\", \"left\"])\ndef test_merge_known_to_single(df_left, df_right, ddf_left, ddf_right_single, on, how):\n # Compute expected\n expected = df_left.merge(df_right, on=on, how=how)\n\n # Perform merge\n result = ddf_left.merge(ddf_right_single, on=on, how=how, shuffle=\"tasks\")\n\n # Assertions\n assert_eq(result, expected)\n assert_eq(result.divisions, ddf_left.divisions)\n assert len(result.__dask_graph__()) < 30", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_test_merge_single_to_known_test_merge_single_to_known.assert_len_result___dask_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_test_merge_single_to_known_test_merge_single_to_known.assert_len_result___dask_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_merge_column_and_index.py", "file_name": "test_merge_column_and_index.py", "file_type": "text/x-python", "category": "test", "start_line": 116, "end_line": 131, "span_ids": ["test_merge_single_to_known"], "tokens": 164}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n PANDAS_VERSION < \"0.23.0\",\n reason=\"Need pandas col+index merge support (pandas-dev/pandas#14355)\",\n)\n@pytest.mark.parametrize(\"how\", [\"inner\", \"right\"])\ndef test_merge_single_to_known(df_left, df_right, ddf_left_single, ddf_right, on, how):\n # Compute expected\n expected = df_left.merge(df_right, on=on, how=how)\n\n # Perform merge\n result = ddf_left_single.merge(ddf_right, on=on, how=how, shuffle=\"tasks\")\n\n # Assertions\n assert_eq(result, expected)\n assert_eq(result.divisions, ddf_right.divisions)\n assert len(result.__dask_graph__()) < 30", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_test_merge_known_to_unknown_test_merge_known_to_unknown.assert_len_result___dask_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_test_merge_known_to_unknown_test_merge_known_to_unknown.assert_len_result___dask_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_merge_column_and_index.py", "file_name": "test_merge_column_and_index.py", "file_type": "text/x-python", "category": "test", "start_line": 134, "end_line": 150, "span_ids": ["test_merge_known_to_unknown"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n PANDAS_VERSION < \"0.23.0\",\n reason=\"Need pandas col+index merge support (pandas-dev/pandas#14355)\",\n)\ndef test_merge_known_to_unknown(\n df_left, df_right, ddf_left, ddf_right_unknown, on, how\n):\n # Compute expected\n expected = df_left.merge(df_right, on=on, how=how)\n\n # Perform merge\n result = ddf_left.merge(ddf_right_unknown, on=on, how=how, shuffle=\"tasks\")\n\n # Assertions\n assert_eq(result, expected)\n assert_eq(result.divisions, tuple(None for _ in range(11)))\n assert len(result.__dask_graph__()) >= 390", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_test_merge_unknown_to_known_test_merge_unknown_to_known.assert_len_result___dask_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_test_merge_unknown_to_known_test_merge_unknown_to_known.assert_len_result___dask_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_merge_column_and_index.py", "file_name": "test_merge_column_and_index.py", "file_type": "text/x-python", "category": "test", "start_line": 153, "end_line": 169, "span_ids": ["test_merge_unknown_to_known"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n PANDAS_VERSION < \"0.23.0\",\n reason=\"Need pandas col+index merge support (pandas-dev/pandas#14355)\",\n)\ndef test_merge_unknown_to_known(\n df_left, df_right, ddf_left_unknown, ddf_right, on, how\n):\n # Compute expected\n expected = df_left.merge(df_right, on=on, how=how)\n\n # Perform merge\n result = ddf_left_unknown.merge(ddf_right, on=on, how=how, shuffle=\"tasks\")\n\n # Assertions\n assert_eq(result, expected)\n assert_eq(result.divisions, tuple(None for _ in range(11)))\n assert len(result.__dask_graph__()) >= 390", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_test_merge_unknown_to_unknown_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_test_merge_unknown_to_unknown_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_merge_column_and_index.py", "file_name": "test_merge_column_and_index.py", "file_type": "text/x-python", "category": "test", "start_line": 172, "end_line": 189, "span_ids": ["test_merge_unknown_to_unknown"], "tokens": 162}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n PANDAS_VERSION < \"0.23.0\",\n reason=\"Need pandas col+index merge support (pandas-dev/pandas#14355)\",\n)\ndef test_merge_unknown_to_unknown(\n df_left, df_right, ddf_left_unknown, ddf_right_unknown, on, how\n):\n # Compute expected\n expected = df_left.merge(df_right, on=on, how=how)\n\n # Merge unknown to unknown\n result = ddf_left_unknown.merge(ddf_right_unknown, on=on, how=how, shuffle=\"tasks\")\n\n # Assertions\n assert_eq(result, expected)\n assert_eq(result.divisions, tuple(None for _ in range(11)))\n assert len(result.__dask_graph__()) >= 390", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_warnings_pytest": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_warnings_pytest", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 26, "span_ids": ["imports"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import warnings\n\nimport dask.dataframe as dd\nimport numpy as np\nimport pandas as pd\n\nfrom dask.base import compute_as_if_collection\nfrom dask.dataframe._compat import tm\nfrom dask.dataframe.core import _Frame\nfrom dask.dataframe.methods import concat\nfrom dask.dataframe.multi import (\n align_partitions,\n merge_indexed_dataframes,\n hash_join,\n concat_indexed_dataframes,\n _maybe_align_partitions,\n)\nfrom dask.dataframe.utils import (\n assert_eq,\n assert_divisions,\n make_meta,\n has_known_categories,\n clear_known_categories,\n)\n\nimport pytest", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_align_partitions_test_align_partitions._different_index": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_align_partitions_test_align_partitions._different_index", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 29, "end_line": 86, "span_ids": ["test_align_partitions"], "tokens": 878}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_align_partitions():\n A = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 5, 6], \"y\": list(\"abdabd\")}, index=[10, 20, 30, 40, 50, 60]\n )\n a = dd.repartition(A, [10, 40, 60])\n\n B = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": list(\"abda\")}, index=[30, 70, 80, 100])\n b = dd.repartition(B, [30, 80, 100])\n\n s = dd.core.Scalar({(\"s\", 0): 10}, \"s\", \"i8\")\n\n (aa, bb), divisions, L = align_partitions(a, b)\n\n def _check(a, b, aa, bb):\n assert isinstance(a, dd.DataFrame)\n assert isinstance(b, dd.DataFrame)\n assert isinstance(aa, dd.DataFrame)\n assert isinstance(bb, dd.DataFrame)\n assert_eq(a, aa)\n assert_eq(b, bb)\n assert divisions == (10, 30, 40, 60, 80, 100)\n assert isinstance(L, list)\n assert len(divisions) == 1 + len(L)\n\n _check(a, b, aa, bb)\n assert L == [\n [(aa._name, 0), (bb._name, 0)],\n [(aa._name, 1), (bb._name, 1)],\n [(aa._name, 2), (bb._name, 2)],\n [(aa._name, 3), (bb._name, 3)],\n [(aa._name, 4), (bb._name, 4)],\n ]\n\n (aa, ss, bb), divisions, L = align_partitions(a, s, b)\n _check(a, b, aa, bb)\n assert L == [\n [(aa._name, 0), None, (bb._name, 0)],\n [(aa._name, 1), None, (bb._name, 1)],\n [(aa._name, 2), None, (bb._name, 2)],\n [(aa._name, 3), None, (bb._name, 3)],\n [(aa._name, 4), None, (bb._name, 4)],\n ]\n assert_eq(ss, 10)\n\n ldf = pd.DataFrame({\"a\": [1, 2, 3, 4, 5, 6, 7], \"b\": [7, 6, 5, 4, 3, 2, 1]})\n rdf = pd.DataFrame({\"c\": [1, 2, 3, 4, 5, 6, 7], \"d\": [7, 6, 5, 4, 3, 2, 1]})\n\n for lhs, rhs in [\n (dd.from_pandas(ldf, 1), dd.from_pandas(rdf, 1)),\n (dd.from_pandas(ldf, 2), dd.from_pandas(rdf, 2)),\n (dd.from_pandas(ldf, 2), dd.from_pandas(rdf, 3)),\n (dd.from_pandas(ldf, 3), dd.from_pandas(rdf, 2)),\n ]:\n (lresult, rresult), div, parts = align_partitions(lhs, rhs)\n assert_eq(lresult, ldf)\n assert_eq(rresult, rdf)\n\n # different index\n # ... other code\n\n for lhs, rhs in [\n (dd.from_pandas(ldf, 1), dd.from_pandas(rdf, 1)),\n (dd.from_pandas(ldf, 2), dd.from_pandas(rdf, 2)),\n (dd.from_pandas(ldf, 2), dd.from_pandas(rdf, 3)),\n (dd.from_pandas(ldf, 3), dd.from_pandas(rdf, 2)),\n ]:\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_align_partitions.ldf_7_test_align_partitions.None_1.assert_eq_rresult_rdf_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_align_partitions.ldf_7_test_align_partitions.None_1.assert_eq_rresult_rdf_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 87, "end_line": 102, "span_ids": ["test_align_partitions"], "tokens": 289}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_align_partitions():\n # ... other code\n _check(a, b, aa, bb)\n # ... other code\n ldf = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5, 6, 7], \"b\": [7, 6, 5, 4, 3, 2, 1]}, index=list(\"abcdefg\")\n )\n rdf = pd.DataFrame(\n {\"c\": [1, 2, 3, 4, 5, 6, 7], \"d\": [7, 6, 5, 4, 3, 2, 1]}, index=list(\"fghijkl\")\n )\n\n for lhs, rhs in [\n (dd.from_pandas(ldf, 1), dd.from_pandas(rdf, 1)),\n (dd.from_pandas(ldf, 2), dd.from_pandas(rdf, 2)),\n (dd.from_pandas(ldf, 2), dd.from_pandas(rdf, 3)),\n (dd.from_pandas(ldf, 3), dd.from_pandas(rdf, 2)),\n ]:\n (lresult, rresult), div, parts = align_partitions(lhs, rhs)\n assert_eq(lresult, ldf)\n assert_eq(rresult, rdf)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_align_partitions_unknown_divisions_test_align_partitions_unknown_divisions.None_1.align_partitions_ddf_ddf": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_align_partitions_unknown_divisions_test_align_partitions_unknown_divisions.None_1.align_partitions_ddf_ddf", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 105, "end_line": 122, "span_ids": ["test_align_partitions_unknown_divisions"], "tokens": 216}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_align_partitions_unknown_divisions():\n df = pd.DataFrame({\"a\": [1, 2, 3, 4, 5, 6, 7], \"b\": [7, 6, 5, 4, 3, 2, 1]})\n # One known, one unknown\n ddf = dd.from_pandas(df, npartitions=2)\n ddf2 = dd.from_pandas(df, npartitions=2, sort=False)\n assert not ddf2.known_divisions\n\n with pytest.raises(ValueError):\n align_partitions(ddf, ddf2)\n\n # Both unknown\n 
ddf = dd.from_pandas(df + 1, npartitions=2, sort=False)\n ddf2 = dd.from_pandas(df, npartitions=2, sort=False)\n assert not ddf.known_divisions\n assert not ddf2.known_divisions\n\n with pytest.raises(ValueError):\n align_partitions(ddf, ddf2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test__maybe_align_partitions_test__maybe_align_partitions.None_1._maybe_align_partitions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test__maybe_align_partitions_test__maybe_align_partitions.None_1._maybe_align_partitions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 125, "end_line": 167, "span_ids": ["test__maybe_align_partitions"], "tokens": 469}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test__maybe_align_partitions():\n df = pd.DataFrame({\"a\": [1, 2, 3, 4, 5, 6, 7], \"b\": [7, 6, 5, 4, 3, 2, 1]})\n # Both known, same divisions\n ddf = dd.from_pandas(df + 1, npartitions=2)\n ddf2 = dd.from_pandas(df, npartitions=2)\n\n a, b = _maybe_align_partitions([ddf, ddf2])\n assert a is ddf\n assert b is ddf2\n\n # Both unknown, same divisions\n ddf = dd.from_pandas(df + 1, npartitions=2, sort=False)\n ddf2 = dd.from_pandas(df, npartitions=2, sort=False)\n assert not ddf.known_divisions\n assert not ddf2.known_divisions\n\n a, b = _maybe_align_partitions([ddf, ddf2])\n assert a is ddf\n assert b is ddf2\n\n # Both known, different divisions\n ddf = dd.from_pandas(df + 1, npartitions=2)\n ddf2 = dd.from_pandas(df, npartitions=3)\n\n a, b = _maybe_align_partitions([ddf, ddf2])\n assert a.divisions == b.divisions\n\n # Both unknown, different divisions\n ddf = dd.from_pandas(df + 1, npartitions=2, sort=False)\n ddf2 = dd.from_pandas(df, npartitions=3, sort=False)\n assert not ddf.known_divisions\n assert not ddf2.known_divisions\n\n with pytest.raises(ValueError):\n _maybe_align_partitions([ddf, ddf2])\n\n # One known, one unknown\n ddf = dd.from_pandas(df, npartitions=2)\n ddf2 = dd.from_pandas(df, npartitions=2, sort=False)\n assert not ddf2.known_divisions\n\n with pytest.raises(ValueError):\n _maybe_align_partitions([ddf, ddf2])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_indexed_dataframe_to_indexed_dataframe_test_merge_indexed_dataframe_to_indexed_dataframe.None_9": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_indexed_dataframe_to_indexed_dataframe_test_merge_indexed_dataframe_to_indexed_dataframe.None_9", "embedding": null, 
"metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 170, "end_line": 202, "span_ids": ["test_merge_indexed_dataframe_to_indexed_dataframe"], "tokens": 440}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_merge_indexed_dataframe_to_indexed_dataframe():\n A = pd.DataFrame({\"x\": [1, 2, 3, 4, 5, 6]}, index=[1, 2, 3, 4, 6, 7])\n a = dd.repartition(A, [1, 4, 7])\n\n B = pd.DataFrame({\"y\": list(\"abcdef\")}, index=[1, 2, 4, 5, 6, 8])\n b = dd.repartition(B, [1, 2, 5, 8])\n\n c = merge_indexed_dataframes(a, b, how=\"left\")\n assert c.divisions[0] == a.divisions[0]\n assert c.divisions[-1] == max(a.divisions + b.divisions)\n assert_eq(c, A.join(B))\n\n c = merge_indexed_dataframes(a, b, how=\"right\")\n assert c.divisions[0] == b.divisions[0]\n assert c.divisions[-1] == b.divisions[-1]\n assert_eq(c, A.join(B, how=\"right\"))\n\n c = merge_indexed_dataframes(a, b, how=\"inner\")\n assert c.divisions[0] == 1\n assert c.divisions[-1] == max(a.divisions + b.divisions)\n assert_eq(c.compute(), A.join(B, how=\"inner\"))\n\n c = merge_indexed_dataframes(a, b, how=\"outer\")\n assert c.divisions[0] == 1\n assert c.divisions[-1] == 8\n assert_eq(c.compute(), A.join(B, how=\"outer\"))\n\n assert sorted(merge_indexed_dataframes(a, b, how=\"inner\").dask) == sorted(\n merge_indexed_dataframes(a, b, how=\"inner\").dask\n )\n assert sorted(merge_indexed_dataframes(a, b, how=\"inner\").dask) != sorted(\n merge_indexed_dataframes(a, b, how=\"outer\").dask\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_list_eq_list_eq.dd__compat_assert_numpy_a": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_list_eq_list_eq.dd__compat_assert_numpy_a", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 205, "end_line": 223, "span_ids": ["list_eq"], "tokens": 135}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def list_eq(aa, bb):\n if isinstance(aa, dd.DataFrame):\n a = aa.compute(scheduler=\"sync\")\n else:\n a = aa\n if isinstance(bb, dd.DataFrame):\n b = bb.compute(scheduler=\"sync\")\n else:\n b = bb\n tm.assert_index_equal(a.columns, b.columns)\n\n if isinstance(a, pd.DataFrame):\n av = a.sort_values(list(a.columns)).values\n bv = b.sort_values(list(b.columns)).values\n else:\n av = a.sort_values().values\n bv = b.sort_values().values\n\n dd._compat.assert_numpy_array_equal(av, bv)", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_hash_join_test_hash_join.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_hash_join_test_hash_join.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 226, "end_line": 257, "span_ids": ["test_hash_join"], "tokens": 397}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"how\", [\"inner\", \"left\", \"right\", \"outer\"])\n@pytest.mark.parametrize(\"shuffle\", [\"disk\", \"tasks\"])\ndef test_hash_join(how, shuffle):\n A = pd.DataFrame({\"x\": [1, 2, 3, 4, 5, 6], \"y\": [1, 1, 2, 2, 3, 4]})\n a = dd.repartition(A, [0, 4, 5])\n\n B = pd.DataFrame({\"y\": [1, 3, 4, 4, 5, 6], \"z\": [6, 5, 4, 3, 2, 1]})\n b = dd.repartition(B, [0, 2, 5])\n\n c = hash_join(a, \"y\", b, \"y\", how)\n\n result = c.compute()\n expected = pd.merge(A, B, how, \"y\")\n list_eq(result, expected)\n\n # Different columns and npartitions\n c = hash_join(a, \"x\", b, \"z\", \"outer\", npartitions=3, shuffle=shuffle)\n assert c.npartitions == 3\n\n result = c.compute(scheduler=\"single-threaded\")\n expected = pd.merge(A, B, \"outer\", None, \"x\", \"z\")\n\n list_eq(result, expected)\n\n assert (\n hash_join(a, \"y\", b, \"y\", \"inner\", shuffle=shuffle)._name\n == hash_join(a, \"y\", b, \"y\", \"inner\", shuffle=shuffle)._name\n )\n assert (\n hash_join(a, \"y\", b, \"y\", \"inner\", shuffle=shuffle)._name\n != hash_join(a, \"y\", b, \"y\", \"outer\", shuffle=shuffle)._name\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_sequential_joins_test_sequential_joins.assert_eq_multi_join_pd_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_sequential_joins_test_sequential_joins.assert_eq_multi_join_pd_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 260, "end_line": 278, "span_ids": ["test_sequential_joins"], "tokens": 310}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_sequential_joins():\n # Pandas version of multiple inner joins\n df1 = pd.DataFrame(\n {\"key\": list(range(6)), \"A\": [\"A0\", \"A1\", \"A2\", \"A3\", \"A4\", \"A5\"]}\n )\n 
df2 = pd.DataFrame({\"key\": list(range(4)), \"B\": [\"B0\", \"B1\", \"B2\", \"B3\"]})\n df3 = pd.DataFrame({\"key\": list(range(1, 5)), \"C\": [\"C0\", \"C1\", \"C2\", \"C3\"]})\n\n join_pd = df1.join(df2, how=\"inner\", lsuffix=\"_l\", rsuffix=\"_r\")\n multi_join_pd = join_pd.join(df3, how=\"inner\", lsuffix=\"_l\", rsuffix=\"_r\")\n\n # Dask version of multiple inner joins\n ddf1 = dd.from_pandas(df1, npartitions=3)\n ddf2 = dd.from_pandas(df2, npartitions=2)\n ddf3 = dd.from_pandas(df3, npartitions=2)\n\n join_dd = ddf1.join(ddf2, how=\"inner\", lsuffix=\"_l\", rsuffix=\"_r\")\n multi_join_dd = join_dd.join(ddf3, how=\"inner\", lsuffix=\"_l\", rsuffix=\"_r\")\n assert_eq(multi_join_pd, multi_join_dd)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_indexed_test_merge_asof_indexed.assert_eq_c_C_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_indexed_test_merge_asof_indexed.assert_eq_c_C_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 281, "end_line": 296, "span_ids": ["test_merge_asof_indexed"], "tokens": 201}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_merge_asof_indexed():\n A = pd.DataFrame(\n {\"left_val\": list(\"abcd\" * 3)},\n index=[1, 3, 7, 9, 10, 13, 14, 17, 20, 24, 25, 28],\n )\n a = dd.from_pandas(A, npartitions=4)\n B = pd.DataFrame(\n {\"right_val\": list(\"xyz\" * 4)},\n index=[1, 2, 3, 6, 7, 10, 12, 14, 16, 19, 23, 26],\n )\n b = dd.from_pandas(B, npartitions=3)\n\n C = pd.merge_asof(A, B, left_index=True, right_index=True)\n c = dd.merge_asof(a, b, left_index=True, right_index=True)\n\n assert_eq(c, C)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_on_basic_test_merge_asof_on_basic.assert_eq_c_C_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_on_basic_test_merge_asof_on_basic.assert_eq_c_C_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 299, "end_line": 307, "span_ids": ["test_merge_asof_on_basic"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, 
"text": "def test_merge_asof_on_basic():\n A = pd.DataFrame({\"a\": [1, 5, 10], \"left_val\": [\"a\", \"b\", \"c\"]})\n a = dd.from_pandas(A, npartitions=2)\n B = pd.DataFrame({\"a\": [1, 2, 3, 6, 7], \"right_val\": [1, 2, 3, 6, 7]})\n b = dd.from_pandas(B, npartitions=2)\n\n C = pd.merge_asof(A, B, on=\"a\")\n c = dd.merge_asof(a, b, on=\"a\")\n assert_eq(c, C)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_on_test_merge_asof_on.assert_eq_c_C_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_on_test_merge_asof_on.assert_eq_c_C_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 310, "end_line": 324, "span_ids": ["test_merge_asof_on"], "tokens": 213}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"allow_exact_matches\", [True, False])\n@pytest.mark.parametrize(\"direction\", [\"backward\", \"forward\", \"nearest\"])\ndef test_merge_asof_on(allow_exact_matches, direction):\n A = pd.DataFrame({\"a\": [1, 5, 10], \"left_val\": [\"a\", \"b\", \"c\"]})\n a = dd.from_pandas(A, npartitions=2)\n B = pd.DataFrame({\"a\": [1, 2, 3, 6, 7], \"right_val\": [1, 2, 3, 6, 7]})\n b = dd.from_pandas(B, npartitions=2)\n\n C = pd.merge_asof(\n A, B, on=\"a\", allow_exact_matches=allow_exact_matches, direction=direction\n )\n c = dd.merge_asof(\n a, b, on=\"a\", allow_exact_matches=allow_exact_matches, direction=direction\n )\n assert_eq(c, C)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_left_on_right_index_test_merge_asof_left_on_right_index.for_nparts_in_1_2_3_.for_a1_idx2_in_.assert_eq_c_C_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_left_on_right_index_test_merge_asof_left_on_right_index.for_nparts_in_1_2_3_.for_a1_idx2_in_.assert_eq_c_C_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 327, "end_line": 390, "span_ids": ["test_merge_asof_left_on_right_index"], "tokens": 622}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"allow_exact_matches\", [True, 
False])\n@pytest.mark.parametrize(\"direction\", [\"backward\", \"forward\", \"nearest\"])\n@pytest.mark.parametrize(\"unknown_divisions\", [False, True])\ndef test_merge_asof_left_on_right_index(\n allow_exact_matches, direction, unknown_divisions\n):\n A = pd.DataFrame({\"a\": [1, 5, 10], \"left_val\": [\"a\", \"b\", \"c\"]}, index=[10, 20, 30])\n a = dd.from_pandas(A, npartitions=2)\n B = pd.DataFrame({\"right_val\": [2, 3, 6, 7]}, index=[2, 3, 6, 7])\n b = dd.from_pandas(B, npartitions=2)\n\n if unknown_divisions:\n a.divisions = [None] * len(a.divisions)\n\n C = pd.merge_asof(\n A,\n B,\n left_on=\"a\",\n right_index=True,\n allow_exact_matches=allow_exact_matches,\n direction=direction,\n )\n c = dd.merge_asof(\n a,\n b,\n left_on=\"a\",\n right_index=True,\n allow_exact_matches=allow_exact_matches,\n direction=direction,\n )\n assert_eq(c, C)\n\n for nparts in [1, 2, 3]:\n for a1, idx2 in (\n ([5, 10, 15, 20], [1, 2, 3, 4]),\n ([1, 2, 3, 4], [5, 10, 15, 20]),\n ([5, 5, 10, 10, 15, 15], [4, 5, 6, 9, 10, 11, 14, 15, 16]),\n ([5, 10, 15], [4, 4, 5, 5, 6, 6, 9, 9, 10, 10, 11, 11]),\n ):\n A = pd.DataFrame({\"a\": a1}, index=[x * 10 for x in a1])\n a = dd.from_pandas(A, npartitions=nparts)\n B = pd.DataFrame({\"b\": idx2}, index=idx2)\n b = dd.from_pandas(B, npartitions=nparts)\n\n if unknown_divisions:\n a.divisions = [None] * len(a.divisions)\n\n C = pd.merge_asof(\n A,\n B,\n left_on=\"a\",\n right_index=True,\n allow_exact_matches=allow_exact_matches,\n direction=direction,\n )\n c = dd.merge_asof(\n a,\n b,\n left_on=\"a\",\n right_index=True,\n allow_exact_matches=allow_exact_matches,\n direction=direction,\n )\n assert_eq(c, C)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_indexed_two_partitions_test_merge_asof_indexed_two_partitions.assert_eq_c_C_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_indexed_two_partitions_test_merge_asof_indexed_two_partitions.assert_eq_c_C_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 393, "end_line": 401, "span_ids": ["test_merge_asof_indexed_two_partitions"], "tokens": 154}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_merge_asof_indexed_two_partitions():\n A = pd.DataFrame({\"left_val\": [\"a\", \"b\", \"c\"]}, index=[1, 5, 10])\n a = dd.from_pandas(A, npartitions=2)\n B = pd.DataFrame({\"right_val\": [1, 2, 3, 6, 7]}, index=[1, 2, 3, 6, 7])\n b = dd.from_pandas(B, npartitions=2)\n\n C = pd.merge_asof(A, B, left_index=True, right_index=True)\n c = dd.merge_asof(a, b, left_index=True, right_index=True)\n assert_eq(c, C)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_on_by_test_merge_asof_on_by.assert_eq_c_C_check_ind": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_on_by_test_merge_asof_on_by.assert_eq_c_C_check_ind", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 404, "end_line": 453, "span_ids": ["test_merge_asof_on_by"], "tokens": 649}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_merge_asof_on_by():\n times_A = [\n pd.to_datetime(d)\n for d in [\n \"2016-05-25 13:30:00.023\",\n \"2016-05-25 13:30:00.023\",\n \"2016-05-25 13:30:00.030\",\n \"2016-05-25 13:30:00.041\",\n \"2016-05-25 13:30:00.048\",\n \"2016-05-25 13:30:00.049\",\n \"2016-05-25 13:30:00.072\",\n \"2016-05-25 13:30:00.075\",\n ]\n ]\n tickers_A = [\"GOOG\", \"MSFT\", \"MSFT\", \"MSFT\", \"GOOG\", \"AAPL\", \"GOOG\", \"MSFT\"]\n bids_A = [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01]\n asks_A = [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03]\n times_B = [\n pd.to_datetime(d)\n for d in [\n \"2016-05-25 13:30:00.023\",\n \"2016-05-25 13:30:00.038\",\n \"2016-05-25 13:30:00.048\",\n \"2016-05-25 13:30:00.048\",\n \"2016-05-25 13:30:00.048\",\n ]\n ]\n tickers_B = [\"MSFT\", \"MSFT\", \"GOOG\", \"GOOG\", \"AAPL\"]\n prices_B = [51.95, 51.95, 720.77, 720.92, 98.00]\n quantities_B = [75, 155, 100, 100, 100]\n\n A = pd.DataFrame(\n {\"time\": times_A, \"ticker\": tickers_A, \"bid\": bids_A, \"ask\": asks_A},\n columns=[\"time\", \"ticker\", \"bid\", \"ask\"],\n )\n a = dd.from_pandas(A, npartitions=4)\n B = pd.DataFrame(\n {\n \"time\": times_B,\n \"ticker\": tickers_B,\n \"price\": prices_B,\n \"quantity\": quantities_B,\n },\n columns=[\"time\", \"ticker\", \"price\", \"quantity\"],\n )\n b = dd.from_pandas(B, npartitions=3)\n\n C = pd.merge_asof(B, A, on=\"time\", by=\"ticker\")\n c = dd.merge_asof(b, a, on=\"time\", by=\"ticker\")\n assert_eq(c, C, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_on_by_tolerance_test_merge_asof_on_by_tolerance.assert_eq_c_C_check_ind": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_on_by_tolerance_test_merge_asof_on_by_tolerance.assert_eq_c_C_check_ind", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 456, "end_line": 505, "span_ids": ["test_merge_asof_on_by_tolerance"], "tokens": 668}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", 
"tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_merge_asof_on_by_tolerance():\n times_A = [\n pd.to_datetime(d)\n for d in [\n \"2016-05-25 13:30:00.023\",\n \"2016-05-25 13:30:00.023\",\n \"2016-05-25 13:30:00.030\",\n \"2016-05-25 13:30:00.041\",\n \"2016-05-25 13:30:00.048\",\n \"2016-05-25 13:30:00.049\",\n \"2016-05-25 13:30:00.072\",\n \"2016-05-25 13:30:00.075\",\n ]\n ]\n tickers_A = [\"GOOG\", \"MSFT\", \"MSFT\", \"MSFT\", \"GOOG\", \"AAPL\", \"GOOG\", \"MSFT\"]\n bids_A = [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01]\n asks_A = [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03]\n times_B = [\n pd.to_datetime(d)\n for d in [\n \"2016-05-25 13:30:00.023\",\n \"2016-05-25 13:30:00.038\",\n \"2016-05-25 13:30:00.048\",\n \"2016-05-25 13:30:00.048\",\n \"2016-05-25 13:30:00.048\",\n ]\n ]\n tickers_B = [\"MSFT\", \"MSFT\", \"GOOG\", \"GOOG\", \"AAPL\"]\n prices_B = [51.95, 51.95, 720.77, 720.92, 98.00]\n quantities_B = [75, 155, 100, 100, 100]\n\n A = pd.DataFrame(\n {\"time\": times_A, \"ticker\": tickers_A, \"bid\": bids_A, \"ask\": asks_A},\n columns=[\"time\", \"ticker\", \"bid\", \"ask\"],\n )\n a = dd.from_pandas(A, npartitions=4)\n B = pd.DataFrame(\n {\n \"time\": times_B,\n \"ticker\": tickers_B,\n \"price\": prices_B,\n \"quantity\": quantities_B,\n },\n columns=[\"time\", \"ticker\", \"price\", \"quantity\"],\n )\n b = dd.from_pandas(B, npartitions=3)\n\n C = pd.merge_asof(B, A, on=\"time\", by=\"ticker\", tolerance=pd.Timedelta(\"2ms\"))\n c = dd.merge_asof(b, a, on=\"time\", by=\"ticker\", tolerance=pd.Timedelta(\"2ms\"))\n assert_eq(c, C, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_on_by_tolerance_no_exact_matches_test_merge_asof_on_by_tolerance_no_exact_matches.assert_eq_c_C_check_ind": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_on_by_tolerance_no_exact_matches_test_merge_asof_on_by_tolerance_no_exact_matches.assert_eq_c_C_check_ind", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 508, "end_line": 571, "span_ids": ["test_merge_asof_on_by_tolerance_no_exact_matches"], "tokens": 699}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_merge_asof_on_by_tolerance_no_exact_matches():\n times_A = [\n pd.to_datetime(d)\n for d in [\n \"2016-05-25 13:30:00.023\",\n \"2016-05-25 13:30:00.023\",\n \"2016-05-25 13:30:00.030\",\n \"2016-05-25 13:30:00.041\",\n \"2016-05-25 13:30:00.048\",\n \"2016-05-25 13:30:00.049\",\n \"2016-05-25 13:30:00.072\",\n \"2016-05-25 13:30:00.075\",\n ]\n ]\n tickers_A = [\"GOOG\", \"MSFT\", \"MSFT\", \"MSFT\", \"GOOG\", \"AAPL\", \"GOOG\", \"MSFT\"]\n bids_A = 
[720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01]\n asks_A = [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03]\n times_B = [\n pd.to_datetime(d)\n for d in [\n \"2016-05-25 13:30:00.023\",\n \"2016-05-25 13:30:00.038\",\n \"2016-05-25 13:30:00.048\",\n \"2016-05-25 13:30:00.048\",\n \"2016-05-25 13:30:00.048\",\n ]\n ]\n tickers_B = [\"MSFT\", \"MSFT\", \"GOOG\", \"GOOG\", \"AAPL\"]\n prices_B = [51.95, 51.95, 720.77, 720.92, 98.00]\n quantities_B = [75, 155, 100, 100, 100]\n\n A = pd.DataFrame(\n {\"time\": times_A, \"ticker\": tickers_A, \"bid\": bids_A, \"ask\": asks_A},\n columns=[\"time\", \"ticker\", \"bid\", \"ask\"],\n )\n a = dd.from_pandas(A, npartitions=4)\n B = pd.DataFrame(\n {\n \"time\": times_B,\n \"ticker\": tickers_B,\n \"price\": prices_B,\n \"quantity\": quantities_B,\n },\n columns=[\"time\", \"ticker\", \"price\", \"quantity\"],\n )\n b = dd.from_pandas(B, npartitions=3)\n\n C = pd.merge_asof(\n B,\n A,\n on=\"time\",\n by=\"ticker\",\n tolerance=pd.Timedelta(\"10ms\"),\n allow_exact_matches=False,\n )\n c = dd.merge_asof(\n b,\n a,\n on=\"time\",\n by=\"ticker\",\n tolerance=pd.Timedelta(\"10ms\"),\n allow_exact_matches=False,\n )\n assert_eq(c, C, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_unsorted_raises_test_merge_asof_unsorted_raises.with_pytest_raises_ValueE.result_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_unsorted_raises_test_merge_asof_unsorted_raises.with_pytest_raises_ValueE.result_compute_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 574, "end_line": 583, "span_ids": ["test_merge_asof_unsorted_raises"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_merge_asof_unsorted_raises():\n A = pd.DataFrame({\"a\": [1, 5, 10], \"left_val\": [\"a\", \"b\", \"c\"]})\n a = dd.from_pandas(A, npartitions=2)\n B = pd.DataFrame({\"a\": [2, 1, 3, 6, 7], \"right_val\": [1, 2, 3, 6, 7]})\n b = dd.from_pandas(B, npartitions=2)\n\n result = dd.merge_asof(a, b, on=\"a\")\n # raise at runtime\n with pytest.raises(ValueError, match=\"right keys\"):\n result.compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_indexed_concat_test_indexed_concat.with_warnings_catch_warni.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_indexed_concat_test_indexed_concat.with_warnings_catch_warni.None_1", "embedding": null, "metadata": 
{"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 586, "end_line": 607, "span_ids": ["test_indexed_concat"], "tokens": 305}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"join\", [\"inner\", \"outer\"])\ndef test_indexed_concat(join):\n A = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 6, 7], \"y\": list(\"abcdef\")}, index=[1, 2, 3, 4, 6, 7]\n )\n a = dd.repartition(A, [1, 4, 7])\n\n B = pd.DataFrame({\"x\": [10, 20, 40, 50, 60, 80]}, index=[1, 2, 4, 5, 6, 8])\n b = dd.repartition(B, [1, 2, 5, 8])\n\n expected = pd.concat([A, B], axis=0, join=join, sort=False)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", FutureWarning)\n result = concat_indexed_dataframes([a, b], join=join)\n assert_eq(result, expected)\n assert sorted(concat_indexed_dataframes([a, b], join=join).dask) == sorted(\n concat_indexed_dataframes([a, b], join=join).dask\n )\n assert sorted(concat_indexed_dataframes([a, b], join=\"inner\").dask) != sorted(\n concat_indexed_dataframes([a, b], join=\"outer\").dask\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_test_concat.None_1.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_test_concat.None_1.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 610, "end_line": 649, "span_ids": ["test_concat"], "tokens": 521}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"join\", [\"inner\", \"outer\"])\ndef test_concat(join):\n pdf1 = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 6, 7], \"y\": list(\"abcdef\")}, index=[1, 2, 3, 4, 6, 7]\n )\n ddf1 = dd.from_pandas(pdf1, 2)\n pdf2 = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 6, 7], \"y\": list(\"abcdef\")}, index=[8, 9, 10, 11, 12, 13]\n )\n ddf2 = dd.from_pandas(pdf2, 2)\n\n # different columns\n pdf3 = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 6, 7], \"z\": list(\"abcdef\")}, index=[8, 9, 10, 11, 12, 13]\n )\n ddf3 = dd.from_pandas(pdf3, 2)\n\n kwargs = {\"sort\": False}\n\n for (dd1, dd2, pd1, pd2) in [\n (ddf1, ddf2, pdf1, pdf2),\n (ddf1, ddf3, pdf1, pdf3),\n ]:\n\n expected = pd.concat([pd1, pd2], join=join, **kwargs)\n result = dd.concat([dd1, dd2], join=join, **kwargs)\n assert_eq(result, expected)\n\n # test outer only, inner has a problem on pandas side\n for (dd1, dd2, pd1, pd2) in [\n (ddf1, ddf2, pdf1, pdf2),\n (ddf1, ddf3, pdf1, pdf3),\n (ddf1.x, ddf2.x, pdf1.x, 
pdf2.x),\n (ddf1.x, ddf3.z, pdf1.x, pdf3.z),\n (ddf1.x, ddf2.x, pdf1.x, pdf2.x),\n (ddf1.x, ddf3.z, pdf1.x, pdf3.z),\n ]:\n expected = pd.concat([pd1, pd2], **kwargs)\n result = dd.concat([dd1, dd2], **kwargs)\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_different_dtypes_test_concat_different_dtypes.assert_dask_dtypes_pa": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_different_dtypes_test_concat_different_dtypes.assert_dask_dtypes_pa", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 652, "end_line": 679, "span_ids": ["test_concat_different_dtypes"], "tokens": 303}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"value_1, value_2\",\n [\n (1.0, 1),\n (1.0, \"one\"),\n (1.0, pd.to_datetime(\"1970-01-01\")),\n (1, \"one\"),\n (1, pd.to_datetime(\"1970-01-01\")),\n (\"one\", pd.to_datetime(\"1970-01-01\")),\n ],\n)\ndef test_concat_different_dtypes(value_1, value_2):\n # check that the resulting dataframe has coherent dtypes\n # refer to https://github.com/dask/dask/issues/4685 and\n # https://github.com/dask/dask/issues/5968\n df_1 = pd.DataFrame({\"x\": [value_1]})\n df_2 = pd.DataFrame({\"x\": [value_2]})\n df = pd.concat([df_1, df_2], axis=0)\n\n pandas_dtype = df[\"x\"].dtype\n\n ddf_1 = dd.from_pandas(df_1, npartitions=1)\n ddf_2 = dd.from_pandas(df_2, npartitions=1)\n ddf = dd.concat([ddf_1, ddf_2], axis=0)\n\n dask_dtypes = list(ddf.map_partitions(lambda x: x.dtypes).compute())\n\n assert dask_dtypes == [pandas_dtype, pandas_dtype]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_columns_dtypes_test_merge_columns_dtypes.assert_has_nans_and_warn": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_columns_dtypes_test_merge_columns_dtypes.assert_has_nans_and_warn", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 682, "end_line": 717, "span_ids": ["test_merge_columns_dtypes"], "tokens": 350}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": 
"@pytest.mark.parametrize(\"how\", [\"inner\", \"outer\", \"left\", \"right\"])\n@pytest.mark.parametrize(\"on_index\", [True, False])\ndef test_merge_columns_dtypes(how, on_index):\n # tests results of merges with merge columns having different dtypes;\n # asserts that either the merge was successful or the corresponding warning is raised\n # addresses issue #4574\n\n df1 = pd.DataFrame(\n {\"A\": list(np.arange(5).astype(float)) * 2, \"B\": list(np.arange(5)) * 2}\n )\n df2 = pd.DataFrame({\"A\": np.arange(5), \"B\": np.arange(5)})\n\n a = dd.from_pandas(df1, 2) # merge column \"A\" is float\n b = dd.from_pandas(df2, 2) # merge column \"A\" is int\n\n on = [\"A\"]\n left_index = right_index = on_index\n\n if on_index:\n a = a.set_index(\"A\")\n b = b.set_index(\"A\")\n on = None\n\n with pytest.warns(None) as record:\n result = dd.merge(\n a, b, on=on, how=how, left_index=left_index, right_index=right_index\n )\n\n warned = any(\"merge column data type mismatches\" in str(r) for r in record)\n\n # result type depends on merge operation -> convert to pandas\n result = result if isinstance(result, pd.DataFrame) else result.compute()\n\n has_nans = result.isna().values.any()\n\n assert (has_nans and warned) or not has_nans", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_test_merge._pd_merge_A_B_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_test_merge._pd_merge_A_B_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 720, "end_line": 796, "span_ids": ["test_merge"], "tokens": 793}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"how\", [\"inner\", \"outer\", \"left\", \"right\"])\n@pytest.mark.parametrize(\"shuffle\", [\"disk\", \"tasks\"])\ndef test_merge(how, shuffle):\n A = pd.DataFrame({\"x\": [1, 2, 3, 4, 5, 6], \"y\": [1, 1, 2, 2, 3, 4]})\n a = dd.repartition(A, [0, 4, 5])\n\n B = pd.DataFrame({\"y\": [1, 3, 4, 4, 5, 6], \"z\": [6, 5, 4, 3, 2, 1]})\n b = dd.repartition(B, [0, 2, 5])\n\n assert_eq(\n dd.merge(a, b, left_index=True, right_index=True, how=how, shuffle=shuffle),\n pd.merge(A, B, left_index=True, right_index=True, how=how),\n )\n\n result = dd.merge(a, b, on=\"y\", how=how)\n list_eq(result, pd.merge(A, B, on=\"y\", how=how))\n assert all(d is None for d in result.divisions)\n\n list_eq(\n dd.merge(a, b, left_on=\"x\", right_on=\"z\", how=how, shuffle=shuffle),\n pd.merge(A, B, left_on=\"x\", right_on=\"z\", how=how),\n )\n list_eq(\n dd.merge(\n a,\n b,\n left_on=\"x\",\n right_on=\"z\",\n how=how,\n suffixes=(\"1\", \"2\"),\n shuffle=shuffle,\n ),\n pd.merge(A, B, left_on=\"x\", right_on=\"z\", how=how, suffixes=(\"1\", \"2\")),\n )\n\n list_eq(dd.merge(a, b, how=how, shuffle=shuffle), pd.merge(A, B, how=how))\n list_eq(dd.merge(a, B, how=how, 
shuffle=shuffle), pd.merge(A, B, how=how))\n list_eq(dd.merge(A, b, how=how, shuffle=shuffle), pd.merge(A, B, how=how))\n list_eq(dd.merge(A, B, how=how, shuffle=shuffle), pd.merge(A, B, how=how))\n\n list_eq(\n dd.merge(a, b, left_index=True, right_index=True, how=how, shuffle=shuffle),\n pd.merge(A, B, left_index=True, right_index=True, how=how),\n )\n list_eq(\n dd.merge(\n a,\n b,\n left_index=True,\n right_index=True,\n how=how,\n suffixes=(\"1\", \"2\"),\n shuffle=shuffle,\n ),\n pd.merge(A, B, left_index=True, right_index=True, how=how, suffixes=(\"1\", \"2\")),\n )\n\n list_eq(\n dd.merge(a, b, left_on=\"x\", right_index=True, how=how, shuffle=shuffle),\n pd.merge(A, B, left_on=\"x\", right_index=True, how=how),\n )\n list_eq(\n dd.merge(\n a,\n b,\n left_on=\"x\",\n right_index=True,\n how=how,\n suffixes=(\"1\", \"2\"),\n shuffle=shuffle,\n ),\n pd.merge(A, B, left_on=\"x\", right_index=True, how=how, suffixes=(\"1\", \"2\")),\n )\n\n # pandas result looks buggy\n # list_eq(dd.merge(a, B, left_index=True, right_on='y'),\n # pd.merge(A, B, left_index=True, right_on='y'))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_tasks_semi_anti_cudf_test_merge_tasks_semi_anti_cudf.assert_eq_result_expect_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_tasks_semi_anti_cudf_test_merge_tasks_semi_anti_cudf.assert_eq_result_expect_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 799, "end_line": 848, "span_ids": ["test_merge_tasks_semi_anti_cudf"], "tokens": 521}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"parts\", [(3, 3), (3, 1), (1, 3)])\n@pytest.mark.parametrize(\"how\", [\"leftsemi\", \"leftanti\"])\n@pytest.mark.parametrize(\n \"engine\",\n [\n \"cudf\",\n pytest.param(\n \"pandas\",\n marks=pytest.mark.xfail(\n reason=\"Pandas does not support leftsemi or leftanti\"\n ),\n ),\n ],\n)\ndef test_merge_tasks_semi_anti_cudf(engine, how, parts):\n if engine == \"cudf\":\n # NOTE: engine == \"cudf\" requires cudf/dask_cudf,\n # will be skipped by non-GPU CI.\n\n cudf = pytest.importorskip(\"cudf\")\n dask_cudf = pytest.importorskip(\"dask_cudf\")\n\n emp = pd.DataFrame(\n {\n \"emp_id\": np.arange(101, stop=106),\n \"name\": [\"John\", \"Tom\", \"Harry\", \"Rahul\", \"Sakil\"],\n \"city\": [\"Cal\", \"Mum\", \"Del\", \"Ban\", \"Del\"],\n \"salary\": [50000, 40000, 80000, 60000, 90000],\n }\n )\n skills = pd.DataFrame(\n {\n \"skill_id\": [404, 405, 406, 407, 408],\n \"emp_id\": [103, 101, 105, 102, 101],\n \"skill_name\": [\"Dask\", \"Spark\", \"C\", \"Python\", \"R\"],\n }\n )\n\n if engine == \"cudf\":\n emp = cudf.from_pandas(emp)\n skills = cudf.from_pandas(skills)\n dd_emp = dask_cudf.from_cudf(emp, npartitions=parts[0])\n dd_skills = dask_cudf.from_cudf(skills, 
npartitions=parts[1])\n else:\n dd_emp = dd.from_pandas(emp, npartitions=parts[0])\n dd_skills = dd.from_pandas(skills, npartitions=parts[1])\n\n expect = emp.merge(skills, on=\"emp_id\", how=how).sort_values([\"emp_id\"])\n result = dd_emp.merge(dd_skills, on=\"emp_id\", how=how).sort_values([\"emp_id\"])\n assert_eq(result, expect, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_tasks_passes_through_test_merge_tasks_passes_through.assert_not_any_partd_in": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_tasks_passes_through_test_merge_tasks_passes_through.assert_not_any_partd_in", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 851, "end_line": 860, "span_ids": ["test_merge_tasks_passes_through"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_merge_tasks_passes_through():\n a = pd.DataFrame({\"a\": [1, 2, 3, 4, 5, 6, 7], \"b\": [7, 6, 5, 4, 3, 2, 1]})\n b = pd.DataFrame({\"c\": [1, 2, 3, 4, 5, 6, 7], \"d\": [7, 6, 5, 4, 3, 2, 1]})\n\n aa = dd.from_pandas(a, npartitions=3)\n bb = dd.from_pandas(b, npartitions=2)\n\n cc = aa.merge(bb, left_on=\"a\", right_on=\"d\", shuffle=\"tasks\")\n\n assert not any(\"partd\" in k[0] for k in cc.dask)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_by_index_patterns_test_merge_by_index_patterns.pd_merge.return.out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_by_index_patterns_test_merge_by_index_patterns.pd_merge.return.out", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 863, "end_line": 906, "span_ids": ["test_merge_by_index_patterns"], "tokens": 799}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.slow\n@pytest.mark.parametrize(\"shuffle\", [\"disk\", \"tasks\"])\n@pytest.mark.parametrize(\"how\", [\"inner\", \"outer\", \"left\", \"right\"])\ndef test_merge_by_index_patterns(how, shuffle):\n\n pdf1l = pd.DataFrame({\"a\": [1, 2, 3, 4, 5, 6, 7], \"b\": [7, 6, 5, 4, 3, 2, 1]})\n pdf1r = pd.DataFrame({\"c\": [1, 2, 3, 
4, 5, 6, 7], \"d\": [7, 6, 5, 4, 3, 2, 1]})\n\n pdf2l = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5, 6, 7], \"b\": [7, 6, 5, 4, 3, 2, 1]}, index=list(\"abcdefg\")\n )\n pdf2r = pd.DataFrame(\n {\"c\": [7, 6, 5, 4, 3, 2, 1], \"d\": [7, 6, 5, 4, 3, 2, 1]}, index=list(\"abcdefg\")\n )\n\n pdf3l = pdf2l\n pdf3r = pd.DataFrame({\"c\": [6, 7, 8, 9], \"d\": [5, 4, 3, 2]}, index=list(\"abdg\"))\n\n pdf4l = pdf2l\n pdf4r = pd.DataFrame({\"c\": [9, 10, 11, 12], \"d\": [5, 4, 3, 2]}, index=list(\"abdg\"))\n\n # completely different index\n pdf5l = pd.DataFrame(\n {\"a\": [1, 1, 2, 2, 3, 3, 4], \"b\": [7, 6, 5, 4, 3, 2, 1]}, index=list(\"lmnopqr\")\n )\n pdf5r = pd.DataFrame({\"c\": [1, 1, 1, 1], \"d\": [5, 4, 3, 2]}, index=list(\"abcd\"))\n\n pdf6l = pd.DataFrame(\n {\"a\": [1, 1, 2, 2, 3, 3, 4], \"b\": [7, 6, 5, 4, 3, 2, 1]}, index=list(\"cdefghi\")\n )\n pdf6r = pd.DataFrame({\"c\": [1, 2, 1, 2], \"d\": [5, 4, 3, 2]}, index=list(\"abcd\"))\n\n pdf7l = pd.DataFrame(\n {\"a\": [1, 1, 2, 2, 3, 3, 4], \"b\": [7, 6, 5, 4, 3, 2, 1]}, index=list(\"abcdefg\")\n )\n pdf7r = pd.DataFrame({\"c\": [5, 6, 7, 8], \"d\": [5, 4, 3, 2]}, index=list(\"fghi\"))\n\n def pd_merge(left, right, **kwargs):\n # Workaround pandas bug where output dtype of empty index will be int64\n # even if input was object.\n out = pd.merge(left, right, **kwargs)\n if len(out) == 0:\n return out.set_index(out.index.astype(left.index.dtype))\n return out\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_by_index_patterns.for_pdl_pdr_in__test_merge_by_index_patterns.for_pdl_pdr_in_.for_lpart_rpart_in_.None_15": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_by_index_patterns.for_pdl_pdr_in__test_merge_by_index_patterns.for_pdl_pdr_in_.for_lpart_rpart_in_.None_15", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 908, "end_line": 1052, "span_ids": ["test_merge_by_index_patterns"], "tokens": 1162}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.slow\n@pytest.mark.parametrize(\"shuffle\", [\"disk\", \"tasks\"])\n@pytest.mark.parametrize(\"how\", [\"inner\", \"outer\", \"left\", \"right\"])\ndef test_merge_by_index_patterns(how, shuffle):\n # ... 
other code\n\n for pdl, pdr in [\n (pdf1l, pdf1r),\n (pdf2l, pdf2r),\n (pdf3l, pdf3r),\n (pdf4l, pdf4r),\n (pdf5l, pdf5r),\n (pdf6l, pdf6r),\n (pdf7l, pdf7r),\n ]:\n\n for lpart, rpart in [\n (2, 2), # same partition\n (3, 2), # left npartition > right npartition\n (2, 3),\n ]: # left npartition < right npartition\n\n ddl = dd.from_pandas(pdl, lpart)\n ddr = dd.from_pandas(pdr, rpart)\n\n assert_eq(\n dd.merge(\n ddl,\n ddr,\n how=how,\n left_index=True,\n right_index=True,\n shuffle=shuffle,\n ),\n pd_merge(pdl, pdr, how=how, left_index=True, right_index=True),\n )\n assert_eq(\n dd.merge(\n ddr,\n ddl,\n how=how,\n left_index=True,\n right_index=True,\n shuffle=shuffle,\n ),\n pd_merge(pdr, pdl, how=how, left_index=True, right_index=True),\n )\n\n assert_eq(\n dd.merge(\n ddl,\n ddr,\n how=how,\n left_index=True,\n right_index=True,\n shuffle=shuffle,\n indicator=True,\n ),\n pd_merge(\n pdl, pdr, how=how, left_index=True, right_index=True, indicator=True\n ),\n )\n assert_eq(\n dd.merge(\n ddr,\n ddl,\n how=how,\n left_index=True,\n right_index=True,\n shuffle=shuffle,\n indicator=True,\n ),\n pd_merge(\n pdr, pdl, how=how, left_index=True, right_index=True, indicator=True\n ),\n )\n\n assert_eq(\n ddr.merge(\n ddl, how=how, left_index=True, right_index=True, shuffle=shuffle\n ),\n pdr.merge(pdl, how=how, left_index=True, right_index=True),\n )\n assert_eq(\n ddl.merge(\n ddr, how=how, left_index=True, right_index=True, shuffle=shuffle\n ),\n pdl.merge(pdr, how=how, left_index=True, right_index=True),\n )\n\n # hash join\n list_eq(\n dd.merge(ddl, ddr, how=how, left_on=\"a\", right_on=\"c\", shuffle=shuffle),\n pd.merge(pdl, pdr, how=how, left_on=\"a\", right_on=\"c\"),\n )\n list_eq(\n dd.merge(ddl, ddr, how=how, left_on=\"b\", right_on=\"d\", shuffle=shuffle),\n pd.merge(pdl, pdr, how=how, left_on=\"b\", right_on=\"d\"),\n )\n\n list_eq(\n dd.merge(\n ddr,\n ddl,\n how=how,\n left_on=\"c\",\n right_on=\"a\",\n shuffle=shuffle,\n indicator=True,\n ),\n pd.merge(pdr, pdl, how=how, left_on=\"c\", right_on=\"a\", indicator=True),\n )\n list_eq(\n dd.merge(\n ddr,\n ddl,\n how=how,\n left_on=\"d\",\n right_on=\"b\",\n shuffle=shuffle,\n indicator=True,\n ),\n pd.merge(pdr, pdl, how=how, left_on=\"d\", right_on=\"b\", indicator=True),\n )\n\n list_eq(\n dd.merge(ddr, ddl, how=how, left_on=\"c\", right_on=\"a\", shuffle=shuffle),\n pd.merge(pdr, pdl, how=how, left_on=\"c\", right_on=\"a\"),\n )\n list_eq(\n dd.merge(ddr, ddl, how=how, left_on=\"d\", right_on=\"b\", shuffle=shuffle),\n pd.merge(pdr, pdl, how=how, left_on=\"d\", right_on=\"b\"),\n )\n\n list_eq(\n ddl.merge(ddr, how=how, left_on=\"a\", right_on=\"c\", shuffle=shuffle),\n pdl.merge(pdr, how=how, left_on=\"a\", right_on=\"c\"),\n )\n list_eq(\n ddl.merge(ddr, how=how, left_on=\"b\", right_on=\"d\", shuffle=shuffle),\n pdl.merge(pdr, how=how, left_on=\"b\", right_on=\"d\"),\n )\n\n list_eq(\n ddr.merge(ddl, how=how, left_on=\"c\", right_on=\"a\", shuffle=shuffle),\n pdr.merge(pdl, how=how, left_on=\"c\", right_on=\"a\"),\n )\n list_eq(\n ddr.merge(ddl, how=how, left_on=\"d\", right_on=\"b\", shuffle=shuffle),\n pdr.merge(pdl, how=how, left_on=\"d\", right_on=\"b\"),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_join_by_index_patterns_test_join_by_index_patterns.pdf7r.pd_DataFrame_c_list_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_join_by_index_patterns_test_join_by_index_patterns.pdf7r.pd_DataFrame_c_list_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1055, "end_line": 1096, "span_ids": ["test_join_by_index_patterns"], "tokens": 621}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"how\", [\"inner\", \"outer\", \"left\", \"right\"])\n@pytest.mark.parametrize(\"shuffle\", [\"disk\", \"tasks\"])\ndef test_join_by_index_patterns(how, shuffle):\n\n # Similar test cases as test_merge_by_index_patterns,\n # but columns / index for join have same dtype\n\n pdf1l = pd.DataFrame(\n {\"a\": list(\"abcdefg\"), \"b\": [7, 6, 5, 4, 3, 2, 1]}, index=list(\"abcdefg\")\n )\n pdf1r = pd.DataFrame(\n {\"c\": list(\"abcdefg\"), \"d\": [7, 6, 5, 4, 3, 2, 1]}, index=list(\"abcdefg\")\n )\n\n pdf2l = pdf1l\n pdf2r = pd.DataFrame(\n {\"c\": list(\"gfedcba\"), \"d\": [7, 6, 5, 4, 3, 2, 1]}, index=list(\"abcdefg\")\n )\n\n pdf3l = pdf1l\n pdf3r = pd.DataFrame({\"c\": list(\"abdg\"), \"d\": [5, 4, 3, 2]}, index=list(\"abdg\"))\n\n pdf4l = pd.DataFrame(\n {\"a\": list(\"abcabce\"), \"b\": [7, 6, 5, 4, 3, 2, 1]}, index=list(\"abcdefg\")\n )\n pdf4r = pd.DataFrame({\"c\": list(\"abda\"), \"d\": [5, 4, 3, 2]}, index=list(\"abdg\"))\n\n # completely different index\n pdf5l = pd.DataFrame(\n {\"a\": list(\"lmnopqr\"), \"b\": [7, 6, 5, 4, 3, 2, 1]}, index=list(\"lmnopqr\")\n )\n pdf5r = pd.DataFrame({\"c\": list(\"abcd\"), \"d\": [5, 4, 3, 2]}, index=list(\"abcd\"))\n\n pdf6l = pd.DataFrame(\n {\"a\": list(\"cdefghi\"), \"b\": [7, 6, 5, 4, 3, 2, 1]}, index=list(\"cdefghi\")\n )\n pdf6r = pd.DataFrame({\"c\": list(\"abab\"), \"d\": [5, 4, 3, 2]}, index=list(\"abcd\"))\n\n pdf7l = pd.DataFrame(\n {\"a\": list(\"aabbccd\"), \"b\": [7, 6, 5, 4, 3, 2, 1]}, index=list(\"abcdefg\")\n )\n pdf7r = pd.DataFrame({\"c\": list(\"aabb\"), \"d\": [5, 4, 3, 2]}, index=list(\"fghi\"))\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_join_by_index_patterns.for_pdl_pdr_in__test_join_by_index_patterns.for_pdl_pdr_in_.for_lpart_rpart_in_2_._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_join_by_index_patterns.for_pdl_pdr_in__test_join_by_index_patterns.for_pdl_pdr_in_.for_lpart_rpart_in_2_._", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1098, "end_line": 1145, "span_ids": ["test_join_by_index_patterns"], "tokens": 659}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"how\", [\"inner\", \"outer\", \"left\", \"right\"])\n@pytest.mark.parametrize(\"shuffle\", [\"disk\", \"tasks\"])\ndef test_join_by_index_patterns(how, shuffle):\n # ... other code\n\n for pdl, pdr in [\n (pdf1l, pdf1r),\n (pdf2l, pdf2r),\n (pdf3l, pdf3r),\n (pdf4l, pdf4r),\n (pdf5l, pdf5r),\n (pdf6l, pdf6r),\n (pdf7l, pdf7r),\n ]:\n\n for lpart, rpart in [(2, 2), (3, 2), (2, 3)]:\n\n ddl = dd.from_pandas(pdl, lpart)\n ddr = dd.from_pandas(pdr, rpart)\n\n assert_eq(ddl.join(ddr, how=how, shuffle=shuffle), pdl.join(pdr, how=how))\n assert_eq(ddr.join(ddl, how=how, shuffle=shuffle), pdr.join(pdl, how=how))\n\n assert_eq(\n ddl.join(ddr, how=how, lsuffix=\"l\", rsuffix=\"r\", shuffle=shuffle),\n pdl.join(pdr, how=how, lsuffix=\"l\", rsuffix=\"r\"),\n )\n assert_eq(\n ddr.join(ddl, how=how, lsuffix=\"l\", rsuffix=\"r\", shuffle=shuffle),\n pdr.join(pdl, how=how, lsuffix=\"l\", rsuffix=\"r\"),\n )\n\n \"\"\"\n # temporary disabled bacause pandas may incorrectly raise\n # IndexError for empty DataFrame\n # https://github.com/pydata/pandas/pull/10826\n\n list_assert_eq(ddl.join(ddr, how=how, on='a', lsuffix='l', rsuffix='r'),\n pdl.join(pdr, how=how, on='a', lsuffix='l', rsuffix='r'))\n\n list_eq(ddr.join(ddl, how=how, on='c', lsuffix='l', rsuffix='r'),\n pdr.join(pdl, how=how, on='c', lsuffix='l', rsuffix='r'))\n\n # merge with index and columns\n list_eq(ddl.merge(ddr, how=how, left_on='a', right_index=True),\n pdl.merge(pdr, how=how, left_on='a', right_index=True))\n list_eq(ddr.merge(ddl, how=how, left_on='c', right_index=True),\n pdr.merge(pdl, how=how, left_on='c', right_index=True))\n list_eq(ddl.merge(ddr, how=how, left_index=True, right_on='c'),\n pdl.merge(pdr, how=how, left_index=True, right_on='c'))\n list_eq(ddr.merge(ddl, how=how, left_index=True, right_on='a'),\n pdr.merge(pdl, how=how, left_index=True, right_on='a'))\n \"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_by_multiple_columns_test_merge_by_multiple_columns.pdf3r.pd_DataFrame_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_by_multiple_columns_test_merge_by_multiple_columns.pdf3r.pd_DataFrame_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1148, "end_line": 1201, "span_ids": ["test_merge_by_multiple_columns"], "tokens": 507}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"how\", [\"inner\", \"outer\", \"left\", \"right\"])\n@pytest.mark.parametrize(\"shuffle\", [\"disk\", \"tasks\"])\ndef test_merge_by_multiple_columns(how, shuffle):\n # warnings here from pandas\n pdf1l = pd.DataFrame(\n {\n \"a\": list(\"abcdefghij\"),\n \"b\": list(\"abcdefghij\"),\n \"c\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n },\n index=list(\"abcdefghij\"),\n )\n pdf1r = pd.DataFrame(\n {\n \"d\": list(\"abcdefghij\"),\n \"e\": list(\"abcdefghij\"),\n \"f\": [10, 9, 8, 7, 6, 5, 4, 3, 2, 1],\n },\n index=list(\"abcdefghij\"),\n )\n\n pdf2l = pd.DataFrame(\n {\n \"a\": list(\"abcdeabcde\"),\n \"b\": list(\"abcabcabca\"),\n \"c\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n },\n index=list(\"abcdefghij\"),\n )\n pdf2r = pd.DataFrame(\n {\n \"d\": list(\"edcbaedcba\"),\n \"e\": list(\"aaabbbcccd\"),\n \"f\": [10, 9, 8, 7, 6, 5, 4, 3, 2, 1],\n },\n index=list(\"fghijklmno\"),\n )\n\n pdf3l = pd.DataFrame(\n {\n \"a\": list(\"aaaaaaaaaa\"),\n \"b\": list(\"aaaaaaaaaa\"),\n \"c\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n },\n index=list(\"abcdefghij\"),\n )\n pdf3r = pd.DataFrame(\n {\n \"d\": list(\"aaabbbccaa\"),\n \"e\": list(\"abbbbbbbbb\"),\n \"f\": [10, 9, 8, 7, 6, 5, 4, 3, 2, 1],\n },\n index=list(\"ABCDEFGHIJ\"),\n )\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_by_multiple_columns.for_pdl_pdr_in_pdf1l__test_merge_by_multiple_columns.for_pdl_pdr_in_pdf1l_.for_lpart_rpart_in_2_.None_8": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_by_multiple_columns.for_pdl_pdr_in_pdf1l__test_merge_by_multiple_columns.for_pdl_pdr_in_pdf1l_.for_lpart_rpart_in_2_.None_8", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1203, "end_line": 1265, "span_ids": ["test_merge_by_multiple_columns"], "tokens": 614}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"how\", [\"inner\", \"outer\", \"left\", \"right\"])\n@pytest.mark.parametrize(\"shuffle\", [\"disk\", \"tasks\"])\ndef test_merge_by_multiple_columns(how, shuffle):\n # ... other code\n\n for pdl, pdr in [(pdf1l, pdf1r), (pdf2l, pdf2r), (pdf3l, pdf3r)]:\n\n for lpart, rpart in [(2, 2), (3, 2), (2, 3)]:\n\n ddl = dd.from_pandas(pdl, lpart)\n ddr = dd.from_pandas(pdr, rpart)\n\n assert_eq(ddl.join(ddr, how=how, shuffle=shuffle), pdl.join(pdr, how=how))\n assert_eq(ddr.join(ddl, how=how, shuffle=shuffle), pdr.join(pdl, how=how))\n\n assert_eq(\n dd.merge(\n ddl,\n ddr,\n how=how,\n left_index=True,\n right_index=True,\n shuffle=shuffle,\n ),\n pd.merge(pdl, pdr, how=how, left_index=True, right_index=True),\n )\n assert_eq(\n dd.merge(\n ddr,\n ddl,\n how=how,\n left_index=True,\n right_index=True,\n shuffle=shuffle,\n ),\n pd.merge(pdr, pdl, how=how, left_index=True, right_index=True),\n )\n\n # hash join\n list_eq(\n dd.merge(ddl, ddr, how=how, left_on=\"a\", right_on=\"d\", shuffle=shuffle),\n pd.merge(pdl, pdr, how=how, left_on=\"a\", right_on=\"d\"),\n )\n list_eq(\n dd.merge(ddl, ddr, how=how, left_on=\"b\", right_on=\"e\", shuffle=shuffle),\n pd.merge(pdl, pdr, how=how, left_on=\"b\", right_on=\"e\"),\n )\n\n list_eq(\n dd.merge(ddr, ddl, how=how, left_on=\"d\", right_on=\"a\", shuffle=shuffle),\n pd.merge(pdr, pdl, how=how, left_on=\"d\", right_on=\"a\"),\n )\n list_eq(\n dd.merge(ddr, ddl, how=how, left_on=\"e\", right_on=\"b\", shuffle=shuffle),\n pd.merge(pdr, pdl, how=how, left_on=\"e\", right_on=\"b\"),\n )\n\n list_eq(\n dd.merge(\n ddl,\n ddr,\n how=how,\n left_on=[\"a\", \"b\"],\n right_on=[\"d\", \"e\"],\n shuffle=shuffle,\n ),\n pd.merge(pdl, pdr, how=how, left_on=[\"a\", \"b\"], right_on=[\"d\", \"e\"]),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_melt_test_melt.None_9": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_melt_test_melt.None_9", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1268, "end_line": 1298, "span_ids": ["test_melt"], "tokens": 380}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_melt():\n pdf = pd.DataFrame(\n {\"A\": list(\"abcd\") * 5, \"B\": list(\"XY\") * 10, \"C\": np.random.randn(20)}\n )\n ddf = dd.from_pandas(pdf, 4)\n\n list_eq(dd.melt(ddf), pd.melt(pdf))\n\n list_eq(dd.melt(ddf, id_vars=\"C\"), pd.melt(pdf, id_vars=\"C\"))\n list_eq(dd.melt(ddf, value_vars=\"C\"), pd.melt(pdf, value_vars=\"C\"))\n list_eq(\n dd.melt(ddf, value_vars=[\"A\", \"C\"], var_name=\"myvar\"),\n pd.melt(pdf, value_vars=[\"A\", \"C\"], var_name=\"myvar\"),\n )\n list_eq(\n dd.melt(ddf, id_vars=\"B\", value_vars=[\"A\", \"C\"], value_name=\"myval\"),\n pd.melt(pdf, id_vars=\"B\", value_vars=[\"A\", \"C\"], value_name=\"myval\"),\n )\n\n # test again as DataFrame method\n list_eq(ddf.melt(), pdf.melt())\n list_eq(ddf.melt(id_vars=\"C\"), pdf.melt(id_vars=\"C\"))\n list_eq(ddf.melt(value_vars=\"C\"), pdf.melt(value_vars=\"C\"))\n list_eq(\n ddf.melt(value_vars=[\"A\", \"C\"], var_name=\"myvar\"),\n pdf.melt(value_vars=[\"A\", \"C\"], var_name=\"myvar\"),\n )\n list_eq(\n ddf.melt(id_vars=\"B\", value_vars=[\"A\", \"C\"], value_name=\"myval\"),\n pdf.melt(id_vars=\"B\", value_vars=[\"A\", \"C\"], value_name=\"myval\"),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_cheap_inner_merge_with_pandas_object_test_cheap_inner_merge_with_pandas_object.list_eq_da_merge_b_on_x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_cheap_inner_merge_with_pandas_object_test_cheap_inner_merge_with_pandas_object.list_eq_da_merge_b_on_x", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1301, "end_line": 1312, "span_ids": ["test_cheap_inner_merge_with_pandas_object"], "tokens": 174}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_cheap_inner_merge_with_pandas_object():\n a = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 5, 6], \"y\": list(\"abdabd\")}, index=[10, 20, 30, 40, 50, 60]\n )\n da = dd.from_pandas(a, npartitions=3)\n\n b = pd.DataFrame({\"x\": [1, 2, 3, 4], \"z\": list(\"abda\")})\n\n dc = da.merge(b, on=\"x\", how=\"inner\")\n assert all(\"shuffle\" not in k[0] for k in dc.dask)\n\n 
list_eq(da.merge(b, on=\"x\", how=\"inner\"), a.merge(b, on=\"x\", how=\"inner\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_cheap_single_partition_merge_test_cheap_single_partition_merge.list_eq_aa_merge_bb_on_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_cheap_single_partition_merge_test_cheap_single_partition_merge.list_eq_aa_merge_bb_on_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1315, "end_line": 1328, "span_ids": ["test_cheap_single_partition_merge"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_cheap_single_partition_merge():\n a = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 5, 6], \"y\": list(\"abdabd\")}, index=[10, 20, 30, 40, 50, 60]\n )\n aa = dd.from_pandas(a, npartitions=3)\n\n b = pd.DataFrame({\"x\": [1, 2, 3, 4], \"z\": list(\"abda\")})\n bb = dd.from_pandas(b, npartitions=1, sort=False)\n\n cc = aa.merge(bb, on=\"x\", how=\"inner\")\n assert all(\"shuffle\" not in k[0] for k in cc.dask)\n assert len(cc.dask) == len(aa.dask) * 2 + len(bb.dask)\n\n list_eq(aa.merge(bb, on=\"x\", how=\"inner\"), a.merge(b, on=\"x\", how=\"inner\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_cheap_single_partition_merge_divisions_test_cheap_single_partition_merge_divisions.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_cheap_single_partition_merge_divisions_test_cheap_single_partition_merge_divisions.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1331, "end_line": 1346, "span_ids": ["test_cheap_single_partition_merge_divisions"], "tokens": 190}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_cheap_single_partition_merge_divisions():\n a = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 5, 6], \"y\": list(\"abdabd\")}, index=[10, 20, 30, 40, 50, 60]\n )\n aa = dd.from_pandas(a, npartitions=3)\n\n b = pd.DataFrame({\"x\": [1, 2, 3, 4], \"z\": list(\"abda\")})\n bb = dd.from_pandas(b, npartitions=1, sort=False)\n\n actual = aa.merge(bb, on=\"x\", how=\"inner\")\n assert not 
actual.known_divisions\n assert_divisions(actual)\n\n actual = bb.merge(aa, on=\"x\", how=\"inner\")\n assert not actual.known_divisions\n assert_divisions(actual)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_cheap_single_parition_merge_left_right_test_cheap_single_parition_merge_left_right.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_cheap_single_parition_merge_left_right_test_cheap_single_parition_merge_left_right.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1349, "end_line": 1365, "span_ids": ["test_cheap_single_parition_merge_left_right"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"how\", [\"left\", \"right\"])\ndef test_cheap_single_parition_merge_left_right(how):\n a = pd.DataFrame({\"x\": range(8), \"z\": list(\"ababbdda\")}, index=range(8))\n aa = dd.from_pandas(a, npartitions=1)\n\n b = pd.DataFrame({\"x\": [1, 2, 3, 4], \"z\": list(\"abda\")}, index=range(4))\n bb = dd.from_pandas(b, npartitions=1)\n\n actual = aa.merge(bb, left_index=True, right_on=\"x\", how=how)\n expected = a.merge(b, left_index=True, right_on=\"x\", how=how)\n\n assert_eq(actual, expected)\n\n actual = aa.merge(bb, left_on=\"x\", right_index=True, how=how)\n expected = a.merge(b, left_on=\"x\", right_index=True, how=how)\n\n assert_eq(actual, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_cheap_single_partition_merge_on_index_test_cheap_single_partition_merge_on_index.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_cheap_single_partition_merge_on_index_test_cheap_single_partition_merge_on_index.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1368, "end_line": 1393, "span_ids": ["test_cheap_single_partition_merge_on_index"], "tokens": 307}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_cheap_single_partition_merge_on_index():\n a = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 5, 6], \"y\": list(\"abdabd\")}, index=[10, 20, 30, 40, 50, 60]\n )\n aa = 
dd.from_pandas(a, npartitions=3)\n\n b = pd.DataFrame({\"x\": [1, 2, 3, 4], \"z\": list(\"abda\")})\n bb = dd.from_pandas(b, npartitions=1, sort=False)\n\n actual = aa.merge(bb, left_index=True, right_on=\"x\", how=\"inner\")\n expected = a.merge(b, left_index=True, right_on=\"x\", how=\"inner\")\n\n # Workaround https://github.com/pandas-dev/pandas/issues/26925\n # actual has the correct dtype for the index (Int64). Pandas has object-dtype\n # for empty joins.\n expected.index = expected.index.astype(\"int64\")\n\n assert actual.known_divisions\n assert_eq(actual, expected)\n\n actual = bb.merge(aa, right_index=True, left_on=\"x\", how=\"inner\")\n expected = b.merge(a, right_index=True, left_on=\"x\", how=\"inner\")\n expected.index = expected.index.astype(\"int64\")\n\n assert actual.known_divisions\n assert_eq(actual, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_maintains_columns_test_merge_maintains_columns.assert_tuple_merged_colum": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_maintains_columns_test_merge_maintains_columns.assert_tuple_merged_colum", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1396, "end_line": 1405, "span_ids": ["test_merge_maintains_columns"], "tokens": 163}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_merge_maintains_columns():\n lhs = pd.DataFrame(\n {\"A\": [1, 2, 3], \"B\": list(\"abc\"), \"C\": \"foo\", \"D\": 1.0}, columns=list(\"DCBA\")\n )\n rhs = pd.DataFrame(\n {\"G\": [4, 5], \"H\": 6.0, \"I\": \"bar\", \"B\": list(\"ab\")}, columns=list(\"GHIB\")\n )\n ddf = dd.from_pandas(lhs, npartitions=1)\n merged = dd.merge(ddf, rhs, on=\"B\").compute()\n assert tuple(merged.columns) == (\"D\", \"C\", \"B\", \"A\", \"G\", \"H\", \"I\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_index_without_divisions_test_merge_index_without_divisions.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_index_without_divisions_test_merge_index_without_divisions.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1408, "end_line": 1418, "span_ids": ["test_merge_index_without_divisions"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"shuffle\", [\"disk\", \"tasks\"])\ndef test_merge_index_without_divisions(shuffle):\n a = pd.DataFrame({\"x\": [1, 2, 3, 4, 5]}, index=[1, 2, 3, 4, 5])\n b = pd.DataFrame({\"y\": [1, 2, 3, 4, 5]}, index=[5, 4, 3, 2, 1])\n\n aa = dd.from_pandas(a, npartitions=3, sort=False)\n bb = dd.from_pandas(b, npartitions=2)\n\n result = aa.join(bb, how=\"inner\", shuffle=shuffle)\n expected = a.join(b, how=\"inner\")\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_half_indexed_dataframe_avoids_shuffle_test_half_indexed_dataframe_avoids_shuffle.assert_len_cc_dask_500": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_half_indexed_dataframe_avoids_shuffle_test_half_indexed_dataframe_avoids_shuffle.assert_len_cc_dask_500", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1421, "end_line": 1435, "span_ids": ["test_half_indexed_dataframe_avoids_shuffle"], "tokens": 149}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_half_indexed_dataframe_avoids_shuffle():\n a = pd.DataFrame({\"x\": np.random.randint(100, size=1000)})\n b = pd.DataFrame(\n {\"y\": np.random.randint(100, size=100)}, index=np.random.randint(100, size=100)\n )\n\n aa = dd.from_pandas(a, npartitions=100)\n bb = dd.from_pandas(b, npartitions=2)\n\n c = pd.merge(a, b, left_index=True, right_on=\"y\")\n cc = dd.merge(aa, bb, left_index=True, right_on=\"y\", shuffle=\"tasks\")\n\n list_eq(c, cc)\n\n assert len(cc.dask) < 500", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_errors_for_merge_on_frame_columns_test_concat_one_series.assert_isinstance_c_dd_D": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_errors_for_merge_on_frame_columns_test_concat_one_series.assert_isinstance_c_dd_D", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1438, "end_line": 1460, "span_ids": ["test_errors_for_merge_on_frame_columns", "test_concat_one_series"], "tokens": 256}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_errors_for_merge_on_frame_columns():\n a = pd.DataFrame({\"x\": [1, 2, 3, 4, 5]}, index=[1, 2, 3, 4, 5])\n b = pd.DataFrame({\"y\": [1, 2, 3, 4, 5]}, index=[5, 4, 3, 2, 1])\n\n aa = dd.from_pandas(a, npartitions=3, sort=False)\n bb = dd.from_pandas(b, npartitions=2)\n\n with pytest.raises(NotImplementedError):\n dd.merge(aa, bb, left_on=\"x\", right_on=bb.y)\n\n with pytest.raises(NotImplementedError):\n dd.merge(aa, bb, left_on=aa.x, right_on=bb.y)\n\n\ndef test_concat_one_series():\n a = pd.Series([1, 2, 3, 4])\n aa = dd.from_pandas(a, npartitions=2, sort=False)\n\n c = dd.concat([aa], axis=0)\n assert isinstance(c, dd.Series)\n\n c = dd.concat([aa], axis=1)\n assert isinstance(c, dd.DataFrame)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_unknown_divisions_test_concat_unknown_divisions.with_pytest_warns_None_a.assert_len_record_0": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_unknown_divisions_test_concat_unknown_divisions.with_pytest_warns_None_a.assert_len_record_0", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1463, "end_line": 1480, "span_ids": ["test_concat_unknown_divisions"], "tokens": 191}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_concat_unknown_divisions():\n a = pd.Series([1, 2, 3, 4])\n b = pd.Series([4, 3, 2, 1])\n aa = dd.from_pandas(a, npartitions=2, sort=False)\n bb = dd.from_pandas(b, npartitions=2, sort=False)\n\n assert not aa.known_divisions\n\n with pytest.warns(UserWarning):\n assert_eq(pd.concat([a, b], axis=1), dd.concat([aa, bb], axis=1))\n\n cc = dd.from_pandas(b, npartitions=1, sort=False)\n with pytest.raises(ValueError):\n dd.concat([aa, cc], axis=1)\n\n with pytest.warns(None) as record:\n dd.concat([aa, bb], axis=1, ignore_unknown_divisions=True)\n assert len(record) == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_unknown_divisions_errors_test_concat_unknown_divisions_errors.with_pytest_raises_ValueE.with_pytest_warns_UserWar.dd_concat_aa_bb_axis_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_unknown_divisions_errors_test_concat_unknown_divisions_errors.with_pytest_raises_ValueE.with_pytest_warns_UserWar.dd_concat_aa_bb_axis_", 
"embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1483, "end_line": 1491, "span_ids": ["test_concat_unknown_divisions_errors"], "tokens": 119}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_concat_unknown_divisions_errors():\n a = pd.Series([1, 2, 3, 4, 5, 6])\n b = pd.Series([4, 3, 2, 1])\n aa = dd.from_pandas(a, npartitions=2, sort=False)\n bb = dd.from_pandas(b, npartitions=2, sort=False)\n\n with pytest.raises(ValueError):\n with pytest.warns(UserWarning): # Concat with unknown divisions\n dd.concat([aa, bb], axis=1).compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat2_test_concat2.assert_dd_concat_a_is_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat2_test_concat2.assert_dd_concat_a_is_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1494, "end_line": 1528, "span_ids": ["test_concat2"], "tokens": 594}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_concat2():\n dsk = {\n (\"x\", 0): pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]}),\n (\"x\", 1): pd.DataFrame({\"a\": [4, 5, 6], \"b\": [3, 2, 1]}),\n (\"x\", 2): pd.DataFrame({\"a\": [7, 8, 9], \"b\": [0, 0, 0]}),\n }\n meta = make_meta({\"a\": \"i8\", \"b\": \"i8\"})\n a = dd.DataFrame(dsk, \"x\", meta, [None, None])\n dsk = {\n (\"y\", 0): pd.DataFrame({\"a\": [10, 20, 30], \"b\": [40, 50, 60]}),\n (\"y\", 1): pd.DataFrame({\"a\": [40, 50, 60], \"b\": [30, 20, 10]}),\n (\"y\", 2): pd.DataFrame({\"a\": [70, 80, 90], \"b\": [0, 0, 0]}),\n }\n b = dd.DataFrame(dsk, \"y\", meta, [None, None])\n\n dsk = {\n (\"y\", 0): pd.DataFrame({\"b\": [10, 20, 30], \"c\": [40, 50, 60]}),\n (\"y\", 1): pd.DataFrame({\"b\": [40, 50, 60], \"c\": [30, 20, 10]}),\n }\n meta = make_meta({\"b\": \"i8\", \"c\": \"i8\"})\n c = dd.DataFrame(dsk, \"y\", meta, [None, None])\n\n dsk = {\n (\"y\", 0): pd.DataFrame(\n {\"b\": [10, 20, 30], \"c\": [40, 50, 60], \"d\": [70, 80, 90]}\n ),\n (\"y\", 1): pd.DataFrame(\n {\"b\": [40, 50, 60], \"c\": [30, 20, 10], \"d\": [90, 80, 70]}, index=[3, 4, 5]\n ),\n }\n meta = make_meta({\"b\": \"i8\", \"c\": \"i8\", \"d\": \"i8\"}, index=pd.Index([], \"i8\"))\n d = dd.DataFrame(dsk, \"y\", meta, [0, 3, 5])\n\n cases = [[a, b], [a, c], [a, d]]\n assert dd.concat([a]) is a\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat2.for_case_in_cases__test_concat2.for_case_in_cases_.None_5.assert_set_result_dask_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat2.for_case_in_cases__test_concat2.for_case_in_cases_.None_5.assert_set_result_dask_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1529, "end_line": 1561, "span_ids": ["test_concat2"], "tokens": 297}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_concat2():\n    # ... other code\n    for case in cases:\n        pdcase = [_c.compute() for _c in case]\n\n        with warnings.catch_warnings(record=True) as w:\n            expected = pd.concat(pdcase, sort=False)\n\n        ctx = FutureWarning if w else None\n\n        with pytest.warns(ctx):\n            result = dd.concat(case)\n\n        assert result.npartitions == case[0].npartitions + case[1].npartitions\n        assert result.divisions == (None,) * (result.npartitions + 1)\n        assert_eq(expected, result)\n\n        with warnings.catch_warnings():\n            warnings.simplefilter(\"ignore\", FutureWarning)\n            assert set(result.dask) == set(dd.concat(case).dask)\n\n        with warnings.catch_warnings(record=True) as w:\n            expected = pd.concat(pdcase, join=\"inner\", sort=False)\n\n        ctx = FutureWarning if w else None\n\n        with pytest.warns(ctx):\n            result = dd.concat(case, join=\"inner\")\n        assert result.npartitions == case[0].npartitions + case[1].npartitions\n        assert result.divisions == (None,) * (result.npartitions + 1)\n        assert_eq(result, expected)\n\n        with warnings.catch_warnings():\n            warnings.simplefilter(\"ignore\", FutureWarning)\n            assert set(result.dask) == set(dd.concat(case, join=\"inner\").dask)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat3_test_concat3.None_5.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat3_test_concat3.None_5.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1564, "end_line": 1616, "span_ids": ["test_concat3"], "tokens": 485}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_concat3():\n    pdf1 = 
pd.DataFrame(\n np.random.randn(6, 5), columns=list(\"ABCDE\"), index=list(\"abcdef\")\n )\n pdf2 = pd.DataFrame(\n np.random.randn(6, 5), columns=list(\"ABCFG\"), index=list(\"ghijkl\")\n )\n pdf3 = pd.DataFrame(\n np.random.randn(6, 5), columns=list(\"ABCHI\"), index=list(\"mnopqr\")\n )\n ddf1 = dd.from_pandas(pdf1, 2)\n ddf2 = dd.from_pandas(pdf2, 3)\n ddf3 = dd.from_pandas(pdf3, 2)\n\n with warnings.catch_warnings(record=True) as w:\n expected = pd.concat([pdf1, pdf2], sort=False)\n\n ctx = FutureWarning if w else None\n\n with pytest.warns(ctx):\n result = dd.concat([ddf1, ddf2])\n\n assert result.divisions == ddf1.divisions[:-1] + ddf2.divisions\n assert result.npartitions == ddf1.npartitions + ddf2.npartitions\n assert_eq(result, expected)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", FutureWarning)\n assert_eq(\n dd.concat([ddf1, ddf2], interleave_partitions=True), pd.concat([pdf1, pdf2])\n )\n\n with warnings.catch_warnings(record=True) as w:\n expected = pd.concat([pdf1, pdf2, pdf3], sort=False)\n\n ctx = FutureWarning if w else None\n\n with pytest.warns(ctx):\n result = dd.concat([ddf1, ddf2, ddf3])\n assert result.divisions == (\n ddf1.divisions[:-1] + ddf2.divisions[:-1] + ddf3.divisions\n )\n assert result.npartitions == (\n ddf1.npartitions + ddf2.npartitions + ddf3.npartitions\n )\n assert_eq(result, expected)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", FutureWarning)\n assert_eq(\n dd.concat([ddf1, ddf2, ddf3], interleave_partitions=True),\n pd.concat([pdf1, pdf2, pdf3]),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat4_interleave_partitions_test_concat4_interleave_partitions.assert_msg_in_str_err_val": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat4_interleave_partitions_test_concat4_interleave_partitions.assert_msg_in_str_err_val", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1619, "end_line": 1664, "span_ids": ["test_concat4_interleave_partitions"], "tokens": 392}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.filterwarnings(\"ignore\")\ndef test_concat4_interleave_partitions():\n pdf1 = pd.DataFrame(\n np.random.randn(10, 5), columns=list(\"ABCDE\"), index=list(\"abcdefghij\")\n )\n pdf2 = pd.DataFrame(\n np.random.randn(13, 5), columns=list(\"ABCDE\"), index=list(\"fghijklmnopqr\")\n )\n pdf3 = pd.DataFrame(\n np.random.randn(13, 6), columns=list(\"CDEXYZ\"), index=list(\"fghijklmnopqr\")\n )\n\n ddf1 = dd.from_pandas(pdf1, 2)\n ddf2 = dd.from_pandas(pdf2, 3)\n ddf3 = dd.from_pandas(pdf3, 2)\n\n msg = (\n \"All inputs have known divisions which cannot be \"\n \"concatenated in order. 
Specify \"\n \"interleave_partitions=True to ignore order\"\n )\n\n cases = [\n [ddf1, ddf1],\n [ddf1, ddf2],\n [ddf1, ddf3],\n [ddf2, ddf1],\n [ddf2, ddf3],\n [ddf3, ddf1],\n [ddf3, ddf2],\n ]\n for case in cases:\n pdcase = [c.compute() for c in case]\n\n assert_eq(\n dd.concat(case, interleave_partitions=True), pd.concat(pdcase, sort=False)\n )\n assert_eq(\n dd.concat(case, join=\"inner\", interleave_partitions=True),\n pd.concat(pdcase, join=\"inner\", sort=False),\n )\n\n msg = \"'join' must be 'inner' or 'outer'\"\n with pytest.raises(ValueError) as err:\n dd.concat([ddf1, ddf1], join=\"invalid\", interleave_partitions=True)\n assert msg in str(err.value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat5_test_concat5.cases_11._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat5_test_concat5.cases_11._", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1667, "end_line": 1743, "span_ids": ["test_concat5"], "tokens": 670}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.filterwarnings(\"ignore\")\ndef test_concat5():\n pdf1 = pd.DataFrame(\n np.random.randn(7, 5), columns=list(\"ABCDE\"), index=list(\"abcdefg\")\n )\n pdf2 = pd.DataFrame(\n np.random.randn(7, 6), columns=list(\"FGHIJK\"), index=list(\"abcdefg\")\n )\n pdf3 = pd.DataFrame(\n np.random.randn(7, 6), columns=list(\"FGHIJK\"), index=list(\"cdefghi\")\n )\n pdf4 = pd.DataFrame(\n np.random.randn(7, 5), columns=list(\"FGHAB\"), index=list(\"cdefghi\")\n )\n pdf5 = pd.DataFrame(\n np.random.randn(7, 5), columns=list(\"FGHAB\"), index=list(\"fklmnop\")\n )\n\n ddf1 = dd.from_pandas(pdf1, 2)\n ddf2 = dd.from_pandas(pdf2, 3)\n ddf3 = dd.from_pandas(pdf3, 2)\n ddf4 = dd.from_pandas(pdf4, 2)\n ddf5 = dd.from_pandas(pdf5, 3)\n\n cases = [\n [ddf1, ddf2],\n [ddf1, ddf3],\n [ddf1, ddf4],\n [ddf1, ddf5],\n [ddf3, ddf4],\n [ddf3, ddf5],\n [ddf5, ddf1, ddf4],\n [ddf5, ddf3],\n [ddf1.A, ddf4.A],\n [ddf2.F, ddf3.F],\n [ddf4.A, ddf5.A],\n [ddf1.A, ddf4.F],\n [ddf2.F, ddf3.H],\n [ddf4.A, ddf5.B],\n [ddf1, ddf4.A],\n [ddf3.F, ddf2],\n [ddf5, ddf1.A, ddf2],\n ]\n\n for case in cases:\n pdcase = [c.compute() for c in case]\n\n with pytest.warns(None):\n # some cases will raise warning directly from pandas\n assert_eq(\n dd.concat(case, interleave_partitions=True),\n pd.concat(pdcase, sort=False),\n )\n\n assert_eq(\n dd.concat(case, join=\"inner\", interleave_partitions=True),\n pd.concat(pdcase, join=\"inner\"),\n )\n\n assert_eq(dd.concat(case, axis=1), pd.concat(pdcase, axis=1))\n\n assert_eq(\n dd.concat(case, axis=1, join=\"inner\"),\n pd.concat(pdcase, axis=1, join=\"inner\"),\n )\n\n # Dask + pandas\n cases = [\n [ddf1, pdf2],\n [ddf1, pdf3],\n [pdf1, ddf4],\n [pdf1.A, ddf4.A],\n [ddf2.F, pdf3.F],\n [ddf1, pdf4.A],\n [ddf3.F, pdf2],\n 
[ddf2, pdf1, ddf3.F],\n ]\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat5.None_1_test_concat5.None_1.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat5.None_1_test_concat5.None_1.None_3", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1745, "end_line": 1760, "span_ids": ["test_concat5"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.filterwarnings(\"ignore\")\ndef test_concat5():\n # ... other code\n\n for case in cases:\n pdcase = [c.compute() if isinstance(c, _Frame) else c for c in case]\n\n assert_eq(dd.concat(case, interleave_partitions=True), pd.concat(pdcase))\n\n assert_eq(\n dd.concat(case, join=\"inner\", interleave_partitions=True),\n pd.concat(pdcase, join=\"inner\"),\n )\n\n assert_eq(dd.concat(case, axis=1), pd.concat(pdcase, axis=1))\n\n assert_eq(\n dd.concat(case, axis=1, join=\"inner\"),\n pd.concat(pdcase, axis=1, join=\"inner\"),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_categorical_test_concat_categorical.if_not_known_.dframes[0]._meta.clear_known_categories_df": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_categorical_test_concat_categorical.if_not_known_.dframes[0]._meta.clear_known_categories_df", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1763, "end_line": 1812, "span_ids": ["test_concat_categorical"], "tokens": 365}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"known, cat_index, divisions\",\n [\n (True, True, False),\n (True, False, True),\n (True, False, False),\n (False, True, False),\n (False, False, True),\n (False, False, False),\n ],\n)\n@pytest.mark.filterwarnings(\"ignore\")\ndef test_concat_categorical(known, cat_index, divisions):\n frames = [\n pd.DataFrame(\n {\n \"w\": list(\"xxxxx\"),\n \"x\": np.arange(5),\n \"y\": list(\"abcbc\"),\n \"z\": np.arange(5, dtype=\"f8\"),\n }\n ),\n pd.DataFrame(\n {\n \"w\": list(\"yyyyy\"),\n \"x\": np.arange(5, 10),\n \"y\": 
list(\"abbba\"),\n \"z\": np.arange(5, 10, dtype=\"f8\"),\n }\n ),\n pd.DataFrame(\n {\n \"w\": list(\"zzzzz\"),\n \"x\": np.arange(10, 15),\n \"y\": list(\"bcbcc\"),\n \"z\": np.arange(10, 15, dtype=\"f8\"),\n }\n ),\n ]\n for df in frames:\n df.w = df.w.astype(\"category\")\n df.y = df.y.astype(\"category\")\n\n if cat_index:\n frames = [df.set_index(df.y) for df in frames]\n\n dframes = [dd.from_pandas(p, npartitions=2, sort=divisions) for p in frames]\n\n if not known:\n dframes[0]._meta = clear_known_categories(dframes[0]._meta, [\"y\"], index=True)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_categorical.check_and_return_test_concat_categorical.check_and_return.return.res": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_categorical.check_and_return_test_concat_categorical.check_and_return.return.res", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1814, "end_line": 1825, "span_ids": ["test_concat_categorical"], "tokens": 223}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"known, cat_index, divisions\",\n [\n (True, True, False),\n (True, False, True),\n (True, False, False),\n (False, True, False),\n (False, False, True),\n (False, False, False),\n ],\n)\n@pytest.mark.filterwarnings(\"ignore\")\ndef test_concat_categorical(known, cat_index, divisions):\n # ... other code\n\n def check_and_return(ddfs, dfs, join):\n sol = concat(dfs, join=join)\n res = dd.concat(ddfs, join=join, interleave_partitions=divisions)\n assert_eq(res, sol)\n if known:\n parts = compute_as_if_collection(\n dd.DataFrame, res.dask, res.__dask_keys__()\n )\n for p in [i.iloc[:0] for i in parts]:\n res._meta == p # will error if schemas don't align\n assert not cat_index or has_known_categories(res.index) == known\n return res\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_categorical.for_join_in_inner_ou_test_concat_categorical.for_join_in_inner_ou.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_categorical.for_join_in_inner_ou_test_concat_categorical.for_join_in_inner_ou.None_4", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1827, "end_line": 1848, "span_ids": ["test_concat_categorical"], "tokens": 299}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"known, cat_index, divisions\",\n [\n (True, True, False),\n (True, False, True),\n (True, False, False),\n (False, True, False),\n (False, False, True),\n (False, False, False),\n ],\n)\n@pytest.mark.filterwarnings(\"ignore\")\ndef test_concat_categorical(known, cat_index, divisions):\n # ... other code\n\n for join in [\"inner\", \"outer\"]:\n # Frame\n res = check_and_return(dframes, frames, join)\n assert has_known_categories(res.w)\n assert has_known_categories(res.y) == known\n\n # Series\n res = check_and_return([i.y for i in dframes], [i.y for i in frames], join)\n assert has_known_categories(res) == known\n\n # Non-cat series with cat index\n if cat_index:\n res = check_and_return([i.x for i in dframes], [i.x for i in frames], join)\n\n # Partition missing columns\n res = check_and_return(\n [dframes[0][[\"x\", \"y\"]]] + dframes[1:],\n [frames[0][[\"x\", \"y\"]]] + frames[1:],\n join,\n )\n assert not hasattr(res, \"w\") or has_known_categories(res.w)\n assert has_known_categories(res.y) == known", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_categorical_mixed_simple_test_concat_categorical_mixed_simple.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_categorical_mixed_simple_test_concat_categorical_mixed_simple.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1851, "end_line": 1859, "span_ids": ["test_concat_categorical_mixed_simple"], "tokens": 114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": "def test_concat_categorical_mixed_simple():\n a = pd.Series([\"a\", \"b\", \"c\"], dtype=\"category\")\n b = pd.Series([\"a\", \"b\"], dtype=\"category\")\n da = dd.from_pandas(a, 2).cat.as_unknown().to_frame(\"A\")\n db = dd.from_pandas(b, 2).to_frame(\"A\")\n\n expected = concat([a.to_frame(\"A\"), b.to_frame(\"A\")])\n result = dd.concat([da, db])\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_datetimeindex_test_concat_datetimeindex.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_datetimeindex_test_concat_datetimeindex.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1862, "end_line": 1884, "span_ids": ["test_concat_datetimeindex"], "tokens": 246}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_concat_datetimeindex():\n # https://github.com/dask/dask/issues/2932\n b2 = pd.DataFrame(\n {\"x\": [\"a\"]},\n index=pd.DatetimeIndex([\"2015-03-24 00:00:16\"], dtype=\"datetime64[ns]\"),\n )\n b3 = pd.DataFrame(\n {\"x\": [\"c\"]},\n index=pd.DatetimeIndex([\"2015-03-29 00:00:44\"], dtype=\"datetime64[ns]\"),\n )\n\n b2[\"x\"] = b2.x.astype(\"category\").cat.set_categories([\"a\", \"c\"])\n b3[\"x\"] = b3.x.astype(\"category\").cat.set_categories([\"a\", \"c\"])\n\n db2 = dd.from_pandas(b2, 1)\n db3 = dd.from_pandas(b3, 1)\n\n result = concat([b2.iloc[:0], b3.iloc[:0]])\n assert result.index.dtype == \"M8[ns]\"\n\n result = dd.concat([db2, db3])\n expected = pd.concat([b2, b3])\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_append_test_append.None_8": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_append_test_append.None_8", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1887, "end_line": 1924, "span_ids": ["test_append"], "tokens": 486}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_append():\n df = pd.DataFrame({\"a\": [1, 2, 3, 4, 5, 6], \"b\": [1, 2, 3, 4, 5, 6]})\n 
df2 = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5, 6], \"b\": [1, 2, 3, 4, 5, 6]}, index=[6, 7, 8, 9, 10, 11]\n )\n df3 = pd.DataFrame(\n {\"b\": [1, 2, 3, 4, 5, 6], \"c\": [1, 2, 3, 4, 5, 6]}, index=[6, 7, 8, 9, 10, 11]\n )\n\n ddf = dd.from_pandas(df, 2)\n ddf2 = dd.from_pandas(df2, 2)\n ddf3 = dd.from_pandas(df3, 2)\n\n s = pd.Series([7, 8], name=6, index=[\"a\", \"b\"])\n\n def check_with_warning(dask_obj, dask_append, pandas_obj, pandas_append):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", FutureWarning)\n expected = pandas_obj.append(pandas_append)\n\n result = dask_obj.append(dask_append)\n\n assert_eq(result, expected)\n\n check_with_warning(ddf, s, df, s)\n check_with_warning(ddf, ddf2, df, df2)\n check_with_warning(ddf.a, ddf2.a, df.a, df2.a)\n\n # different columns\n check_with_warning(ddf, ddf3, df, df3)\n check_with_warning(ddf.a, ddf3.b, df.a, df3.b)\n\n # dask + pandas\n check_with_warning(ddf, df2, df, df2)\n check_with_warning(ddf.a, df2.a, df.a, df2.a)\n\n check_with_warning(ddf, df3, df, df3)\n check_with_warning(ddf.a, df3.b, df.a, df3.b)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_append2_test_append2.None_7": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_append2_test_append2.None_7", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1927, "end_line": 1972, "span_ids": ["test_append2"], "tokens": 640}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.filterwarnings(\"ignore\")\ndef test_append2():\n dsk = {\n (\"x\", 0): pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]}),\n (\"x\", 1): pd.DataFrame({\"a\": [4, 5, 6], \"b\": [3, 2, 1]}),\n (\"x\", 2): pd.DataFrame({\"a\": [7, 8, 9], \"b\": [0, 0, 0]}),\n }\n meta = make_meta({\"a\": \"i8\", \"b\": \"i8\"})\n ddf1 = dd.DataFrame(dsk, \"x\", meta, [None, None])\n\n dsk = {\n (\"y\", 0): pd.DataFrame({\"a\": [10, 20, 30], \"b\": [40, 50, 60]}),\n (\"y\", 1): pd.DataFrame({\"a\": [40, 50, 60], \"b\": [30, 20, 10]}),\n (\"y\", 2): pd.DataFrame({\"a\": [70, 80, 90], \"b\": [0, 0, 0]}),\n }\n ddf2 = dd.DataFrame(dsk, \"y\", meta, [None, None])\n\n dsk = {\n (\"y\", 0): pd.DataFrame({\"b\": [10, 20, 30], \"c\": [40, 50, 60]}),\n (\"y\", 1): pd.DataFrame({\"b\": [40, 50, 60], \"c\": [30, 20, 10]}),\n }\n meta = make_meta({\"b\": \"i8\", \"c\": \"i8\"})\n ddf3 = dd.DataFrame(dsk, \"y\", meta, [None, None])\n\n assert_eq(ddf1.append(ddf2), ddf1.compute().append(ddf2.compute(), sort=False))\n assert_eq(ddf2.append(ddf1), ddf2.compute().append(ddf1.compute(), sort=False))\n\n # different columns\n assert_eq(ddf1.append(ddf3), ddf1.compute().append(ddf3.compute(), sort=False))\n assert_eq(ddf3.append(ddf1), ddf3.compute().append(ddf1.compute(), sort=False))\n\n # Dask + pandas\n assert_eq(\n ddf1.append(ddf2.compute()), 
ddf1.compute().append(ddf2.compute(), sort=False)\n )\n assert_eq(\n ddf2.append(ddf1.compute()), ddf2.compute().append(ddf1.compute(), sort=False)\n )\n\n # different columns\n assert_eq(\n ddf1.append(ddf3.compute()), ddf1.compute().append(ddf3.compute(), sort=False)\n )\n assert_eq(\n ddf3.append(ddf1.compute()), ddf3.compute().append(ddf1.compute(), sort=False)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_append_categorical_test_append_categorical.for_known_in_True_False.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_append_categorical_test_append_categorical.for_known_in_True_False.None_5", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1975, "end_line": 2022, "span_ids": ["test_append_categorical"], "tokens": 385}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_append_categorical():\n frames = [\n pd.DataFrame(\n {\n \"x\": np.arange(5, 10),\n \"y\": list(\"abbba\"),\n \"z\": np.arange(5, 10, dtype=\"f8\"),\n }\n ),\n pd.DataFrame(\n {\n \"x\": np.arange(10, 15),\n \"y\": list(\"bcbcc\"),\n \"z\": np.arange(10, 15, dtype=\"f8\"),\n }\n ),\n ]\n frames2 = []\n for df in frames:\n df.y = df.y.astype(\"category\")\n df2 = df.copy()\n df2.y = df2.y.cat.set_categories(list(\"abc\"))\n df.index = df.y\n frames2.append(df2.set_index(df2.y))\n\n df1, df2 = frames2\n\n for known in [True, False]:\n dframes = [dd.from_pandas(p, npartitions=2, sort=False) for p in frames]\n if not known:\n dframes[0]._meta = clear_known_categories(\n dframes[0]._meta, [\"y\"], index=True\n )\n ddf1, ddf2 = dframes\n\n res = ddf1.append(ddf2)\n assert_eq(res, df1.append(df2))\n assert has_known_categories(res.index) == known\n assert has_known_categories(res.y) == known\n\n res = ddf1.y.append(ddf2.y)\n assert_eq(res, df1.y.append(df2.y))\n assert has_known_categories(res.index) == known\n assert has_known_categories(res) == known\n\n res = ddf1.index.append(ddf2.index)\n assert_eq(res, df1.index.append(df2.index))\n assert has_known_categories(res) == known", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_append_lose_divisions_test_repartition_repeated_divisions.assert_eq_ddf2_df_set_in": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_append_lose_divisions_test_repartition_repeated_divisions.assert_eq_ddf2_df_set_in", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": 
"text/x-python", "category": "test", "start_line": 2025, "end_line": 2049, "span_ids": ["test_repartition_repeated_divisions", "test_singleton_divisions", "test_append_lose_divisions"], "tokens": 262}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_append_lose_divisions():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4]}, index=[1, 2, 3, 4])\n ddf = dd.from_pandas(df, npartitions=2)\n\n ddf2 = ddf.append(ddf)\n df2 = df.append(df)\n assert_eq(ddf2, df2)\n\n\ndef test_singleton_divisions():\n df = pd.DataFrame({\"x\": [1, 1, 1]}, index=[1, 2, 3])\n ddf = dd.from_pandas(df, npartitions=2)\n ddf2 = ddf.set_index(\"x\")\n\n joined = ddf2.join(ddf2, rsuffix=\"r\")\n assert joined.divisions == (1, 1)\n joined.compute()\n\n\ndef test_repartition_repeated_divisions():\n df = pd.DataFrame({\"x\": [0, 0, 0, 0]})\n ddf = dd.from_pandas(df, npartitions=2).set_index(\"x\")\n\n ddf2 = ddf.repartition(divisions=(0, 0), force=True)\n assert_eq(ddf2, df.set_index(\"x\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_multi_duplicate_divisions_test_multi_duplicate_divisions.assert_eq_r1_r2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_multi_duplicate_divisions_test_multi_duplicate_divisions.assert_eq_r1_r2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 2052, "end_line": 2067, "span_ids": ["test_multi_duplicate_divisions"], "tokens": 188}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_multi_duplicate_divisions():\n df1 = pd.DataFrame({\"x\": [0, 0, 0, 0]})\n df2 = pd.DataFrame({\"x\": [0]})\n\n ddf1 = dd.from_pandas(df1, npartitions=2).set_index(\"x\")\n ddf2 = dd.from_pandas(df2, npartitions=1).set_index(\"x\")\n assert ddf1.npartitions == 2\n assert len(ddf1) == len(df1)\n\n r1 = ddf1.merge(ddf2, how=\"left\", left_index=True, right_index=True)\n\n sf1 = df1.set_index(\"x\")\n sf2 = df2.set_index(\"x\")\n r2 = sf1.merge(sf2, how=\"left\", left_index=True, right_index=True)\n\n assert_eq(r1, r2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_outer_empty_test_merge_outer_empty.for_x_in_range_0_k_clust.assert_eq_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_outer_empty_test_merge_outer_empty.for_x_in_range_0_k_clust.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 2070, "end_line": 2084, "span_ids": ["test_merge_outer_empty"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_merge_outer_empty():\n # Issue #5470 bug reproducer\n k_clusters = 3\n df = pd.DataFrame(\n {\"user\": [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\"], \"cluster\": [1, 1, 2, 2, 3, 3]}\n )\n df = dd.from_pandas(df, npartitions=10)\n empty_df = dd.from_pandas(pd.DataFrame(), npartitions=10)\n\n for x in range(0, k_clusters + 1):\n assert_eq(\n dd.merge(empty_df, df[df.cluster == x], how=\"outer\").compute(),\n df[df.cluster == x].compute(),\n check_index=False,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_dtype_equality_warning_test_dtype_equality_warning.assert_len_r_0": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_dtype_equality_warning_test_dtype_equality_warning.assert_len_r_0", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 2087, "end_line": 2095, "span_ids": ["test_dtype_equality_warning"], "tokens": 109}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dtype_equality_warning():\n # https://github.com/dask/dask/issues/5437\n df1 = pd.DataFrame({\"a\": np.array([1, 2], dtype=np.dtype(np.int64))})\n df2 = pd.DataFrame({\"a\": np.array([1, 2], dtype=np.dtype(np.longlong))})\n\n with pytest.warns(None) as r:\n dd.multi.warn_dtype_mismatch(df1, df2, \"a\", \"a\")\n\n assert len(r) == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_numeric.py_pytest_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_numeric.py_pytest_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_numeric.py", "file_name": "test_numeric.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 51, "span_ids": ["test_to_numeric_on_dask_dataframe_series", 
"test_to_numeric_on_scalars", "test_to_numeric_on_dask_array", "test_to_numeric_on_dask_dataframe_series_with_meta", "imports", "test_to_numeric_on_dask_dataframe_dataframe_raises_error"], "tokens": 445}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\nimport numpy as np\nimport pandas as pd\n\nfrom dask.array import from_array, Array\nfrom dask.delayed import Delayed\nfrom dask.dataframe import from_pandas, Series, to_numeric\n\n\n@pytest.mark.parametrize(\"arg\", [\"5\", 5, \"5 \"])\ndef test_to_numeric_on_scalars(arg):\n output = to_numeric(arg)\n assert isinstance(output, Delayed)\n assert output.compute() == 5\n\n\ndef test_to_numeric_on_dask_array():\n arg = from_array([\"1.0\", \"2\", -3, 5.1])\n expected = np.array([1.0, 2.0, -3.0, 5.1])\n output = to_numeric(arg)\n assert isinstance(output, Array)\n assert list(output.compute()) == list(expected)\n\n\ndef test_to_numeric_on_dask_dataframe_series():\n s = pd.Series([\"1.0\", \"2\", -3, -5.1])\n arg = from_pandas(s, npartitions=2)\n expected = pd.to_numeric(s)\n output = to_numeric(arg)\n assert output.dtype == \"int64\"\n assert isinstance(output, Series)\n assert list(output.compute()) == list(expected)\n\n\ndef test_to_numeric_on_dask_dataframe_series_with_meta():\n s = pd.Series([\"1.0\", \"2\", -3, -5.1])\n arg = from_pandas(s, npartitions=2)\n expected = pd.to_numeric(s)\n output = to_numeric(arg, meta=pd.Series([], dtype=\"float64\"))\n assert output.dtype == \"float64\"\n assert isinstance(output, Series)\n assert list(output.compute()) == list(expected)\n\n\ndef test_to_numeric_on_dask_dataframe_dataframe_raises_error():\n s = pd.Series([\"1.0\", \"2\", -3, -5.1])\n df = pd.DataFrame({\"a\": s, \"b\": s})\n arg = from_pandas(df, npartitions=2)\n with pytest.raises(TypeError, match=\"arg must be a list, tuple, dask.\"):\n to_numeric(arg)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_optimize_dataframe.py_dask_dfs.list_dsk_values_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_optimize_dataframe.py_dask_dfs.list_dsk_values_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_optimize_dataframe.py", "file_name": "test_optimize_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 10, "span_ids": ["imports"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import dask\nimport dask.dataframe as dd\nimport pandas as pd\n\ndsk = {\n (\"x\", 0): pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]}, index=[0, 1, 3]),\n (\"x\", 1): pd.DataFrame({\"a\": [4, 5, 6], \"b\": [3, 2, 1]}, index=[5, 6, 8]),\n (\"x\", 2): pd.DataFrame({\"a\": [7, 8, 
9], \"b\": [0, 0, 0]}, index=[9, 9, 9]),\n}\ndfs = list(dsk.values())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_optimize_dataframe.py_test_fuse_ave_width_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_optimize_dataframe.py_test_fuse_ave_width_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_optimize_dataframe.py", "file_name": "test_optimize_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 13, "end_line": 40, "span_ids": ["test_optimize_blockwise", "test_fuse_ave_width"], "tokens": 231}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_ave_width():\n df = pd.DataFrame({\"x\": range(10)})\n df = dd.from_pandas(df, npartitions=5)\n\n s = (df.x + 1) + (df.x + 2)\n\n with dask.config.set({\"optimization.fuse.ave-width\": 4}):\n a = s.__dask_optimize__(s.dask, s.__dask_keys__())\n\n b = s.__dask_optimize__(s.dask, s.__dask_keys__())\n\n assert len(a) <= 15\n assert len(b) <= 15\n\n\ndef test_optimize_blockwise():\n from dask.array.optimization import optimize_blockwise\n\n df = pd.DataFrame({\"x\": range(10), \"y\": range(10)})\n ddf = dd.from_pandas(df, npartitions=2)\n\n for i in range(10):\n ddf[\"x\"] = ddf.x + 1 + ddf.y\n\n graph = optimize_blockwise(ddf.dask)\n\n assert len(graph) <= 4", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_np_test_get_dummies.tm_assert_index_equal_res": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_np_test_get_dummies.tm_assert_index_equal_res", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_reshape.py", "file_name": "test_reshape.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 27, "span_ids": ["imports", "test_get_dummies"], "tokens": 242}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\nimport pandas as pd\nimport pytest\n\nimport dask.dataframe as dd\n\nfrom dask.dataframe._compat import tm\nfrom dask.dataframe.utils import assert_eq, make_meta, PANDAS_GT_0240\n\n\n@pytest.mark.parametrize(\n \"data\",\n [\n pd.Series([1, 1, 1, 2, 2, 1, 3, 4], dtype=\"category\"),\n pd.Series(pd.Categorical([1, 1, 1, 2, 2, 1, 3, 4], categories=[4, 3, 2, 1])),\n pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 4, 3, 2, 1], \"b\": pd.Categorical(list(\"abcdabcd\"))}\n 
),\n ],\n)\ndef test_get_dummies(data):\n exp = pd.get_dummies(data)\n\n ddata = dd.from_pandas(data, 2)\n res = dd.get_dummies(ddata)\n assert_eq(res, exp)\n tm.assert_index_equal(res.columns, exp.columns)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_get_dummies_object_test_get_dummies_object.None_2.dd_get_dummies_ddf_colum": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_get_dummies_object_test_get_dummies_object.None_2.dd_get_dummies_ddf_colum", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_reshape.py", "file_name": "test_reshape.py", "file_type": "text/x-python", "category": "test", "start_line": 30, "end_line": 53, "span_ids": ["test_get_dummies_object"], "tokens": 199}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_get_dummies_object():\n df = pd.DataFrame(\n {\n \"a\": pd.Categorical([1, 2, 3, 4, 4, 3, 2, 1]),\n \"b\": list(\"abcdabcd\"),\n \"c\": pd.Categorical(list(\"abcdabcd\")),\n }\n )\n ddf = dd.from_pandas(df, 2)\n\n # Explicitly exclude object columns\n exp = pd.get_dummies(df, columns=[\"a\", \"c\"])\n res = dd.get_dummies(ddf, columns=[\"a\", \"c\"])\n assert_eq(res, exp)\n tm.assert_index_equal(res.columns, exp.columns)\n\n with pytest.raises(NotImplementedError):\n dd.get_dummies(ddf)\n\n with pytest.raises(NotImplementedError):\n dd.get_dummies(ddf.b)\n\n with pytest.raises(NotImplementedError):\n dd.get_dummies(ddf, columns=[\"b\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_get_dummies_kwargs_test_get_dummies_kwargs.None_7": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_get_dummies_kwargs_test_get_dummies_kwargs.None_7", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_reshape.py", "file_name": "test_reshape.py", "file_type": "text/x-python", "category": "test", "start_line": 56, "end_line": 87, "span_ids": ["test_get_dummies_kwargs"], "tokens": 340}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_get_dummies_kwargs():\n s = pd.Series([1, 1, 1, 2, 2, 1, 3, 4], dtype=\"category\")\n exp = pd.get_dummies(s, prefix=\"X\", prefix_sep=\"-\")\n\n ds = dd.from_pandas(s, 2)\n res = dd.get_dummies(ds, prefix=\"X\", prefix_sep=\"-\")\n assert_eq(res, exp)\n tm.assert_index_equal(res.columns, 
pd.Index([\"X-1\", \"X-2\", \"X-3\", \"X-4\"]))\n\n exp = pd.get_dummies(s, drop_first=True)\n\n ds = dd.from_pandas(s, 2)\n res = dd.get_dummies(ds, drop_first=True)\n assert_eq(res, exp)\n tm.assert_index_equal(res.columns, exp.columns)\n\n # nan\n s = pd.Series([1, 1, 1, 2, np.nan, 3, np.nan, 5], dtype=\"category\")\n exp = pd.get_dummies(s)\n\n ds = dd.from_pandas(s, 2)\n res = dd.get_dummies(ds)\n assert_eq(res, exp)\n tm.assert_index_equal(res.columns, exp.columns)\n\n # dummy_na\n exp = pd.get_dummies(s, dummy_na=True)\n\n ds = dd.from_pandas(s, 2)\n res = dd.get_dummies(ds, dummy_na=True)\n assert_eq(res, exp)\n tm.assert_index_equal(res.columns, pd.Index([1, 2, 3, 5, np.nan]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_get_dummies_sparse_test_get_dummies_sparse.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_get_dummies_sparse_test_get_dummies_sparse.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_reshape.py", "file_name": "test_reshape.py", "file_type": "text/x-python", "category": "test", "start_line": 90, "end_line": 108, "span_ids": ["test_get_dummies_sparse"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_get_dummies_sparse():\n s = pd.Series(pd.Categorical([\"a\", \"b\", \"a\"], categories=[\"a\", \"b\", \"c\"]))\n ds = dd.from_pandas(s, 2)\n\n exp = pd.get_dummies(s, sparse=True)\n res = dd.get_dummies(ds, sparse=True)\n assert_eq(exp, res)\n\n if PANDAS_GT_0240:\n exp_dtype = \"Sparse[uint8, 0]\"\n else:\n exp_dtype = \"uint8\"\n assert res.compute().a.dtype == exp_dtype\n assert pd.api.types.is_sparse(res.a.compute())\n\n exp = pd.get_dummies(s.to_frame(name=\"a\"), sparse=True)\n res = dd.get_dummies(ds.to_frame(name=\"a\"), sparse=True)\n assert_eq(exp, res)\n assert pd.api.types.is_sparse(res.a_a.compute())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_get_dummies_sparse_mix_test_get_dummies_sparse_mix.assert_pd_api_types_is_sp": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_get_dummies_sparse_mix_test_get_dummies_sparse_mix.assert_pd_api_types_is_sp", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_reshape.py", "file_name": "test_reshape.py", "file_type": "text/x-python", "category": "test", "start_line": 111, "end_line": 129, "span_ids": ["test_get_dummies_sparse_mix"], "tokens": 162}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", 
"tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_get_dummies_sparse_mix():\n df = pd.DataFrame(\n {\n \"A\": pd.Categorical([\"a\", \"b\", \"a\"], categories=[\"a\", \"b\", \"c\"]),\n \"B\": [0, 0, 1],\n }\n )\n ddf = dd.from_pandas(df, 2)\n\n exp = pd.get_dummies(df, sparse=True)\n res = dd.get_dummies(ddf, sparse=True)\n assert_eq(exp, res)\n\n if PANDAS_GT_0240:\n exp_dtype = \"Sparse[uint8, 0]\"\n else:\n exp_dtype = \"uint8\"\n assert res.compute().A_a.dtype == exp_dtype\n assert pd.api.types.is_sparse(res.A_a.compute())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_get_dummies_dtype_test_get_dummies_dtype.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_get_dummies_dtype_test_get_dummies_dtype.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_reshape.py", "file_name": "test_reshape.py", "file_type": "text/x-python", "category": "test", "start_line": 132, "end_line": 148, "span_ids": ["test_get_dummies_dtype"], "tokens": 162}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_get_dummies_dtype():\n df = pd.DataFrame(\n {\n \"A\": pd.Categorical([\"a\", \"b\", \"a\"], categories=[\"a\", \"b\", \"c\"]),\n \"B\": [0, 0, 1],\n }\n )\n ddf = dd.from_pandas(df, 2)\n\n exp = pd.get_dummies(df, dtype=\"float64\")\n res = dd.get_dummies(ddf, dtype=\"float64\")\n assert_eq(exp, res)\n assert res.compute().A_a.dtype == \"float64\"\n\n # dask's get_dummies on a pandas dataframe.\n assert_eq(dd.get_dummies(df, dtype=\"float64\"), exp)\n assert res.compute().A_a.dtype == \"float64\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_get_dummies_errors_test_get_dummies_errors.None_3.dd_get_dummies_ddf_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_get_dummies_errors_test_get_dummies_errors.None_3.dd_get_dummies_ddf_x_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_reshape.py", "file_name": "test_reshape.py", "file_type": "text/x-python", "category": "test", "start_line": 151, "end_line": 170, "span_ids": ["test_get_dummies_errors"], "tokens": 194}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, 
"text": "def test_get_dummies_errors():\n with pytest.raises(NotImplementedError):\n # not Categorical\n s = pd.Series([1, 1, 1, 2, 2, 1, 3, 4])\n ds = dd.from_pandas(s, 2)\n dd.get_dummies(ds)\n\n # unknown categories\n df = pd.DataFrame({\"x\": list(\"abcbc\"), \"y\": list(\"bcbcb\")})\n ddf = dd.from_pandas(df, npartitions=2)\n ddf._meta = make_meta({\"x\": \"category\", \"y\": \"category\"})\n\n with pytest.raises(NotImplementedError):\n dd.get_dummies(ddf)\n\n with pytest.raises(NotImplementedError):\n dd.get_dummies(ddf, columns=[\"x\", \"y\"])\n\n with pytest.raises(NotImplementedError):\n dd.get_dummies(ddf.x)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_pivot_table_test_pivot_table.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_pivot_table_test_pivot_table.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_reshape.py", "file_name": "test_reshape.py", "file_type": "text/x-python", "category": "test", "start_line": 173, "end_line": 200, "span_ids": ["test_pivot_table"], "tokens": 334}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"values\", [\"B\", [\"B\"], [\"B\", \"D\"]])\n@pytest.mark.parametrize(\"aggfunc\", [\"mean\", \"sum\", \"count\"])\ndef test_pivot_table(values, aggfunc):\n df = pd.DataFrame(\n {\n \"A\": np.random.choice(list(\"XYZ\"), size=100),\n \"B\": np.random.randn(100),\n \"C\": pd.Categorical(np.random.choice(list(\"abc\"), size=100)),\n \"D\": np.random.randn(100),\n }\n )\n ddf = dd.from_pandas(df, 5).repartition((0, 20, 40, 60, 80, 98, 99))\n\n res = dd.pivot_table(ddf, index=\"A\", columns=\"C\", values=values, aggfunc=aggfunc)\n exp = pd.pivot_table(df, index=\"A\", columns=\"C\", values=values, aggfunc=aggfunc)\n if aggfunc == \"count\":\n # dask result cannot be int64 dtype depending on divisions because of NaN\n exp = exp.astype(np.float64)\n\n assert_eq(res, exp)\n\n # method\n res = ddf.pivot_table(index=\"A\", columns=\"C\", values=values, aggfunc=aggfunc)\n exp = df.pivot_table(index=\"A\", columns=\"C\", values=values, aggfunc=aggfunc)\n if aggfunc == \"count\":\n # dask result cannot be int64 dtype depending on divisions because of NaN\n exp = exp.astype(np.float64)\n assert_eq(res, exp)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_pivot_table_dtype_test_pivot_table_dtype.assert_eq_res_exp_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_pivot_table_dtype_test_pivot_table_dtype.assert_eq_res_exp_", "embedding": null, "metadata": {"file_path": 
"dask/dataframe/tests/test_reshape.py", "file_name": "test_reshape.py", "file_type": "text/x-python", "category": "test", "start_line": 203, "end_line": 219, "span_ids": ["test_pivot_table_dtype"], "tokens": 178}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_pivot_table_dtype():\n\n df = pd.DataFrame(\n {\"A\": list(\"AABB\"), \"B\": pd.Categorical(list(\"ABAB\")), \"C\": [1, 2, 3, 4]}\n )\n ddf = dd.from_pandas(df, 2)\n res = dd.pivot_table(ddf, index=\"A\", columns=\"B\", values=\"C\", aggfunc=\"count\")\n\n exp_index = pd.CategoricalIndex([\"A\", \"B\"], name=\"B\")\n exp = pd.Series([np.float64] * 2, index=exp_index)\n tm.assert_series_equal(res.dtypes, exp)\n\n exp = pd.pivot_table(\n df, index=\"A\", columns=\"B\", values=\"C\", aggfunc=\"count\"\n ).astype(np.float64)\n\n assert_eq(res, exp)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_pivot_table_index_dtype_test_pivot_table_index_dtype.assert_res_index_dtype_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_pivot_table_index_dtype_test_pivot_table_index_dtype.assert_res_index_dtype_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_reshape.py", "file_name": "test_reshape.py", "file_type": "text/x-python", "category": "test", "start_line": 222, "end_line": 233, "span_ids": ["test_pivot_table_index_dtype"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_pivot_table_index_dtype():\n df = pd.DataFrame(\n {\n \"A\": pd.date_range(start=\"2019-08-01\", periods=3, freq=\"1D\"),\n \"B\": pd.Categorical(list(\"abc\")),\n \"C\": [1, 2, 3],\n }\n )\n ddf = dd.from_pandas(df, 2)\n res = dd.pivot_table(ddf, index=\"A\", columns=\"B\", values=\"C\", aggfunc=\"count\")\n\n assert res.index.dtype == np.dtype(\"datetime64[ns]\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_pivot_table_errors_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_pivot_table_errors_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_reshape.py", "file_name": "test_reshape.py", "file_type": "text/x-python", "category": "test", "start_line": 236, "end_line": 287, "span_ids": ["test_pivot_table_errors"], "tokens": 519}, "excluded_embed_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_pivot_table_errors():\n df = pd.DataFrame(\n {\n \"A\": np.random.choice(list(\"abc\"), size=10),\n \"B\": np.random.randn(10),\n \"C\": pd.Categorical(np.random.choice(list(\"abc\"), size=10)),\n }\n )\n ddf = dd.from_pandas(df, 2)\n\n msg = \"'index' must be the name of an existing column\"\n with pytest.raises(ValueError) as err:\n dd.pivot_table(ddf, index=[\"A\"], columns=\"C\", values=\"B\")\n assert msg in str(err.value)\n msg = \"'columns' must be the name of an existing column\"\n with pytest.raises(ValueError) as err:\n dd.pivot_table(ddf, index=\"A\", columns=[\"C\"], values=\"B\")\n assert msg in str(err.value)\n msg = \"'values' must refer to an existing column or columns\"\n with pytest.raises(ValueError) as err:\n dd.pivot_table(ddf, index=\"A\", columns=\"C\", values=[[\"B\"]])\n assert msg in str(err.value)\n\n msg = \"aggfunc must be either 'mean', 'sum' or 'count'\"\n with pytest.raises(ValueError) as err:\n dd.pivot_table(ddf, index=\"A\", columns=\"C\", values=\"B\", aggfunc=[\"sum\"])\n assert msg in str(err.value)\n\n with pytest.raises(ValueError) as err:\n dd.pivot_table(ddf, index=\"A\", columns=\"C\", values=\"B\", aggfunc=\"xx\")\n assert msg in str(err.value)\n\n # unknown categories\n ddf._meta = make_meta({\"A\": object, \"B\": float, \"C\": \"category\"})\n msg = \"'columns' must have known categories\"\n with pytest.raises(ValueError) as err:\n dd.pivot_table(ddf, index=\"A\", columns=\"C\", values=[\"B\"])\n assert msg in str(err.value)\n\n df = pd.DataFrame(\n {\n \"A\": np.random.choice(list(\"abc\"), size=10),\n \"B\": np.random.randn(10),\n \"C\": np.random.choice(list(\"abc\"), size=10),\n }\n )\n ddf = dd.from_pandas(df, 2)\n msg = \"'columns' must be category dtype\"\n with pytest.raises(ValueError) as err:\n dd.pivot_table(ddf, index=\"A\", columns=\"C\", values=\"B\")\n assert msg in str(err.value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_from_distutils_version_im_shifted_sum.return.df_a_b_c": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_from_distutils_version_im_shifted_sum.return.df_a_b_c", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 44, "span_ids": ["imports", "shifted_sum"], "tokens": 334}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from distutils.version import LooseVersion\n\nimport pandas as pd\nimport pytest\nimport numpy as np\n\nimport dask.array as da\nimport dask.dataframe as dd\nfrom dask.dataframe.utils import assert_eq, PANDAS_VERSION\n\nN = 40\ndf = 
pd.DataFrame(\n {\n \"a\": np.random.randn(N).cumsum(),\n \"b\": np.random.randint(100, size=(N,)),\n \"c\": np.random.randint(100, size=(N,)),\n \"d\": np.random.randint(100, size=(N,)),\n \"e\": np.random.randint(100, size=(N,)),\n }\n)\nddf = dd.from_pandas(df, 3)\n\nidx = (\n pd.date_range(\"2016-01-01\", freq=\"3s\", periods=100)\n | pd.date_range(\"2016-01-01\", freq=\"5s\", periods=100)\n)[:N]\n\nts = pd.DataFrame(\n {\n \"a\": np.random.randn(N).cumsum(),\n \"b\": np.random.randint(100, size=(N,)),\n \"c\": np.random.randint(100, size=(N,)),\n \"d\": np.random.randint(100, size=(N,)),\n \"e\": np.random.randint(100, size=(N,)),\n },\n index=idx,\n)\ndts = dd.from_pandas(ts, 3)\n\n\ndef shifted_sum(df, before, after, c=0):\n a = df.shift(before)\n b = df.shift(-after)\n return df + a + b + c", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_map_overlap_test_map_overlap.for_before_after_in_0_.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_map_overlap_test_map_overlap.for_before_after_in_0_.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 47, "end_line": 59, "span_ids": ["test_map_overlap"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"npartitions\", [1, 4])\ndef test_map_overlap(npartitions):\n ddf = dd.from_pandas(df, npartitions)\n for before, after in [(0, 3), (3, 0), (3, 3), (0, 0)]:\n # DataFrame\n res = ddf.map_overlap(shifted_sum, before, after, before, after, c=2)\n sol = shifted_sum(df, before, after, c=2)\n assert_eq(res, sol)\n\n # Series\n res = ddf.b.map_overlap(shifted_sum, before, after, before, after, c=2)\n sol = shifted_sum(df.b, before, after, c=2)\n assert_eq(res, sol)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_map_overlap_names_test_map_overlap_names.assert_res4__name_res_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_map_overlap_names_test_map_overlap_names.assert_res4__name_res_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 62, "end_line": 77, "span_ids": ["test_map_overlap_names"], "tokens": 207}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_overlap_names():\n npartitions = 3\n ddf = dd.from_pandas(df, npartitions)\n\n res = ddf.map_overlap(shifted_sum, 0, 3, 0, 3, c=2)\n res2 = ddf.map_overlap(shifted_sum, 0, 3, 0, 3, c=2)\n assert set(res.dask) == set(res2.dask)\n\n res3 = ddf.map_overlap(shifted_sum, 0, 3, 0, 3, c=3)\n assert res3._name != res._name\n # Difference is just the final map\n diff = set(res3.dask).difference(res.dask)\n assert len(diff) == npartitions\n\n res4 = ddf.map_overlap(shifted_sum, 3, 0, 0, 3, c=2)\n assert res4._name != res._name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_map_overlap_errors_test_map_overlap_errors.with_pytest_raises_TypeEr.ddf_map_overlap_shifted_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_map_overlap_errors_test_map_overlap_errors.with_pytest_raises_TypeEr.ddf_map_overlap_shifted_s", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 80, "end_line": 95, "span_ids": ["test_map_overlap_errors"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_overlap_errors():\n # Non-integer\n with pytest.raises(ValueError):\n ddf.map_overlap(shifted_sum, 0.5, 3, 0, 2, c=2)\n\n # Negative\n with pytest.raises(ValueError):\n ddf.map_overlap(shifted_sum, 0, -5, 0, 2, c=2)\n\n # Partition size < window size\n with pytest.raises(NotImplementedError):\n ddf.map_overlap(shifted_sum, 0, 100, 0, 100, c=2).compute()\n\n # Offset with non-datetime\n with pytest.raises(TypeError):\n ddf.map_overlap(shifted_sum, pd.Timedelta(\"1s\"), pd.Timedelta(\"1s\"), 0, 2, c=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_map_overlap_provide_meta_test_map_overlap_provide_meta.assert_eq_res_sol_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_map_overlap_provide_meta_test_map_overlap_provide_meta.assert_eq_res_sol_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 98, "end_line": 109, "span_ids": ["test_map_overlap_provide_meta"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_overlap_provide_meta():\n df = pd.DataFrame(\n {\"x\": [1, 2, 4, 7, 11], \"y\": [1.0, 2.0, 3.0, 4.0, 5.0]}\n ).rename_axis(\"myindex\")\n ddf = dd.from_pandas(df, npartitions=2)\n\n # Provide meta spec, but not full metadata\n res = ddf.map_overlap(\n lambda df: df.rolling(2).sum(), 2, 0, meta={\"x\": \"i8\", \"y\": \"i8\"}\n )\n sol = df.rolling(2).sum()\n assert_eq(res, sol)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_mad_rolling_method_args_check_less_precise._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_mad_rolling_method_args_check_less_precise._", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 112, "end_line": 134, "span_ids": ["mad", "impl:13"], "tokens": 221}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def mad(x):\n return np.fabs(x - x.mean()).mean()\n\n\nrolling_method_args_check_less_precise = [\n (\"count\", (), False),\n (\"sum\", (), False),\n (\"mean\", (), False),\n (\"median\", (), False),\n (\"min\", (), False),\n (\"max\", (), False),\n (\"std\", (), True),\n (\"var\", (), True),\n (\"skew\", (), True), # here and elsewhere, results for kurt and skew are\n (\"kurt\", (), True), # checked with check_less_precise=True so that we are\n # only looking at 3ish decimal places for the equality check\n # rather than 5ish. 
I have encountered a case where a test\n # seems to have failed due to numerical problems with kurt.\n # So far, I am only weakening the check for kurt and skew,\n # as they involve third degree powers and higher\n (\"quantile\", (0.38,), False),\n (\"apply\", (mad,), False),\n]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_methods_test_rolling_methods.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_methods_test_rolling_methods.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 137, "end_line": 168, "span_ids": ["test_rolling_methods"], "tokens": 269}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"method,args,check_less_precise\", rolling_method_args_check_less_precise\n)\n@pytest.mark.parametrize(\"window\", [1, 2, 4, 5])\n@pytest.mark.parametrize(\"center\", [True, False])\ndef test_rolling_methods(method, args, window, center, check_less_precise):\n if dd._compat.PANDAS_GT_110:\n check_less_precise = {}\n else:\n check_less_precise = {\"check_less_precise\": check_less_precise}\n # DataFrame\n prolling = df.rolling(window, center=center)\n drolling = ddf.rolling(window, center=center)\n if method == \"apply\":\n kwargs = {\"raw\": False}\n else:\n kwargs = {}\n\n assert_eq(\n getattr(prolling, method)(*args, **kwargs),\n getattr(drolling, method)(*args, **kwargs),\n **check_less_precise,\n )\n\n # Series\n prolling = df.a.rolling(window, center=center)\n drolling = ddf.a.rolling(window, center=center)\n assert_eq(\n getattr(prolling, method)(*args, **kwargs),\n getattr(drolling, method)(*args, **kwargs),\n **check_less_precise,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_if_PANDAS_VERSION_0_2_test_rolling_cov.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_if_PANDAS_VERSION_0_2_test_rolling_cov.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 171, "end_line": 191, "span_ids": ["test_rolling_cov", "impl:15"], "tokens": 195}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": "if PANDAS_VERSION <= \"0.25.0\":\n filter_panel_warning = pytest.mark.filterwarnings(\n \"ignore::DeprecationWarning:pandas[.*]\"\n )\nelse:\n filter_panel_warning = lambda f: f\n\n\n@filter_panel_warning\n@pytest.mark.parametrize(\"window\", [1, 2, 4, 5])\n@pytest.mark.parametrize(\"center\", [True, False])\ndef test_rolling_cov(window, center):\n # DataFrame\n prolling = df.drop(\"a\", 1).rolling(window, center=center)\n drolling = ddf.drop(\"a\", 1).rolling(window, center=center)\n assert_eq(prolling.cov(), drolling.cov())\n\n # Series\n prolling = df.b.rolling(window, center=center)\n drolling = ddf.b.rolling(window, center=center)\n assert_eq(prolling.cov(), drolling.cov())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_raises_test_rolling_names.assert_sorted_a_rolling_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_raises_test_rolling_names.assert_sorted_a_rolling_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 194, "end_line": 211, "span_ids": ["test_rolling_raises", "test_rolling_names"], "tokens": 260}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rolling_raises():\n df = pd.DataFrame(\n {\"a\": np.random.randn(25).cumsum(), \"b\": np.random.randint(100, size=(25,))}\n )\n ddf = dd.from_pandas(df, 3)\n pytest.raises(ValueError, lambda: ddf.rolling(1.5))\n pytest.raises(ValueError, lambda: ddf.rolling(-1))\n pytest.raises(ValueError, lambda: ddf.rolling(3, min_periods=1.2))\n pytest.raises(ValueError, lambda: ddf.rolling(3, min_periods=-2))\n pytest.raises(ValueError, lambda: ddf.rolling(3, axis=10))\n pytest.raises(ValueError, lambda: ddf.rolling(3, axis=\"coulombs\"))\n pytest.raises(NotImplementedError, lambda: ddf.rolling(100).mean().compute())\n\n\ndef test_rolling_names():\n df = pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]})\n a = dd.from_pandas(df, npartitions=2)\n assert sorted(a.rolling(2).sum().dask) == sorted(a.rolling(2).sum().dask)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_axis_test_rolling_axis.assert_eq_s_rolling_5_ax": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_axis_test_rolling_axis.assert_eq_s_rolling_5_ax", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 214, "end_line": 231, 
"span_ids": ["test_rolling_axis"], "tokens": 237}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rolling_axis():\n df = pd.DataFrame(np.random.randn(20, 16))\n ddf = dd.from_pandas(df, npartitions=3)\n\n assert_eq(df.rolling(3, axis=0).mean(), ddf.rolling(3, axis=0).mean())\n assert_eq(df.rolling(3, axis=1).mean(), ddf.rolling(3, axis=1).mean())\n assert_eq(\n df.rolling(3, min_periods=1, axis=1).mean(),\n ddf.rolling(3, min_periods=1, axis=1).mean(),\n )\n assert_eq(\n df.rolling(3, axis=\"columns\").mean(), ddf.rolling(3, axis=\"columns\").mean()\n )\n assert_eq(df.rolling(3, axis=\"rows\").mean(), ddf.rolling(3, axis=\"rows\").mean())\n\n s = df[3]\n ds = ddf[3]\n assert_eq(s.rolling(5, axis=0).std(), ds.rolling(5, axis=0).std())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_partition_size_test_rolling_partition_size.for_obj_dobj_in_df_dd.with_pytest_raises_NotImp.dobj_rolling_12_mean_c": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_partition_size_test_rolling_partition_size.for_obj_dobj_in_df_dd.with_pytest_raises_NotImp.dobj_rolling_12_mean_c", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 234, "end_line": 242, "span_ids": ["test_rolling_partition_size"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rolling_partition_size():\n df = pd.DataFrame(np.random.randn(50, 2))\n ddf = dd.from_pandas(df, npartitions=5)\n\n for obj, dobj in [(df, ddf), (df[0], ddf[0])]:\n assert_eq(obj.rolling(10).mean(), dobj.rolling(10).mean())\n assert_eq(obj.rolling(11).mean(), dobj.rolling(11).mean())\n with pytest.raises(NotImplementedError):\n dobj.rolling(12).mean().compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_repr_test_time_rolling_constructor.assert_result__min_period": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_repr_test_time_rolling_constructor.assert_result__min_period", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 245, "end_line": 264, 
"span_ids": ["test_time_rolling_constructor", "test_rolling_repr", "test_time_rolling_repr"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rolling_repr():\n ddf = dd.from_pandas(pd.DataFrame([10] * 30), npartitions=3)\n res = repr(ddf.rolling(4))\n assert res == \"Rolling [window=4,center=False,axis=0]\"\n\n\ndef test_time_rolling_repr():\n res = repr(dts.rolling(\"4s\"))\n assert res == \"Rolling [window=4000000000,center=False,win_type=freq,axis=0]\"\n\n\ndef test_time_rolling_constructor():\n result = dts.rolling(\"4s\")\n assert result.window == \"4s\"\n assert result.min_periods is None\n assert result.win_type is None\n\n assert result._win_type == \"freq\"\n assert result._window == 4000000000 # ns\n assert result._min_periods == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_time_rolling_methods_test_time_rolling_methods.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_time_rolling_methods_test_time_rolling_methods.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 267, "end_line": 300, "span_ids": ["test_time_rolling_methods"], "tokens": 292}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"method,args,check_less_precise\", rolling_method_args_check_less_precise\n)\n@pytest.mark.parametrize(\"window\", [\"1S\", \"2S\", \"3S\", pd.offsets.Second(5)])\ndef test_time_rolling_methods(method, args, window, check_less_precise):\n if dd._compat.PANDAS_GT_110:\n if check_less_precise:\n check_less_precise = {\"atol\": 0.5e-3, \"rtol\": 0.5e-3}\n else:\n check_less_precise = {}\n else:\n check_less_precise = {\"check_less_precise\": check_less_precise}\n\n # DataFrame\n if method == \"apply\":\n kwargs = {\"raw\": False}\n else:\n kwargs = {}\n prolling = ts.rolling(window)\n drolling = dts.rolling(window)\n assert_eq(\n getattr(prolling, method)(*args, **kwargs),\n getattr(drolling, method)(*args, **kwargs),\n **check_less_precise,\n )\n\n # Series\n prolling = ts.a.rolling(window)\n drolling = dts.a.rolling(window)\n assert_eq(\n getattr(prolling, method)(*args, **kwargs),\n getattr(drolling, method)(*args, **kwargs),\n **check_less_precise,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_time_rolling_cov_test_time_rolling_cov.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_time_rolling_cov_test_time_rolling_cov.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 303, "end_line": 314, "span_ids": ["test_time_rolling_cov"], "tokens": 128}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@filter_panel_warning\n@pytest.mark.parametrize(\"window\", [\"1S\", \"2S\", \"3S\", pd.offsets.Second(5)])\ndef test_time_rolling_cov(window):\n # DataFrame\n prolling = ts.drop(\"a\", 1).rolling(window)\n drolling = dts.drop(\"a\", 1).rolling(window)\n assert_eq(prolling.cov(), drolling.cov())\n\n # Series\n prolling = ts.b.rolling(window)\n drolling = dts.b.rolling(window)\n assert_eq(prolling.cov(), drolling.cov())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_time_rolling_large_window_fixed_chunks_test_time_rolling_large_window_fixed_chunks.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_time_rolling_large_window_fixed_chunks_test_time_rolling_large_window_fixed_chunks.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 317, "end_line": 332, "span_ids": ["test_time_rolling_large_window_fixed_chunks"], "tokens": 198}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"window,N\",\n [(\"1s\", 10), (\"2s\", 10), (\"10s\", 10), (\"10h\", 10), (\"10s\", 100), (\"10h\", 100)],\n)\ndef test_time_rolling_large_window_fixed_chunks(window, N):\n df = pd.DataFrame(\n {\n \"a\": pd.date_range(\"2016-01-01 00:00:00\", periods=N, freq=\"1s\"),\n \"b\": np.random.randint(100, size=(N,)),\n }\n )\n df = df.set_index(\"a\")\n ddf = dd.from_pandas(df, 5)\n assert_eq(ddf.rolling(window).sum(), df.rolling(window).sum())\n assert_eq(ddf.rolling(window).count(), df.rolling(window).count())\n assert_eq(ddf.rolling(window).mean(), df.rolling(window).mean())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_time_rolling_large_window_variable_chunks_test_time_rolling.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_time_rolling_large_window_variable_chunks_test_time_rolling.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 335, "end_line": 359, "span_ids": ["test_time_rolling_large_window_variable_chunks", "test_time_rolling"], "tokens": 322}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"window\", [\"2s\", \"5s\", \"20s\", \"10h\"])\ndef test_time_rolling_large_window_variable_chunks(window):\n df = pd.DataFrame(\n {\n \"a\": pd.date_range(\"2016-01-01 00:00:00\", periods=100, freq=\"1s\"),\n \"b\": np.random.randint(100, size=(100,)),\n }\n )\n ddf = dd.from_pandas(df, 5)\n ddf = ddf.repartition(divisions=[0, 5, 20, 28, 33, 54, 79, 80, 82, 99])\n df = df.set_index(\"a\")\n ddf = ddf.set_index(\"a\")\n assert_eq(ddf.rolling(window).sum(), df.rolling(window).sum())\n assert_eq(ddf.rolling(window).count(), df.rolling(window).count())\n assert_eq(ddf.rolling(window).mean(), df.rolling(window).mean())\n\n\n@pytest.mark.parametrize(\"before, after\", [(\"6s\", \"6s\"), (\"2s\", \"2s\"), (\"6s\", \"2s\")])\ndef test_time_rolling(before, after):\n window = before\n before = pd.Timedelta(before)\n after = pd.Timedelta(after)\n result = dts.map_overlap(lambda x: x.rolling(window).count(), before, after)\n expected = dts.compute().rolling(window).count()\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_agg_aggregate_test_rolling_agg_aggregate.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_agg_aggregate_test_rolling_agg_aggregate.None_4", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 362, "end_line": 390, "span_ids": ["test_rolling_agg_aggregate"], "tokens": 306}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rolling_agg_aggregate():\n df = pd.DataFrame({\"A\": range(5), \"B\": range(0, 10, 2)})\n ddf = dd.from_pandas(df, npartitions=3)\n\n assert_eq(\n df.rolling(window=3).agg([np.mean, np.std]),\n ddf.rolling(window=3).agg([np.mean, np.std]),\n )\n\n 
assert_eq(\n df.rolling(window=3).agg({\"A\": np.sum, \"B\": lambda x: np.std(x, ddof=1)}),\n ddf.rolling(window=3).agg({\"A\": np.sum, \"B\": lambda x: np.std(x, ddof=1)}),\n )\n\n assert_eq(\n df.rolling(window=3).agg([np.sum, np.mean]),\n ddf.rolling(window=3).agg([np.sum, np.mean]),\n )\n\n assert_eq(\n df.rolling(window=3).agg({\"A\": [np.sum, np.mean]}),\n ddf.rolling(window=3).agg({\"A\": [np.sum, np.mean]}),\n )\n\n kwargs = {\"raw\": True}\n assert_eq(\n df.rolling(window=3).apply(lambda x: np.std(x, ddof=1), **kwargs),\n ddf.rolling(window=3).apply(lambda x: np.std(x, ddof=1), **kwargs),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_numba_engine_test_rolling_numba_engine.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_numba_engine_test_rolling_numba_engine.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 393, "end_line": 410, "span_ids": ["test_rolling_numba_engine"], "tokens": 224}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(not dd._compat.PANDAS_GT_100, reason=\"needs pandas>=1.0.0\")\n@pytest.mark.xfail(da.numpy_compat._numpy_120, reason=\"sparse-383\")\ndef test_rolling_numba_engine():\n numba = pytest.importorskip(\"numba\")\n if not dd._compat.PANDAS_GT_104 and LooseVersion(numba.__version__) >= \"0.49\":\n # Was fixed in https://github.com/pandas-dev/pandas/pull/33687\n pytest.xfail(\"Known incompatibility between pandas and numba\")\n\n df = pd.DataFrame({\"A\": range(5), \"B\": range(0, 10, 2)})\n ddf = dd.from_pandas(df, npartitions=3)\n\n def f(x):\n return np.sum(x) + 5\n\n assert_eq(\n df.rolling(3).apply(f, engine=\"numba\", raw=True),\n ddf.rolling(3).apply(f, engine=\"numba\", raw=True),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_apply_numba_raises_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_apply_numba_raises_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 413, "end_line": 419, "span_ids": ["test_rolling_apply_numba_raises"], "tokens": 118}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(dd._compat.PANDAS_GT_100, reason=\"Requires pandas<1.0.0\")\ndef test_rolling_apply_numba_raises():\n df = pd.DataFrame({\"A\": range(5), \"B\": range(0, 10, 2)})\n ddf = dd.from_pandas(df, npartitions=3)\n with pytest.raises(NotImplementedError, match=\"pandas>=1.0.0\"):\n ddf.rolling(3).apply(lambda x: x.sum(), engine=\"numba\", raw=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_itertools_shuffle_func.shuffle": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_itertools_shuffle_func.shuffle", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 44, "span_ids": ["imports"], "tokens": 370}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import itertools\nimport os\nimport random\nimport tempfile\nfrom unittest import mock\n\nimport pandas as pd\nimport pytest\nimport pickle\nimport numpy as np\nimport string\nimport multiprocessing as mp\nfrom copy import copy\n\nimport dask\nimport dask.dataframe as dd\nfrom dask.dataframe._compat import tm, assert_categorical_equal\nfrom dask import delayed\nfrom dask.base import compute_as_if_collection\nfrom dask.optimization import cull\nfrom dask.dataframe.shuffle import (\n shuffle,\n partitioning_index,\n rearrange_by_column,\n rearrange_by_divisions,\n maybe_buffered_partd,\n remove_nans,\n)\nfrom dask.dataframe.utils import assert_eq, make_meta\n\ndsk = {\n (\"x\", 0): pd.DataFrame({\"a\": [1, 2, 3], \"b\": [1, 4, 7]}, index=[0, 1, 3]),\n (\"x\", 1): pd.DataFrame({\"a\": [4, 5, 6], \"b\": [2, 5, 8]}, index=[5, 6, 8]),\n (\"x\", 2): pd.DataFrame({\"a\": [7, 8, 9], \"b\": [3, 6, 9]}, index=[9, 9, 9]),\n}\nmeta = make_meta({\"a\": \"i8\", \"b\": \"i8\"}, index=pd.Index([], \"i8\"))\nd = dd.DataFrame(dsk, \"x\", meta, [0, 4, 9, 9])\nfull = d.compute()\nCHECK_FREQ = {}\nif dd._compat.PANDAS_GT_110:\n CHECK_FREQ[\"check_freq\"] = False\n\n\nshuffle_func = shuffle", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py__conflicts_with_keyword__test_shuffle.assert_shuffle_func_d_d_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py__conflicts_with_keyword__test_shuffle.assert_shuffle_func_d_d_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 43, "end_line": 58, 
"span_ids": ["imports", "test_shuffle"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": " # conflicts with keyword argument\n\n\n@pytest.mark.parametrize(\"shuffle\", [\"disk\", \"tasks\"])\ndef test_shuffle(shuffle):\n s = shuffle_func(d, d.b, shuffle=shuffle)\n assert isinstance(s, dd.DataFrame)\n assert s.npartitions == d.npartitions\n\n x = dask.get(s.dask, (s._name, 0))\n y = dask.get(s.dask, (s._name, 1))\n\n assert not (set(x.b) & set(y.b)) # disjoint\n assert set(s.dask).issuperset(d.dask)\n\n assert shuffle_func(d, d.b)._name == shuffle_func(d, d.b)._name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_default_partitions_test_shuffle_npartitions_task.assert_set_map_tuple_sc_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_default_partitions_test_shuffle_npartitions_task.assert_set_map_tuple_sc_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 61, "end_line": 75, "span_ids": ["test_default_partitions", "test_shuffle_npartitions_task"], "tokens": 161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_default_partitions():\n assert shuffle(d, d.b).npartitions == d.npartitions\n\n\ndef test_shuffle_npartitions_task():\n df = pd.DataFrame({\"x\": np.random.random(100)})\n ddf = dd.from_pandas(df, npartitions=10)\n s = shuffle(ddf, ddf.x, shuffle=\"tasks\", npartitions=17, max_branch=4)\n sc = s.compute(scheduler=\"sync\")\n assert s.npartitions == 17\n assert set(s.dask).issuperset(set(ddf.dask))\n\n assert len(sc) == len(df)\n assert list(s.columns) == list(df.columns)\n assert set(map(tuple, sc.values.tolist())) == set(map(tuple, df.values.tolist()))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_index_with_non_series_test_shuffle_from_one_partition_to_one_other.for_i_in_1_2_.assert_len_a_compute_sche": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_index_with_non_series_test_shuffle_from_one_partition_to_one_other.for_i_in_1_2_.assert_len_a_compute_sche", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", 
"start_line": 78, "end_line": 102, "span_ids": ["test_index_with_dataframe", "test_shuffle_from_one_partition_to_one_other", "test_index_with_non_series"], "tokens": 256}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"method\", [\"disk\", \"tasks\"])\ndef test_index_with_non_series(method):\n from dask.dataframe.tests.test_multi import list_eq\n\n list_eq(shuffle(d, d.b, shuffle=method), shuffle(d, \"b\", shuffle=method))\n\n\n@pytest.mark.parametrize(\"method\", [\"disk\", \"tasks\"])\ndef test_index_with_dataframe(method):\n res1 = shuffle(d, d[[\"b\"]], shuffle=method).compute()\n res2 = shuffle(d, [\"b\"], shuffle=method).compute()\n res3 = shuffle(d, \"b\", shuffle=method).compute()\n\n assert sorted(res1.values.tolist()) == sorted(res2.values.tolist())\n assert sorted(res1.values.tolist()) == sorted(res3.values.tolist())\n\n\n@pytest.mark.parametrize(\"method\", [\"disk\", \"tasks\"])\ndef test_shuffle_from_one_partition_to_one_other(method):\n df = pd.DataFrame({\"x\": [1, 2, 3]})\n a = dd.from_pandas(df, 1)\n\n for i in [1, 2]:\n b = shuffle(a, \"x\", npartitions=i, shuffle=method)\n assert len(a.compute(scheduler=\"sync\")) == len(b.compute(scheduler=\"sync\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_shuffle_empty_partitions_test_shuffle_empty_partitions.for_p_in_parts_.assert_s_columns_p_col": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_shuffle_empty_partitions_test_shuffle_empty_partitions.for_p_in_parts_.assert_s_columns_p_col", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 105, "end_line": 112, "span_ids": ["test_shuffle_empty_partitions"], "tokens": 111}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"method\", [\"disk\", \"tasks\"])\ndef test_shuffle_empty_partitions(method):\n df = pd.DataFrame({\"x\": [1, 2, 3] * 10})\n ddf = dd.from_pandas(df, npartitions=3)\n s = shuffle(ddf, ddf.x, npartitions=6, shuffle=method)\n parts = compute_as_if_collection(dd.DataFrame, s.dask, s.__dask_keys__())\n for p in parts:\n assert s.columns == p.columns", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_df2_df2.pd_DataFrame_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_df2_df2.pd_DataFrame_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 115, "end_line": 126, "span_ids": ["impl:15"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "df2 = pd.DataFrame(\n {\n \"i32\": np.array([1, 2, 3] * 3, dtype=\"int32\"),\n \"f32\": np.array([None, 2.5, 3.5] * 3, dtype=\"float32\"),\n \"cat\": pd.Series([\"a\", \"b\", \"c\"] * 3).astype(\"category\"),\n \"obj\": pd.Series([\"d\", \"e\", \"f\"] * 3),\n \"bool\": np.array([True, False, True] * 3),\n \"dt\": pd.Series(pd.date_range(\"20130101\", periods=9)),\n \"dt_tz\": pd.Series(pd.date_range(\"20130101\", periods=9, tz=\"US/Eastern\")),\n \"td\": pd.Series(pd.timedelta_range(\"2000\", periods=9)),\n }\n)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_partitioning_index_test_partitioning_index.None_7": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_partitioning_index_test_partitioning_index.None_7", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 129, "end_line": 145, "span_ids": ["test_partitioning_index"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_partitioning_index():\n res = partitioning_index(df2.i32, 3)\n assert ((res < 3) & (res >= 0)).all()\n assert len(np.unique(res)) > 1\n\n assert (partitioning_index(df2.i32, 3) == partitioning_index(df2.i32, 3)).all()\n\n res = partitioning_index(df2[[\"i32\"]], 3)\n assert ((res < 3) & (res >= 0)).all()\n assert len(np.unique(res)) > 1\n\n res = partitioning_index(df2[[\"cat\", \"bool\", \"f32\"]], 2)\n assert ((0 <= res) & (res < 2)).all()\n\n res = partitioning_index(df2.index, 4)\n assert ((res < 4) & (res >= 0)).all()\n assert len(np.unique(res)) > 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_partitioning_index_categorical_on_values_test_partitioning_index_categorical_on_values.None_1": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_partitioning_index_categorical_on_values_test_partitioning_index_categorical_on_values.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 148, "end_line": 160, "span_ids": ["test_partitioning_index_categorical_on_values"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_partitioning_index_categorical_on_values():\n df = pd.DataFrame({\"a\": list(string.ascii_letters), \"b\": [1, 2, 3, 4] * 13})\n df.a = df.a.astype(\"category\")\n df2 = df.copy()\n df2.a = df2.a.cat.set_categories(list(reversed(df2.a.cat.categories)))\n\n res = partitioning_index(df.a, 5)\n res2 = partitioning_index(df2.a, 5)\n assert (res == res2).all()\n\n res = partitioning_index(df, 5)\n res2 = partitioning_index(df2, 5)\n assert (res == res2).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_tasks_test_set_index_tasks.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_tasks_test_set_index_tasks.None_5", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 163, "end_line": 184, "span_ids": ["test_set_index_tasks"], "tokens": 236}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"npartitions\", [1, 4, 7, pytest.param(23, marks=pytest.mark.slow)]\n)\ndef test_set_index_tasks(npartitions):\n df = pd.DataFrame(\n {\"x\": np.random.random(100), \"y\": np.random.random(100) // 0.2},\n index=np.random.random(100),\n )\n\n ddf = dd.from_pandas(df, npartitions=npartitions)\n\n assert_eq(df.set_index(\"x\"), ddf.set_index(\"x\", shuffle=\"tasks\"))\n\n assert_eq(df.set_index(\"y\"), ddf.set_index(\"y\", shuffle=\"tasks\"))\n\n assert_eq(df.set_index(df.x), ddf.set_index(ddf.x, shuffle=\"tasks\"))\n\n assert_eq(df.set_index(df.x + df.y), ddf.set_index(ddf.x + ddf.y, shuffle=\"tasks\"))\n\n assert_eq(df.set_index(df.x + 1), ddf.set_index(ddf.x + 1, shuffle=\"tasks\"))\n\n assert_eq(df.set_index(df.index), ddf.set_index(ddf.index, shuffle=\"tasks\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_self_index_test_set_index_self_index.assert_eq_b_df_set_index": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_self_index_test_set_index_self_index.assert_eq_b_df_set_index", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 187, "end_line": 198, "span_ids": ["test_set_index_self_index"], "tokens": 107}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"shuffle\", [\"disk\", \"tasks\"])\ndef test_set_index_self_index(shuffle):\n df = pd.DataFrame(\n {\"x\": np.random.random(100), \"y\": np.random.random(100) // 0.2},\n index=np.random.random(100),\n )\n\n a = dd.from_pandas(df, npartitions=4)\n b = a.set_index(a.index, shuffle=shuffle)\n assert a is b\n\n assert_eq(b, df.set_index(df.index))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_names_test_set_index_names.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_names_test_set_index_names.None_3", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 201, "end_line": 221, "span_ids": ["test_set_index_names"], "tokens": 238}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"shuffle\", [\"tasks\"])\ndef test_set_index_names(shuffle):\n df = pd.DataFrame(\n {\"x\": np.random.random(100), \"y\": np.random.random(100) // 0.2},\n index=np.random.random(100),\n )\n\n ddf = dd.from_pandas(df, npartitions=4)\n\n assert set(ddf.set_index(\"x\", shuffle=shuffle).dask) == set(\n ddf.set_index(\"x\", shuffle=shuffle).dask\n )\n assert set(ddf.set_index(\"x\", shuffle=shuffle).dask) != set(\n ddf.set_index(\"y\", shuffle=shuffle).dask\n )\n assert set(ddf.set_index(\"x\", max_branch=4, shuffle=shuffle).dask) != set(\n ddf.set_index(\"x\", max_branch=3, shuffle=shuffle).dask\n )\n assert set(ddf.set_index(\"x\", drop=True, shuffle=shuffle).dask) != set(\n ddf.set_index(\"x\", drop=False, shuffle=shuffle).dask\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_tasks_2_test_set_index_tasks_2.df2_value_sum_compute_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_tasks_2_test_set_index_tasks_2.df2_value_sum_compute_s", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 224, "end_line": 236, "span_ids": ["test_set_index_tasks_2"], "tokens": 103}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"shuffle\", [\"disk\", \"tasks\"])\ndef test_set_index_tasks_2(shuffle):\n df = dd.demo.make_timeseries(\n \"2000\",\n \"2004\",\n {\"value\": float, \"name\": str, \"id\": int},\n freq=\"2H\",\n partition_freq=\"1M\",\n seed=1,\n )\n\n df2 = df.set_index(\"name\", shuffle=shuffle)\n df2.value.sum().compute(scheduler=\"sync\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_tasks_3_test_set_index_tasks_3.assert_ddf2_npartitions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_tasks_3_test_set_index_tasks_3.assert_ddf2_npartitions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 239, "end_line": 249, "span_ids": ["test_set_index_tasks_3"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"shuffle\", [\"disk\", \"tasks\"])\ndef test_set_index_tasks_3(shuffle):\n df = pd.DataFrame(np.random.random((10, 2)), columns=[\"x\", \"y\"])\n ddf = dd.from_pandas(df, npartitions=5)\n\n ddf2 = ddf.set_index(\n \"x\", shuffle=shuffle, max_branch=2, npartitions=ddf.npartitions\n )\n df2 = df.set_index(\"x\")\n assert_eq(df2, ddf2)\n assert ddf2.npartitions == ddf.npartitions", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_shuffle_sort_test_shuffle_sort.assert_eq_ddf2_loc_2_3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_shuffle_sort_test_shuffle_sort.assert_eq_ddf2_loc_2_3_", 
"embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 252, "end_line": 260, "span_ids": ["test_shuffle_sort"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"shuffle\", [\"tasks\", \"disk\"])\ndef test_shuffle_sort(shuffle):\n df = pd.DataFrame({\"x\": [1, 2, 3, 2, 1], \"y\": [9, 8, 7, 1, 5]})\n ddf = dd.from_pandas(df, npartitions=3)\n\n df2 = df.set_index(\"x\").sort_index()\n ddf2 = ddf.set_index(\"x\", shuffle=shuffle)\n\n assert_eq(ddf2.loc[2:3], df2.loc[2:3])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_rearrange_test_rearrange.for_i_in_a__partitions_dr.assert_sum_i_in_set_part_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_rearrange_test_rearrange.for_i_in_a__partitions_dr.assert_sum_i_in_set_part_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 263, "end_line": 280, "span_ids": ["test_rearrange"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"shuffle\", [\"tasks\", \"disk\"])\n@pytest.mark.parametrize(\"scheduler\", [\"threads\", \"processes\"])\ndef test_rearrange(shuffle, scheduler):\n df = pd.DataFrame({\"x\": np.random.random(10)})\n ddf = dd.from_pandas(df, npartitions=4)\n ddf2 = ddf.assign(_partitions=ddf.x % 4)\n\n result = rearrange_by_column(ddf2, \"_partitions\", max_branch=32, shuffle=shuffle)\n assert result.npartitions == ddf.npartitions\n assert set(ddf.dask).issubset(result.dask)\n\n # Every value in exactly one partition\n a = result.compute(scheduler=scheduler)\n get = dask.base.get_scheduler(scheduler=scheduler)\n parts = get(result.dask, result.__dask_keys__())\n\n for i in a._partitions.drop_duplicates():\n assert sum(i in set(part._partitions) for part in parts) == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_rearrange_cleanup_mock_shuffle_group_3.raise_ValueError_Mock_ex": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_rearrange_cleanup_mock_shuffle_group_3.raise_ValueError_Mock_ex", "embedding": null, 
"metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 283, "end_line": 298, "span_ids": ["test_rearrange_cleanup", "mock_shuffle_group_3"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rearrange_cleanup():\n df = pd.DataFrame({\"x\": np.random.random(10)})\n ddf = dd.from_pandas(df, npartitions=4)\n ddf2 = ddf.assign(_partitions=ddf.x % 4)\n\n tmpdir = tempfile.mkdtemp()\n\n with dask.config.set(temporay_directory=str(tmpdir)):\n result = rearrange_by_column(ddf2, \"_partitions\", max_branch=32, shuffle=\"disk\")\n result.compute(scheduler=\"processes\")\n\n assert len(os.listdir(tmpdir)) == 0\n\n\ndef mock_shuffle_group_3(df, col, npartitions, p):\n raise ValueError(\"Mock exception!\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_rearrange_disk_cleanup_with_exception_test_rearrange_disk_cleanup_with_exception.assert_len_os_listdir_tmp": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_rearrange_disk_cleanup_with_exception_test_rearrange_disk_cleanup_with_exception.assert_len_os_listdir_tmp", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 301, "end_line": 318, "span_ids": ["test_rearrange_disk_cleanup_with_exception"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rearrange_disk_cleanup_with_exception():\n # ensure temporary files are cleaned up when there's an internal exception.\n\n with mock.patch(\"dask.dataframe.shuffle.shuffle_group_3\", new=mock_shuffle_group_3):\n df = pd.DataFrame({\"x\": np.random.random(10)})\n ddf = dd.from_pandas(df, npartitions=4)\n ddf2 = ddf.assign(_partitions=ddf.x % 4)\n\n tmpdir = tempfile.mkdtemp()\n\n with dask.config.set(temporay_directory=str(tmpdir)):\n with pytest.raises(ValueError, match=\"Mock exception!\"):\n result = rearrange_by_column(\n ddf2, \"_partitions\", max_branch=32, shuffle=\"disk\"\n )\n result.compute(scheduler=\"processes\")\n\n assert len(os.listdir(tmpdir)) == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_rearrange_by_column_with_narrow_divisions_test_maybe_buffered_partd.assert_isinstance_p2_part": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_rearrange_by_column_with_narrow_divisions_test_maybe_buffered_partd.assert_isinstance_p2_part", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 321, "end_line": 340, "span_ids": ["test_rearrange_by_column_with_narrow_divisions", "test_maybe_buffered_partd"], "tokens": 197}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rearrange_by_column_with_narrow_divisions():\n from dask.dataframe.tests.test_multi import list_eq\n\n A = pd.DataFrame({\"x\": [1, 2, 3, 4, 5, 6], \"y\": [1, 1, 2, 2, 3, 4]})\n a = dd.repartition(A, [0, 4, 5])\n\n df = rearrange_by_divisions(a, \"x\", (0, 2, 5))\n list_eq(df, a)\n\n\ndef test_maybe_buffered_partd():\n import partd\n\n f = maybe_buffered_partd()\n p1 = f()\n assert isinstance(p1.partd, partd.Buffer)\n f2 = pickle.loads(pickle.dumps(f))\n assert not f2.buffer\n p2 = f2()\n assert isinstance(p2.partd, partd.File)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_with_explicit_divisions_test_set_index_with_explicit_divisions.with_pytest_raises_ValueE.ddf_set_index_x_divisi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_with_explicit_divisions_test_set_index_with_explicit_divisions.with_pytest_raises_ValueE.ddf_set_index_x_divisi", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 343, "end_line": 360, "span_ids": ["test_set_index_with_explicit_divisions"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_index_with_explicit_divisions():\n df = pd.DataFrame({\"x\": [4, 1, 2, 5]}, index=[10, 20, 30, 40])\n\n ddf = dd.from_pandas(df, npartitions=2)\n\n def throw(*args, **kwargs):\n raise Exception()\n\n with dask.config.set(get=throw):\n ddf2 = ddf.set_index(\"x\", divisions=[1, 3, 5])\n assert ddf2.divisions == (1, 3, 5)\n\n df2 = df.set_index(\"x\")\n assert_eq(ddf2, df2)\n\n # Divisions must be sorted\n with pytest.raises(ValueError):\n ddf.set_index(\"x\", divisions=[3, 1, 5])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_divisions_2_test_set_index_divisions_2.assert_list_result_comput": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_divisions_2_test_set_index_divisions_2.assert_list_result_comput", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 363, "end_line": 370, "span_ids": ["test_set_index_divisions_2"], "tokens": 113}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_index_divisions_2():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5, 6], \"y\": list(\"abdabd\")})\n ddf = dd.from_pandas(df, 2)\n\n result = ddf.set_index(\"y\", divisions=[\"a\", \"c\", \"d\"])\n assert result.divisions == (\"a\", \"c\", \"d\")\n\n assert list(result.compute(scheduler=\"sync\").index[-2:]) == [\"d\", \"d\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_divisions_compute_test_set_index_divisions_compute.assert_len_d4_dask_len": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_divisions_compute_test_set_index_divisions_compute.assert_len_d4_dask_len", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 373, "end_line": 389, "span_ids": ["test_set_index_divisions_compute"], "tokens": 201}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_index_divisions_compute():\n d2 = d.set_index(\"b\", divisions=[0, 2, 9], compute=False)\n d3 = d.set_index(\"b\", divisions=[0, 2, 9], compute=True)\n\n assert_eq(d2, d3)\n assert_eq(d2, full.set_index(\"b\"))\n assert_eq(d3, full.set_index(\"b\"))\n assert len(d2.dask) > len(d3.dask)\n\n d4 = d.set_index(d.b, divisions=[0, 2, 9], compute=False)\n d5 = d.set_index(d.b, divisions=[0, 2, 9], compute=True)\n exp = full.copy()\n exp.index = exp.b\n assert_eq(d4, d5)\n assert_eq(d4, exp)\n assert_eq(d5, exp)\n assert len(d4.dask) > len(d5.dask)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_divisions_sorted_test_set_index_divisions_sorted.None_3.ddf_set_index_y_divisi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_divisions_sorted_test_set_index_divisions_sorted.None_3.ddf_set_index_y_divisi", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 392, "end_line": 419, "span_ids": ["test_set_index_divisions_sorted"], "tokens": 361}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_index_divisions_sorted():\n p1 = pd.DataFrame({\"x\": [10, 11, 12], \"y\": [\"a\", \"a\", \"a\"]})\n p2 = pd.DataFrame({\"x\": [13, 14, 15], \"y\": [\"b\", \"b\", \"c\"]})\n p3 = pd.DataFrame({\"x\": [16, 17, 18], \"y\": [\"d\", \"e\", \"e\"]})\n\n ddf = dd.DataFrame(\n {(\"x\", 0): p1, (\"x\", 1): p2, (\"x\", 2): p3}, \"x\", p1, [None, None, None, None]\n )\n df = ddf.compute()\n\n def throw(*args, **kwargs):\n raise Exception(\"Shouldn't have computed\")\n\n with dask.config.set(get=throw):\n res = ddf.set_index(\"x\", divisions=[10, 13, 16, 18], sorted=True)\n assert_eq(res, df.set_index(\"x\"))\n\n with dask.config.set(get=throw):\n res = ddf.set_index(\"y\", divisions=[\"a\", \"b\", \"d\", \"e\"], sorted=True)\n assert_eq(res, df.set_index(\"y\"))\n\n # with sorted=True, divisions must be same length as df.divisions\n with pytest.raises(ValueError):\n ddf.set_index(\"y\", divisions=[\"a\", \"b\", \"c\", \"d\", \"e\"], sorted=True)\n\n # Divisions must be sorted\n with pytest.raises(ValueError):\n ddf.set_index(\"y\", divisions=[\"a\", \"b\", \"d\", \"c\"], sorted=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_consistent_divisions_test_set_index_consistent_divisions.assert_len_divisions_set_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_consistent_divisions_test_set_index_consistent_divisions.assert_len_divisions_set_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 422, "end_line": 437, "span_ids": ["test_set_index_consistent_divisions"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.slow\ndef test_set_index_consistent_divisions():\n # See https://github.com/dask/dask/issues/3867\n df = 
pd.DataFrame(\n {\"x\": np.random.random(100), \"y\": np.random.random(100) // 0.2},\n index=np.random.random(100),\n )\n ddf = dd.from_pandas(df, npartitions=4)\n ddf = ddf.clear_divisions()\n\n ctx = mp.get_context(\"spawn\")\n pool = ctx.Pool(processes=8)\n with pool:\n results = [pool.apply_async(_set_index, (ddf, \"x\")) for _ in range(100)]\n divisions_set = set(result.get() for result in results)\n assert len(divisions_set) == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py__set_index_make_part.return.pd_DataFrame_x_np_ran": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py__set_index_make_part.return.pd_DataFrame_x_np_ran", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 440, "end_line": 454, "span_ids": ["make_part", "_set_index", "test_set_index_reduces_partitions_small"], "tokens": 139}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _set_index(df, *args, **kwargs):\n return df.set_index(*args, **kwargs).divisions\n\n\n@pytest.mark.parametrize(\"shuffle\", [\"disk\", \"tasks\"])\ndef test_set_index_reduces_partitions_small(shuffle):\n df = pd.DataFrame({\"x\": np.random.random(100)})\n ddf = dd.from_pandas(df, npartitions=50)\n\n ddf2 = ddf.set_index(\"x\", shuffle=shuffle, npartitions=\"auto\")\n assert ddf2.npartitions < 10\n\n\ndef make_part(n):\n return pd.DataFrame({\"x\": np.random.random(n), \"y\": np.random.random(n)})", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_reduces_partitions_large_test_set_index_reduces_partitions_large.assert_1_ddf2_npartitio": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_reduces_partitions_large_test_set_index_reduces_partitions_large.assert_1_ddf2_npartitio", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 457, "end_line": 471, "span_ids": ["test_set_index_reduces_partitions_large"], "tokens": 149}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"shuffle\", [\"disk\", \"tasks\"])\ndef test_set_index_reduces_partitions_large(shuffle):\n 
nbytes = 1e6\n nparts = 50\n n = int(nbytes / (nparts * 8))\n ddf = dd.DataFrame(\n {(\"x\", i): (make_part, n) for i in range(nparts)},\n \"x\",\n make_part(1),\n [None] * (nparts + 1),\n )\n ddf2 = ddf.set_index(\n \"x\", shuffle=shuffle, npartitions=\"auto\", partition_size=nbytes\n )\n assert 1 < ddf2.npartitions < 20", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_doesnt_increase_partitions_test_set_index_detects_sorted_data.assert_len_ddf2_dask_d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_doesnt_increase_partitions_test_set_index_detects_sorted_data.assert_len_ddf2_dask_d", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 474, "end_line": 497, "span_ids": ["test_set_index_detects_sorted_data", "test_set_index_doesnt_increase_partitions"], "tokens": 248}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"shuffle\", [\"disk\", \"tasks\"])\ndef test_set_index_doesnt_increase_partitions(shuffle):\n nparts = 2\n nbytes = 1e6\n n = int(nbytes / (nparts * 8))\n ddf = dd.DataFrame(\n {(\"x\", i): (make_part, n) for i in range(nparts)},\n \"x\",\n make_part(1),\n [None] * (nparts + 1),\n )\n ddf2 = ddf.set_index(\n \"x\", shuffle=shuffle, npartitions=\"auto\", partition_size=nbytes\n )\n assert ddf2.npartitions <= ddf.npartitions\n\n\n@pytest.mark.parametrize(\"shuffle\", [\"disk\", \"tasks\"])\ndef test_set_index_detects_sorted_data(shuffle):\n df = pd.DataFrame({\"x\": range(100), \"y\": range(100)})\n ddf = dd.from_pandas(df, npartitions=10, name=\"x\", sort=False)\n\n ddf2 = ddf.set_index(\"x\", shuffle=shuffle)\n assert len(ddf2.dask) < ddf.npartitions * 4", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_sorts_test_set_index_sorts.assert_ddf_set_index_tim": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_sorts_test_set_index_sorts.assert_ddf_set_index_tim", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 500, "end_line": 575, "span_ids": ["test_set_index_sorts"], "tokens": 728}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_index_sorts():\n # https://github.com/dask/dask/issues/2288\n vals = np.array(\n [\n 1348550149000000000,\n 1348550149000000000,\n 1348558142000000000,\n 1348558142000000000,\n 1348585928000000000,\n 1348585928000000000,\n 1348600739000000000,\n 1348601706000000000,\n 1348600739000000000,\n 1348601706000000000,\n 1348614789000000000,\n 1348614789000000000,\n 1348621037000000000,\n 1348621038000000000,\n 1348621040000000000,\n 1348621037000000000,\n 1348621038000000000,\n 1348621040000000000,\n 1348637628000000000,\n 1348638159000000000,\n 1348638160000000000,\n 1348638159000000000,\n 1348638160000000000,\n 1348637628000000000,\n 1348646354000000000,\n 1348646354000000000,\n 1348659107000000000,\n 1348657111000000000,\n 1348659107000000000,\n 1348657111000000000,\n 1348672876000000000,\n 1348672876000000000,\n 1348682787000000000,\n 1348681985000000000,\n 1348682787000000000,\n 1348681985000000000,\n 1348728167000000000,\n 1348728167000000000,\n 1348730745000000000,\n 1348730745000000000,\n 1348750198000000000,\n 1348750198000000000,\n 1348750198000000000,\n 1348753539000000000,\n 1348753539000000000,\n 1348753539000000000,\n 1348754449000000000,\n 1348754449000000000,\n 1348761333000000000,\n 1348761554000000000,\n 1348761610000000000,\n 1348761333000000000,\n 1348761554000000000,\n 1348761610000000000,\n 1348782624000000000,\n 1348782624000000000,\n 1348782624000000000,\n 1348782624000000000,\n ]\n )\n vals = pd.to_datetime(vals, unit=\"ns\")\n breaks = [10, 36, 58]\n dfs = []\n\n for i in range(len(breaks)):\n lo = sum(breaks[:i])\n hi = sum(breaks[i : i + 1])\n\n dfs.append(pd.DataFrame({\"timestamp\": vals[lo:hi]}, index=range(lo, hi)))\n\n ddf = dd.concat(dfs).clear_divisions()\n assert ddf.set_index(\"timestamp\").index.compute().is_monotonic is True", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_test_set_index.assert_eq_d5_full_set_in": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_test_set_index.assert_eq_d5_full_set_in", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 578, "end_line": 603, "span_ids": ["test_set_index"], "tokens": 334}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_index():\n dsk = {\n (\"x\", 0): pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 2, 6]}, index=[0, 1, 3]),\n (\"x\", 1): pd.DataFrame({\"a\": [4, 5, 6], \"b\": [3, 5, 8]}, index=[5, 6, 8]),\n (\"x\", 2): pd.DataFrame({\"a\": [7, 8, 9], \"b\": [9, 1, 8]}, index=[9, 9, 9]),\n }\n d = dd.DataFrame(dsk, \"x\", meta, [0, 4, 9, 9])\n full = d.compute()\n\n d2 = d.set_index(\"b\", npartitions=3)\n assert d2.npartitions == 3\n assert d2.index.name == \"b\"\n assert_eq(d2, full.set_index(\"b\"))\n\n d3 = 
d.set_index(d.b, npartitions=3)\n assert d3.npartitions == 3\n assert d3.index.name == \"b\"\n assert_eq(d3, full.set_index(full.b))\n\n d4 = d.set_index(\"b\")\n assert d4.index.name == \"b\"\n assert_eq(d4, full.set_index(\"b\"))\n\n d5 = d.set_index([\"b\"])\n assert d5.index.name == \"b\"\n assert_eq(d5, full.set_index([\"b\"]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_interpolate_test_set_index_interpolate_int.assert_all_np_issubdtype_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_interpolate_test_set_index_interpolate_int.assert_all_np_issubdtype_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 606, "end_line": 625, "span_ids": ["test_set_index_interpolate_int", "test_set_index_interpolate"], "tokens": 275}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_index_interpolate():\n df = pd.DataFrame({\"x\": [4, 1, 1, 3, 3], \"y\": [1.0, 1, 1, 1, 2]})\n d = dd.from_pandas(df, 2)\n\n d1 = d.set_index(\"x\", npartitions=3)\n assert d1.npartitions == 3\n assert set(d1.divisions) == set([1, 2, 3, 4])\n\n d2 = d.set_index(\"y\", npartitions=3)\n assert d2.divisions[0] == 1.0\n assert 1.0 < d2.divisions[1] < d2.divisions[2] < 2.0\n assert d2.divisions[3] == 2.0\n\n\ndef test_set_index_interpolate_int():\n L = sorted(list(range(0, 200, 10)) * 2)\n df = pd.DataFrame({\"x\": 2 * L})\n d = dd.from_pandas(df, 2)\n d1 = d.set_index(\"x\", npartitions=10)\n assert all(np.issubdtype(type(x), np.integer) for x in d1.divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_timezone_test_set_index_timezone.with_pytest_raises_TypeEr.d2_divisions_0_s2badt": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_timezone_test_set_index_timezone.with_pytest_raises_TypeEr.d2_divisions_0_s2badt", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 628, "end_line": 649, "span_ids": ["test_set_index_timezone"], "tokens": 334}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def 
test_set_index_timezone():\n s_naive = pd.Series(pd.date_range(\"20130101\", periods=3))\n s_aware = pd.Series(pd.date_range(\"20130101\", periods=3, tz=\"US/Eastern\"))\n df = pd.DataFrame({\"tz\": s_aware, \"notz\": s_naive})\n d = dd.from_pandas(df, 2)\n\n d1 = d.set_index(\"notz\", npartitions=2)\n s1 = pd.DatetimeIndex(s_naive.values, dtype=s_naive.dtype)\n assert d1.divisions[0] == s_naive[0] == s1[0]\n assert d1.divisions[-1] == s_naive[2] == s1[2]\n\n # We currently lose \"freq\". Converting data with pandas-defined dtypes\n # to numpy or pure Python can be lossy like this.\n d2 = d.set_index(\"tz\", npartitions=2)\n s2 = pd.DatetimeIndex(s_aware, dtype=s_aware.dtype)\n assert d2.divisions[0] == s2[0]\n assert d2.divisions[-1] == s2[2]\n assert d2.divisions[0].tz == s2[0].tz\n assert d2.divisions[0].tz is not None\n s2badtype = pd.DatetimeIndex(s_aware.values, dtype=s_naive.dtype)\n with pytest.raises(TypeError):\n d2.divisions[0] == s2badtype[0]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_drop_test_set_index_drop.assert_eq_ddf_set_index_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_drop_test_set_index_drop.assert_eq_ddf_set_index_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 652, "end_line": 680, "span_ids": ["test_set_index_drop"], "tokens": 426}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"drop\", [True, False])\ndef test_set_index_drop(drop):\n pdf = pd.DataFrame(\n {\n \"A\": list(\"ABAABBABAA\"),\n \"B\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n \"C\": [1, 2, 3, 2, 1, 3, 2, 4, 2, 3],\n }\n )\n ddf = dd.from_pandas(pdf, 3)\n\n assert_eq(ddf.set_index(\"A\", drop=drop), pdf.set_index(\"A\", drop=drop))\n assert_eq(ddf.set_index(\"B\", drop=drop), pdf.set_index(\"B\", drop=drop))\n assert_eq(ddf.set_index(\"C\", drop=drop), pdf.set_index(\"C\", drop=drop))\n assert_eq(ddf.set_index(ddf.A, drop=drop), pdf.set_index(pdf.A, drop=drop))\n assert_eq(ddf.set_index(ddf.B, drop=drop), pdf.set_index(pdf.B, drop=drop))\n assert_eq(ddf.set_index(ddf.C, drop=drop), pdf.set_index(pdf.C, drop=drop))\n\n # numeric columns\n pdf = pd.DataFrame(\n {\n 0: list(\"ABAABBABAA\"),\n 1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n 2: [1, 2, 3, 2, 1, 3, 2, 4, 2, 3],\n }\n )\n ddf = dd.from_pandas(pdf, 3)\n assert_eq(ddf.set_index(0, drop=drop), pdf.set_index(0, drop=drop))\n assert_eq(ddf.set_index(2, drop=drop), pdf.set_index(2, drop=drop))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_raises_error_on_bad_input_test_set_index_raises_error_on_bad_input.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_raises_error_on_bad_input_test_set_index_raises_error_on_bad_input.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 683, "end_line": 698, "span_ids": ["test_set_index_raises_error_on_bad_input"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_index_raises_error_on_bad_input():\n df = pd.DataFrame({\"a\": [1, 2, 3, 4, 5, 6, 7], \"b\": [7, 6, 5, 4, 3, 2, 1]})\n ddf = dd.from_pandas(df, 2)\n\n msg = r\"Dask dataframe does not yet support multi-indexes\"\n with pytest.raises(NotImplementedError) as err:\n ddf.set_index([\"a\", \"b\"])\n assert msg in str(err.value)\n\n with pytest.raises(NotImplementedError) as err:\n ddf.set_index([[\"a\", \"b\"]])\n assert msg in str(err.value)\n\n with pytest.raises(NotImplementedError) as err:\n ddf.set_index([[\"a\"]])\n assert msg in str(err.value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_sorted_true_test_set_index_sorted_true.with_pytest_raises_ValueE.a_set_index_a_z_sorted_T": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_sorted_true_test_set_index_sorted_true.with_pytest_raises_ValueE.a_set_index_a_z_sorted_T", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 701, "end_line": 721, "span_ids": ["test_set_index_sorted_true"], "tokens": 231}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_index_sorted_true():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [10, 20, 20, 40], \"z\": [4, 3, 2, 1]})\n a = dd.from_pandas(df, 2, sort=False)\n assert not a.known_divisions\n\n b = a.set_index(\"x\", sorted=True)\n assert b.known_divisions\n assert set(a.dask).issubset(set(b.dask))\n\n for drop in [True, False]:\n assert_eq(a.set_index(\"x\", drop=drop), df.set_index(\"x\", drop=drop))\n assert_eq(\n a.set_index(a.x, sorted=True, drop=drop), df.set_index(df.x, drop=drop)\n )\n assert_eq(\n a.set_index(a.x + 1, sorted=True, drop=drop),\n df.set_index(df.x + 1, drop=drop),\n )\n\n with pytest.raises(ValueError):\n a.set_index(a.z, 
sorted=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_sorted_single_partition_test_set_index_sorted_min_max_same.assert_df2_divisions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_sorted_single_partition_test_set_index_sorted_min_max_same.assert_df2_divisions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 724, "end_line": 741, "span_ids": ["test_set_index_sorted_single_partition", "test_set_index_sorted_min_max_same"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_index_sorted_single_partition():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [1, 0, 1, 0]})\n ddf = dd.from_pandas(df, npartitions=1)\n assert_eq(ddf.set_index(\"x\", sorted=True), df.set_index(\"x\"))\n\n\ndef test_set_index_sorted_min_max_same():\n a = pd.DataFrame({\"x\": [1, 2, 3], \"y\": [0, 0, 0]})\n b = pd.DataFrame({\"x\": [1, 2, 3], \"y\": [1, 1, 1]})\n\n aa = delayed(a)\n bb = delayed(b)\n\n df = dd.from_delayed([aa, bb], meta=a)\n assert not df.known_divisions\n\n df2 = df.set_index(\"y\", sorted=True)\n assert df2.divisions == (0, 1, 1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_empty_partition_test_set_index_empty_partition.for_conv_in_converters_.assert_assert_eq_ddf_set_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_empty_partition_test_set_index_empty_partition.for_conv_in_converters_.assert_assert_eq_ddf_set_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 744, "end_line": 761, "span_ids": ["test_set_index_empty_partition"], "tokens": 165}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_index_empty_partition():\n test_vals = [1, 2, 3]\n\n converters = [int, float, str, lambda x: pd.to_datetime(x, unit=\"ns\")]\n\n for conv in converters:\n df = pd.DataFrame(\n [{\"x\": conv(i), \"y\": i} for i in test_vals], columns=[\"x\", \"y\"]\n )\n ddf = dd.concat(\n [\n dd.from_pandas(df, npartitions=1),\n dd.from_pandas(df[df.y > df.y.max()], 
npartitions=1),\n ]\n )\n\n assert any(ddf.get_partition(p).compute().empty for p in range(ddf.npartitions))\n assert assert_eq(ddf.set_index(\"x\"), df.set_index(\"x\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_on_empty_test_set_index_on_empty.for_converter_in_converte.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_on_empty_test_set_index_on_empty.for_converter_in_converte.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 764, "end_line": 778, "span_ids": ["test_set_index_on_empty"], "tokens": 161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_index_on_empty():\n test_vals = [1, 2, 3, 4]\n converters = [int, float, str, lambda x: pd.to_datetime(x, unit=\"ns\")]\n\n for converter in converters:\n df = pd.DataFrame([{\"x\": converter(x), \"y\": x} for x in test_vals])\n ddf = dd.from_pandas(df, npartitions=4)\n\n assert ddf.npartitions > 1\n\n ddf = ddf[ddf.y > df.y.max()].set_index(\"x\")\n expected_df = df[df.y > df.y.max()].set_index(\"x\")\n\n assert assert_eq(ddf, expected_df, **CHECK_FREQ)\n assert ddf.npartitions == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_categorical_test_set_index_categorical.assert_categorical_equal_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_categorical_test_set_index_categorical.assert_categorical_equal_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 781, "end_line": 794, "span_ids": ["test_set_index_categorical"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_index_categorical():\n # https://github.com/dask/dask/issues/5671\n order = list(reversed(string.ascii_letters))\n values = list(string.ascii_letters)\n random.shuffle(values)\n dtype = pd.api.types.CategoricalDtype(order, ordered=True)\n df = pd.DataFrame({\"A\": pd.Categorical(values, dtype=dtype), \"B\": 1})\n\n result = dd.from_pandas(df, npartitions=2).set_index(\"A\")\n assert len(result) == len(df)\n\n # sorted 
with the metric defined by the Categorical\n divisions = pd.Categorical(result.divisions, dtype=dtype)\n assert_categorical_equal(divisions, divisions.sort_values())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_compute_divisions_test_compute_divisions.assert_b_known_divisions": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_compute_divisions_test_compute_divisions.assert_b_known_divisions", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 797, "end_line": 810, "span_ids": ["test_compute_divisions"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_compute_divisions():\n from dask.dataframe.shuffle import compute_and_set_divisions\n\n df = pd.DataFrame(\n {\"x\": [1, 2, 3, 4], \"y\": [10, 20, 20, 40], \"z\": [4, 3, 2, 1]},\n index=[1, 3, 10, 20],\n )\n a = dd.from_pandas(df, 2, sort=False)\n assert not a.known_divisions\n\n b = compute_and_set_divisions(copy(a))\n\n assert_eq(a, b, check_divisions=False)\n assert b.known_divisions", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_empty_partitions_test_empty_partitions.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_empty_partitions_test_empty_partitions.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 813, "end_line": 826, "span_ids": ["test_empty_partitions"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_empty_partitions():\n # See https://github.com/dask/dask/issues/2408\n df = pd.DataFrame({\"a\": list(range(10))})\n df[\"b\"] = df[\"a\"] % 3\n df[\"c\"] = df[\"b\"].astype(str)\n\n ddf = dd.from_pandas(df, npartitions=3)\n ddf = ddf.set_index(\"b\")\n ddf = ddf.repartition(npartitions=3)\n ddf.get_partition(0).compute()\n assert_eq(ddf, df.set_index(\"b\"))\n\n ddf = ddf.set_index(\"c\")\n assert_eq(ddf, df.set_index(\"b\").set_index(\"c\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", 
"class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_remove_nans_test_remove_nans.for_conv_none_val_in_con.for_inputs_expected_in_t.assert_remove_nans_params": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_remove_nans_test_remove_nans.for_conv_none_val_in_con.for_inputs_expected_in_t.assert_remove_nans_params", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 829, "end_line": 852, "span_ids": ["test_remove_nans"], "tokens": 301}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_remove_nans():\n tests = [\n ((1, 1, 2), (1, 1, 2)),\n ((None, 1, 2), (1, 1, 2)),\n ((1, None, 2), (1, 2, 2)),\n ((1, 2, None), (1, 2, 2)),\n ((1, 2, None, None), (1, 2, 2, 2)),\n ((None, None, 1, 2), (1, 1, 1, 2)),\n ((1, None, None, 2), (1, 2, 2, 2)),\n ((None, 1, None, 2, None, 3, None), (1, 1, 2, 2, 3, 3, 3)),\n ]\n\n converters = [\n (int, np.nan),\n (float, np.nan),\n (str, np.nan),\n (lambda x: pd.to_datetime(x, unit=\"ns\"), np.datetime64(\"NaT\")),\n ]\n\n for conv, none_val in converters:\n for inputs, expected in tests:\n params = [none_val if x is None else conv(x) for x in inputs]\n expected = [conv(x) for x in expected]\n assert remove_nans(params) == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_gh_2730_test_gh_2730.tm_assert_frame_equal_res": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_gh_2730_test_gh_2730.tm_assert_frame_equal_res", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 855, "end_line": 869, "span_ids": ["test_gh_2730"], "tokens": 155}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.slow\ndef test_gh_2730():\n large = pd.DataFrame({\"KEY\": np.arange(0, 50000)})\n small = pd.DataFrame({\"KEY\": np.arange(25, 500)})\n\n dd_left = dd.from_pandas(small, npartitions=3)\n dd_right = dd.from_pandas(large, npartitions=257)\n\n with dask.config.set(shuffle=\"tasks\", scheduler=\"sync\"):\n dd_merged = dd_left.merge(dd_right, how=\"inner\", on=\"KEY\")\n result = dd_merged.compute()\n\n expected = large.merge(small, how=\"inner\", on=\"KEY\")\n\n tm.assert_frame_equal(result.sort_values(\"KEY\").reset_index(drop=True), expected)", "start_char_idx": null, 
"end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_does_not_repeat_work_due_to_optimizations_test_set_index_errors_with_inplace_kwarg.with_pytest_raises_NotImp.ddf_set_index_a_inplac": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_does_not_repeat_work_due_to_optimizations_test_set_index_errors_with_inplace_kwarg.with_pytest_raises_NotImp.ddf_set_index_a_inplac", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 872, "end_line": 903, "span_ids": ["test_set_index_does_not_repeat_work_due_to_optimizations", "test_set_index_errors_with_inplace_kwarg"], "tokens": 312}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"npartitions\", [None, \"auto\"])\ndef test_set_index_does_not_repeat_work_due_to_optimizations(npartitions):\n # Atomic counter\n count = itertools.count()\n\n def increment():\n next(count)\n\n def make_part(dummy, n):\n return pd.DataFrame({\"x\": np.random.random(n), \"y\": np.random.random(n)})\n\n nbytes = 1e6\n nparts = 50\n n = int(nbytes / (nparts * 8))\n\n dsk = {(\"inc\", i): (increment,) for i in range(nparts)}\n dsk.update({(\"x\", i): (make_part, (\"inc\", i), n) for i in range(nparts)})\n ddf = dd.DataFrame(dsk, \"x\", make_part(None, 1), [None] * (nparts + 1))\n\n ddf.set_index(\"x\", npartitions=npartitions)\n ntimes = next(count)\n assert ntimes == nparts\n\n\ndef test_set_index_errors_with_inplace_kwarg():\n df = pd.DataFrame({\"a\": [9, 8, 7], \"b\": [6, 5, 4], \"c\": [3, 2, 1]})\n ddf = dd.from_pandas(df, npartitions=1)\n\n ddf.set_index(\"a\")\n\n with pytest.raises(NotImplementedError):\n ddf.set_index(\"a\", inplace=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_timestamp_test_set_index_timestamp.assert_eq_df2_ddf_set_in": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_timestamp_test_set_index_timestamp.assert_eq_df2_ddf_set_in", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 906, "end_line": 922, "span_ids": ["test_set_index_timestamp"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "def test_set_index_timestamp():\n df = pd.DataFrame({\"A\": pd.date_range(\"2000\", periods=12, tz=\"US/Central\"), \"B\": 1})\n ddf = dd.from_pandas(df, 2)\n divisions = (\n pd.Timestamp(\"2000-01-01 00:00:00-0600\", tz=\"US/Central\", freq=\"D\"),\n pd.Timestamp(\"2000-01-12 00:00:00-0600\", tz=\"US/Central\", freq=\"D\"),\n )\n\n # Note: `freq` is lost during round trip\n df2 = df.set_index(\"A\")\n ddf_new_div = ddf.set_index(\"A\", divisions=divisions)\n for (ts1, ts2) in zip(divisions, ddf_new_div.divisions):\n assert ts1.value == ts2.value\n assert ts1.tz == ts2.tz\n\n assert_eq(df2, ddf_new_div, **CHECK_FREQ)\n assert_eq(df2, ddf.set_index(\"A\"), **CHECK_FREQ)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_disk_shuffle_with_compression_option_test_disk_shuffle_with_unknown_compression.with_dask_config_set_da.with_pytest_raises_.test_shuffle_disk_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_disk_shuffle_with_compression_option_test_disk_shuffle_with_unknown_compression.with_dask_config_set_da.with_pytest_raises_.test_shuffle_disk_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 925, "end_line": 945, "span_ids": ["test_disk_shuffle_with_compression_option", "test_disk_shuffle_with_unknown_compression"], "tokens": 174}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"compression\", [None, \"ZLib\"])\ndef test_disk_shuffle_with_compression_option(compression):\n # test if dataframe shuffle works both with and without compression\n with dask.config.set({\"dataframe.shuffle-compression\": compression}):\n test_shuffle(\"disk\")\n\n\n@pytest.mark.parametrize(\"compression\", [\"UNKOWN_COMPRESSION_ALGO\"])\ndef test_disk_shuffle_with_unknown_compression(compression):\n # test if dask raises an error in case of fault config string\n with dask.config.set({\"dataframe.shuffle-compression\": compression}):\n with pytest.raises(\n ImportError,\n match=(\n \"Not able to import and load {0} as compression algorithm.\"\n \"Please check if the library is installed and supported by Partd.\".format(\n compression\n )\n ),\n ):\n test_shuffle(\"disk\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_disk_shuffle_check_actual_compression_test_disk_shuffle_check_actual_compression.assert_len_uncompressed_d": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_disk_shuffle_check_actual_compression_test_disk_shuffle_check_actual_compression.assert_len_uncompressed_d", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 948, "end_line": 967, "span_ids": ["test_disk_shuffle_check_actual_compression"], "tokens": 245}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_disk_shuffle_check_actual_compression():\n # test if the compression switch is really respected by testing the size of the actual partd-data on disk\n def generate_raw_partd_file(compression):\n # generate and write a dummy dataframe to disk and return the raw data bytes\n df1 = pd.DataFrame({\"a\": list(range(10000))})\n df1[\"b\"] = (df1[\"a\"] * 123).astype(str)\n with dask.config.set({\"dataframe.shuffle-compression\": compression}):\n p1 = maybe_buffered_partd(buffer=False, tempdir=None)()\n p1.append({\"x\": df1})\n # get underlying filename from partd - depending on nested structure of partd object\n filename = (\n p1.partd.partd.filename(\"x\") if compression else p1.partd.filename(\"x\")\n )\n return open(filename, \"rb\").read()\n\n # get compressed and uncompressed raw data\n uncompressed_data = generate_raw_partd_file(compression=None)\n compressed_data = generate_raw_partd_file(compression=\"BZ2\")\n\n assert len(uncompressed_data) > len(compressed_data)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_pytest__BASE_UFUNCS._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_pytest__BASE_UFUNCS._", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 55, "span_ids": ["imports"], "tokens": 254}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\npd = pytest.importorskip(\"pandas\")\n\nimport numpy as np\n\nimport dask.array as da\nimport dask.dataframe as dd\nfrom dask.dataframe.utils import assert_eq\n\n\n_BASE_UFUNCS = [\n \"conj\",\n \"exp\",\n \"log\",\n \"log2\",\n \"log10\",\n \"log1p\",\n \"expm1\",\n \"sqrt\",\n \"square\",\n \"sin\",\n \"cos\",\n \"tan\",\n \"arcsin\",\n \"arccos\",\n \"arctan\",\n \"sinh\",\n \"cosh\",\n \"tanh\",\n \"arcsinh\",\n \"arccosh\",\n \"arctanh\",\n \"deg2rad\",\n \"rad2deg\",\n \"isfinite\",\n \"isinf\",\n \"isnan\",\n \"signbit\",\n \"degrees\",\n \"radians\",\n \"rint\",\n \"fabs\",\n \"sign\",\n \"absolute\",\n 
\"floor\",\n \"ceil\",\n \"trunc\",\n \"logical_not\",\n \"cbrt\",\n \"exp2\",\n \"negative\",\n \"reciprocal\",\n \"spacing\",\n]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_ufunc_test_ufunc.None_4.assert_eq_dafunc_pandas_i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_ufunc_test_ufunc.None_4.assert_eq_dafunc_pandas_i", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 58, "end_line": 140, "span_ids": ["test_ufunc"], "tokens": 693}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"pandas_input\",\n [\n pd.Series(np.random.randint(1, 100, size=20)),\n pd.Series(np.abs(np.random.randn(100))),\n pd.DataFrame(\n {\n \"A\": np.random.randint(1, 100, size=20),\n \"B\": np.random.randint(1, 100, size=20),\n \"C\": np.abs(np.random.randn(20)),\n }\n ),\n pd.Series(\n np.random.randint(1, 100, size=20), index=list(\"abcdefghijklmnopqrst\")\n ),\n pd.Series(np.abs(np.random.randn(20)), index=list(\"abcdefghijklmnopqrst\")),\n pd.DataFrame(\n {\n \"A\": np.random.randint(1, 100, size=20),\n \"B\": np.random.randint(1, 100, size=20),\n \"C\": np.abs(np.random.randn(20)),\n },\n index=list(\"abcdefghijklmnopqrst\"),\n ),\n ],\n)\n@pytest.mark.parametrize(\"ufunc\", _BASE_UFUNCS)\ndef test_ufunc(pandas_input, ufunc):\n dafunc = getattr(da, ufunc)\n npfunc = getattr(np, ufunc)\n\n dask_input = dd.from_pandas(pandas_input, 3)\n pandas_type = pandas_input.__class__\n dask_type = dask_input.__class__\n\n # applying Dask ufunc doesn't trigger computation\n with pytest.warns(None):\n # Some cause warnings (arcsine)\n assert isinstance(dafunc(dask_input), dask_type)\n assert_eq(dafunc(dask_input), npfunc(pandas_input))\n\n # applying NumPy ufunc is lazy\n if isinstance(npfunc, np.ufunc):\n assert isinstance(npfunc(dask_input), dask_type)\n else:\n assert isinstance(npfunc(dask_input), pandas_type)\n assert_eq(npfunc(dask_input), npfunc(pandas_input))\n\n # applying Dask ufunc to normal Series triggers computation\n assert isinstance(dafunc(pandas_input), pandas_type)\n assert_eq(dafunc(dask_input), npfunc(pandas_input))\n\n # Index\n if pandas_input.index.dtype in [object, str]:\n return\n if ufunc in (\"logical_not\", \"signbit\", \"isnan\", \"isinf\", \"isfinite\"):\n return\n\n with pytest.warns(None):\n assert isinstance(dafunc(dask_input.index), dd.Index)\n assert_eq(\n dafunc(dask_input.index),\n npfunc(pandas_input.index),\n check_divisions=ufunc != \"spacing\",\n )\n\n # applying NumPy ufunc is lazy\n if isinstance(npfunc, np.ufunc):\n assert isinstance(npfunc(dask_input.index), dd.Index)\n else:\n assert isinstance(npfunc(dask_input.index), pd.Index)\n\n assert_eq(\n npfunc(dask_input.index),\n npfunc(dask_input.index),\n check_divisions=ufunc != \"spacing\",\n )\n\n # applying Dask ufunc to 
normal Series triggers computation\n    with pytest.warns(None):\n        # some (da.log) cause warnings\n        assert isinstance(dafunc(pandas_input.index), pd.Index)\n        assert_eq(dafunc(pandas_input), npfunc(pandas_input))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_ufunc_array_wrap_test_ufunc_array_wrap.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_ufunc_array_wrap_test_ufunc_array_wrap.None_5", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 143, "end_line": 210, "span_ids": ["test_ufunc_array_wrap"], "tokens": 586}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n    \"ufunc\",\n    [\n        pytest.param(\n            \"isreal\", marks=pytest.mark.filterwarnings(\"ignore::FutureWarning\")\n        ),\n        \"iscomplex\",\n        pytest.param(\"real\", marks=pytest.mark.filterwarnings(\"ignore::FutureWarning\")),\n        pytest.param(\"imag\", marks=pytest.mark.filterwarnings(\"ignore::FutureWarning\")),\n        \"angle\",\n        \"fix\",\n        \"i0\",\n        \"sinc\",\n        \"nan_to_num\",\n    ],\n)\ndef test_ufunc_array_wrap(ufunc):\n    \"\"\"\n    Some np.ufuncs don't call __array_wrap__\n    (or __array_ufunc__, starting from numpy v1.13.0); they should work as below\n\n    - da.ufunc(dd.Series) => dd.Series\n    - da.ufunc(pd.Series) => np.ndarray\n    - np.ufunc(dd.Series) => np.ndarray\n    - np.ufunc(pd.Series) => np.ndarray\n    \"\"\"\n    if ufunc == \"fix\" and np.__version__ >= \"1.13.0\":\n        pytest.skip(\"fix calls floor in a way that we do not yet support\")\n\n    dafunc = getattr(da, ufunc)\n    npfunc = getattr(np, ufunc)\n\n    s = pd.Series(\n        np.random.randint(1, 100, size=20), index=list(\"abcdefghijklmnopqrst\")\n    )\n    ds = dd.from_pandas(s, 3)\n\n    # applying Dask ufunc doesn't trigger computation\n    assert isinstance(dafunc(ds), dd.Series)\n    assert_eq(dafunc(ds), pd.Series(npfunc(s), index=s.index))\n\n    assert isinstance(npfunc(ds), np.ndarray)\n    np.testing.assert_equal(npfunc(ds), npfunc(s))\n\n    assert isinstance(dafunc(s), np.ndarray)\n    np.testing.assert_array_equal(dafunc(s), npfunc(s))\n\n    df = pd.DataFrame(\n        {\n            \"A\": np.random.randint(1, 100, size=20),\n            \"B\": np.random.randint(1, 100, size=20),\n            \"C\": np.abs(np.random.randn(20)),\n        },\n        index=list(\"abcdefghijklmnopqrst\"),\n    )\n    ddf = dd.from_pandas(df, 3)\n\n    # applying Dask ufunc doesn't trigger computation\n    assert isinstance(dafunc(ddf), dd.DataFrame)\n    # result may be read-only ndarray\n    exp = pd.DataFrame(npfunc(df).copy(), columns=df.columns, index=df.index)\n    assert_eq(dafunc(ddf), exp)\n\n    assert isinstance(npfunc(ddf), np.ndarray)\n    np.testing.assert_array_equal(npfunc(ddf), npfunc(df))\n\n    assert isinstance(dafunc(df), np.ndarray)\n    np.testing.assert_array_equal(dafunc(df), npfunc(df))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py__UFUNCS_2ARG__UFUNCS_2ARG._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py__UFUNCS_2ARG__UFUNCS_2ARG._", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 213, "end_line": 238, "span_ids": ["impl:5"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "_UFUNCS_2ARG = [\n \"logaddexp\",\n \"logaddexp2\",\n \"arctan2\",\n \"hypot\",\n \"copysign\",\n \"nextafter\",\n \"ldexp\",\n pytest.param(\"fmod\", marks=[pytest.mark.filterwarnings(\"ignore::RuntimeWarning\")]),\n \"logical_and\",\n \"logical_or\",\n \"logical_xor\",\n \"maximum\",\n \"minimum\",\n \"fmax\",\n \"fmin\",\n \"greater\",\n \"greater_equal\",\n \"less\",\n \"less_equal\",\n \"not_equal\",\n \"equal\",\n \"logical_or\",\n \"logical_and\",\n \"logical_xor\",\n]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_ufunc_with_2args_test_ufunc_with_2args.assert_eq_dafunc_pandas1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_ufunc_with_2args_test_ufunc_with_2args.assert_eq_dafunc_pandas1_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 241, "end_line": 286, "span_ids": ["test_ufunc_with_2args"], "tokens": 477}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"ufunc\", _UFUNCS_2ARG)\n@pytest.mark.parametrize(\n \"make_pandas_input\",\n [\n lambda: pd.Series(np.random.randint(1, 100, size=20)),\n lambda: pd.DataFrame(\n np.random.randint(1, 100, size=(20, 2)), columns=[\"A\", \"B\"]\n ),\n ],\n)\ndef test_ufunc_with_2args(ufunc, make_pandas_input):\n\n dafunc = getattr(da, ufunc)\n npfunc = getattr(np, ufunc)\n\n pandas1 = make_pandas_input()\n pandas2 = make_pandas_input()\n\n dask1 = dd.from_pandas(pandas1, 3)\n dask2 = dd.from_pandas(pandas2, 4)\n\n pandas_type = pandas1.__class__\n dask_type = dask1.__class__\n\n # applying Dask ufunc doesn't trigger computation\n assert isinstance(dafunc(dask1, dask2), dask_type)\n assert_eq(dafunc(dask1, dask2), npfunc(pandas1, pandas2))\n\n # should be fine with pandas as a second arg, too\n assert isinstance(dafunc(dask1, pandas2), dask_type)\n 
assert_eq(dafunc(dask1, pandas2), npfunc(pandas1, pandas2))\n\n # applying NumPy ufunc is lazy\n if isinstance(npfunc, np.ufunc):\n assert isinstance(npfunc(dask1, dask2), dask_type)\n assert isinstance(npfunc(dask1, pandas2), dask_type)\n else:\n assert isinstance(npfunc(dask1, dask2), pandas_type)\n assert isinstance(npfunc(dask1, pandas2), pandas_type)\n\n assert_eq(npfunc(dask1, dask2), npfunc(pandas1, pandas2))\n assert_eq(npfunc(dask1, pandas2), npfunc(pandas1, pandas2))\n\n # applying Dask ufunc to normal Series triggers computation\n assert isinstance(dafunc(pandas1, pandas2), pandas_type)\n assert_eq(dafunc(pandas1, pandas2), npfunc(pandas1, pandas2))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_clip_test_clip.assert_eq_da_clip_pandas_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_clip_test_clip.assert_eq_da_clip_pandas_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 289, "end_line": 318, "span_ids": ["test_clip"], "tokens": 280}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"pandas,min,max\",\n [\n (pd.Series(np.random.randint(1, 100, size=20)), 5, 50),\n (\n pd.DataFrame(np.random.randint(1, 100, size=(20, 2)), columns=[\"A\", \"B\"]),\n 5.5,\n 40.5,\n ),\n ],\n)\ndef test_clip(pandas, min, max):\n\n dask = dd.from_pandas(pandas, 3)\n pandas_type = pandas.__class__\n dask_type = dask.__class__\n\n # clip internally calls dd.Series.clip\n\n # applying Dask ufunc doesn't trigger computation\n assert isinstance(da.clip(dask, min, max), dask_type)\n assert_eq(da.clip(dask, min, max), np.clip(pandas, min, max))\n\n # applying Numpy ufunc doesn't trigger computation\n assert isinstance(np.clip(dask, min, max), dask_type)\n assert_eq(np.clip(dask, min, max), np.clip(pandas, min, max))\n\n # applying Dask ufunc to normal pandas objects triggers computation\n assert isinstance(da.clip(pandas, min, max), pandas_type)\n assert_eq(da.clip(pandas, min, max), np.clip(pandas, min, max))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_frame_ufunc_out_test_frame_ufunc_out.None_1.assert_eq_ddf_out_np_exp": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_frame_ufunc_out_test_frame_ufunc_out.None_1.assert_eq_ddf_out_np_exp", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 321, "end_line": 
341, "span_ids": ["test_frame_ufunc_out"], "tokens": 233}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"ufunc\", _BASE_UFUNCS)\ndef test_frame_ufunc_out(ufunc):\n npfunc = getattr(np, ufunc)\n dafunc = getattr(da, ufunc)\n\n input_matrix = np.random.randint(1, 100, size=(20, 2))\n\n df = pd.DataFrame(input_matrix, columns=[\"A\", \"B\"])\n ddf = dd.from_pandas(df, 3)\n df_out = pd.DataFrame(np.random.randint(1, 100, size=(20, 2)), columns=[\"Y\", \"Z\"])\n ddf_out_np = dd.from_pandas(df_out, 3)\n ddf_out_da = dd.from_pandas(df_out, 3)\n\n with pytest.warns(None):\n npfunc(ddf, out=ddf_out_np)\n dafunc(ddf, out=ddf_out_da)\n assert_eq(ddf_out_np, ddf_out_da)\n\n with pytest.warns(None):\n expected = pd.DataFrame(npfunc(input_matrix), columns=[\"A\", \"B\"])\n assert_eq(ddf_out_np, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_frame_2ufunc_out_test_frame_2ufunc_out.assert_eq_ddf_out_expect": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_frame_2ufunc_out_test_frame_2ufunc_out.assert_eq_ddf_out_expect", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 344, "end_line": 372, "span_ids": ["test_frame_2ufunc_out"], "tokens": 270}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_frame_2ufunc_out():\n input_matrix = np.random.randint(1, 100, size=(20, 2))\n\n df = pd.DataFrame(input_matrix, columns=[\"A\", \"B\"])\n ddf = dd.from_pandas(df, 3)\n\n # column number mismatch\n df_out = pd.DataFrame(\n np.random.randint(1, 100, size=(20, 3)), columns=[\"X\", \"Y\", \"Z\"]\n )\n ddf_out = dd.from_pandas(df_out, 3)\n\n with pytest.raises(ValueError):\n np.sin(ddf, out=ddf_out)\n\n # types mismatch\n ddf_out = dd.from_pandas(pd.Series([0]), 1)\n with pytest.raises(TypeError):\n np.sin(ddf, out=ddf_out)\n\n df_out = pd.DataFrame(np.random.randint(1, 100, size=(20, 2)), columns=[\"X\", \"Y\"])\n ddf_out = dd.from_pandas(df_out, 3)\n\n np.sin(ddf, out=ddf_out)\n np.add(ddf_out, 10, out=ddf_out)\n\n expected = pd.DataFrame(np.sin(input_matrix) + 10, columns=[\"A\", \"B\"])\n\n assert_eq(ddf_out, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_mixed_types_test_mixed_types.assert_eq_dafunc_arg2_ar": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_mixed_types_test_mixed_types.assert_eq_dafunc_arg2_ar", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 375, "end_line": 427, "span_ids": ["test_mixed_types"], "tokens": 498}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"arg1\",\n [\n pd.Series(np.abs(np.random.randn(100))),\n pd.DataFrame(\n {\n \"A\": np.random.randint(1, 100, size=20),\n \"B\": np.random.randint(1, 100, size=20),\n \"C\": np.abs(np.random.randn(20)),\n }\n ),\n ],\n)\n@pytest.mark.parametrize(\"arg2\", [2, dd.from_pandas(pd.Series([0]), 1).sum()])\n@pytest.mark.parametrize(\"ufunc\", _UFUNCS_2ARG)\ndef test_mixed_types(ufunc, arg1, arg2):\n npfunc = getattr(np, ufunc)\n dafunc = getattr(da, ufunc)\n\n dask = dd.from_pandas(arg1, 3)\n\n pandas_type = arg1.__class__\n dask_type = dask.__class__\n\n # applying Dask ufunc doesn't trigger computation\n assert isinstance(dafunc(dask, arg2), dask_type)\n assert_eq(dafunc(dask, arg2), npfunc(dask, arg2))\n\n # applying NumPy ufunc is lazy\n assert isinstance(npfunc(dask, arg2), dask_type)\n assert_eq(npfunc(dask, arg2), npfunc(arg1, arg2))\n\n # applying Dask ufunc to normal Series triggers computation\n assert isinstance(dafunc(arg1, arg2), pandas_type)\n assert_eq(dafunc(arg1, arg2), npfunc(arg1, arg2))\n\n # swapping arguments\n\n # first parameter of ldexp should be array-like\n if ufunc == \"ldexp\":\n return\n\n # applying Dask ufunc doesn't trigger computation\n assert isinstance(dafunc(arg2, dask), dask_type)\n assert_eq(dafunc(arg2, dask), npfunc(arg2, dask))\n\n # applying NumPy ufunc is lazy\n assert isinstance(npfunc(arg2, dask), dask_type)\n assert_eq(npfunc(arg2, dask), npfunc(arg2, dask))\n\n # applying Dask ufunc to normal Series triggers computation\n assert isinstance(dafunc(arg2, arg1), pandas_type)\n assert_eq(dafunc(arg2, arg1), npfunc(arg2, arg1))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_2args_with_array_test_2args_with_array.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_2args_with_array_test_2args_with_array.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 430, "end_line": 468, "span_ids": ["test_2args_with_array"], "tokens": 374}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": 
"@pytest.mark.parametrize(\"ufunc\", _UFUNCS_2ARG)\n@pytest.mark.parametrize(\n \"pandas,darray\",\n [\n (\n pd.Series(np.random.randint(1, 100, size=(100,))),\n da.from_array(np.random.randint(1, 100, size=(100,)), chunks=(50,)),\n ),\n (\n pd.DataFrame(np.random.randint(1, 100, size=(20, 2)), columns=[\"A\", \"B\"]),\n da.from_array(np.random.randint(1, 100, size=(20, 2)), chunks=(10, 2)),\n ),\n ],\n)\ndef test_2args_with_array(ufunc, pandas, darray):\n dafunc = getattr(da, ufunc)\n npfunc = getattr(np, ufunc)\n\n dask = dd.from_pandas(pandas, 2)\n dask_type = dask.__class__\n\n # applying Dask ufunc doesn't trigger computation\n assert isinstance(dafunc(dask, darray), dask_type)\n assert isinstance(dafunc(darray, dask), dask_type)\n\n np.testing.assert_array_equal(\n dafunc(dask, darray).compute().values, npfunc(pandas.values, darray).compute()\n )\n\n # applying NumPy ufunc is lazy\n assert isinstance(npfunc(dask, darray), dask_type)\n assert isinstance(npfunc(darray, dask), dask_type)\n\n np.testing.assert_array_equal(\n npfunc(dask, darray).compute().values, npfunc(pandas.values, darray.compute())\n )\n np.testing.assert_array_equal(\n npfunc(darray, dask).compute().values, npfunc(darray.compute(), pandas.values)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_ufunc_with_reduction_test_ufunc_with_reduction.with_pytest_warns_None_.assert_eq_np_redfunc_np_u": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_ufunc_with_reduction_test_ufunc_with_reduction.with_pytest_warns_None_.assert_eq_np_redfunc_np_u", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 471, "end_line": 494, "span_ids": ["test_ufunc_with_reduction"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"redfunc\", [\"sum\", \"prod\", \"min\", \"max\", \"mean\"])\n@pytest.mark.parametrize(\"ufunc\", _BASE_UFUNCS)\n@pytest.mark.parametrize(\n \"pandas\",\n [\n pd.Series(np.abs(np.random.randn(100))),\n pd.DataFrame(\n {\n \"A\": np.random.randint(1, 100, size=20),\n \"B\": np.random.randint(1, 100, size=20),\n \"C\": np.abs(np.random.randn(20)),\n }\n ),\n ],\n)\ndef test_ufunc_with_reduction(redfunc, ufunc, pandas):\n dask = dd.from_pandas(pandas, 3)\n\n np_redfunc = getattr(np, redfunc)\n np_ufunc = getattr(np, ufunc)\n\n with pytest.warns(None):\n assert isinstance(np_redfunc(dask), (dd.DataFrame, dd.Series, dd.core.Scalar))\n assert_eq(np_redfunc(np_ufunc(dask)), np_redfunc(np_ufunc(pandas)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_ufunc_numpy_scalar_comparison_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_ufunc_numpy_scalar_comparison_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 497, "end_line": 518, "span_ids": ["test_ufunc_numpy_scalar_comparison"], "tokens": 175}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"pandas\",\n [\n pd.Series(np.random.randint(1, 100, size=100)),\n pd.DataFrame(\n {\n \"A\": np.random.randint(1, 100, size=20),\n \"B\": np.random.randint(1, 100, size=20),\n \"C\": np.abs(np.random.randn(20)),\n }\n ),\n ],\n)\n@pytest.mark.parametrize(\"scalar\", [15, 16.4, np.int64(15), np.float64(16.4)])\ndef test_ufunc_numpy_scalar_comparison(pandas, scalar):\n # Regression test for issue #3392\n\n dask_compare = scalar >= dd.from_pandas(pandas, npartitions=3)\n pandas_compare = scalar >= pandas\n\n assert_eq(dask_compare, pandas_compare)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_re_pytest": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_re_pytest", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_utils_dataframe.py", "file_name": "test_utils_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 23, "span_ids": ["imports"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import re\n\nimport numpy as np\nimport pandas as pd\nimport dask.dataframe as dd\nfrom dask.dataframe._compat import tm\nfrom dask.dataframe.core import apply_and_enforce\nfrom dask.dataframe.utils import (\n shard_df_on_index,\n meta_nonempty,\n make_meta,\n raise_on_meta_error,\n check_meta,\n check_matching_columns,\n UNKNOWN_CATEGORIES,\n is_dataframe_like,\n is_series_like,\n is_index_like,\n PANDAS_GT_0240,\n PANDAS_GT_100,\n)\n\nimport pytest", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_shard_df_on_index_test_shard_df_on_index.assert_list_result_2_ind": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_shard_df_on_index_test_shard_df_on_index.assert_list_result_2_ind", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_utils_dataframe.py", "file_name": "test_utils_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 26, "end_line": 34, "span_ids": ["test_shard_df_on_index"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_shard_df_on_index():\n df = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 5, 6], \"y\": list(\"abdabd\")}, index=[10, 20, 30, 40, 50, 60]\n )\n\n result = list(shard_df_on_index(df, [20, 50]))\n assert list(result[0].index) == [10]\n assert list(result[1].index) == [20, 30, 40]\n assert list(result[2].index) == [50, 60]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_make_meta_test_make_meta.assert_pytest_raises_Type": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_make_meta_test_make_meta.assert_pytest_raises_Type", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_utils_dataframe.py", "file_name": "test_utils_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 37, "end_line": 120, "span_ids": ["test_make_meta"], "tokens": 760}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_make_meta():\n df = pd.DataFrame(\n {\"a\": [1, 2, 3], \"b\": list(\"abc\"), \"c\": [1.0, 2.0, 3.0]}, index=[10, 20, 30]\n )\n\n # Pandas dataframe\n meta = make_meta(df)\n assert len(meta) == 0\n assert (meta.dtypes == df.dtypes).all()\n assert isinstance(meta.index, type(df.index))\n\n # Pandas series\n meta = make_meta(df.a)\n assert len(meta) == 0\n assert meta.dtype == df.a.dtype\n assert isinstance(meta.index, type(df.index))\n\n # Pandas index\n meta = make_meta(df.index)\n assert isinstance(meta, type(df.index))\n assert len(meta) == 0\n\n # Dask object\n ddf = dd.from_pandas(df, npartitions=2)\n assert make_meta(ddf) is ddf._meta\n\n # Dict\n meta = make_meta({\"a\": \"i8\", \"b\": \"O\", \"c\": \"f8\"})\n assert isinstance(meta, pd.DataFrame)\n assert len(meta) == 0\n assert (meta.dtypes == df.dtypes).all()\n assert isinstance(meta.index, pd.RangeIndex)\n\n # Iterable\n meta = make_meta([(\"a\", \"i8\"), (\"c\", \"f8\"), (\"b\", \"O\")])\n assert (meta.columns == [\"a\", \"c\", \"b\"]).all()\n assert len(meta) == 0\n assert (meta.dtypes == df.dtypes[meta.dtypes.index]).all()\n assert isinstance(meta.index, pd.RangeIndex)\n\n # Tuple\n meta = make_meta((\"a\", \"i8\"))\n assert isinstance(meta, 
pd.Series)\n assert len(meta) == 0\n assert meta.dtype == \"i8\"\n assert meta.name == \"a\"\n\n # With index\n meta = make_meta({\"a\": \"i8\", \"b\": \"i4\"}, index=pd.Int64Index([1, 2], name=\"foo\"))\n assert isinstance(meta.index, pd.Int64Index)\n assert len(meta.index) == 0\n meta = make_meta((\"a\", \"i8\"), index=pd.Int64Index([1, 2], name=\"foo\"))\n assert isinstance(meta.index, pd.Int64Index)\n assert len(meta.index) == 0\n\n # Categoricals\n meta = make_meta({\"a\": \"category\"})\n assert len(meta.a.cat.categories) == 1\n assert meta.a.cat.categories[0] == UNKNOWN_CATEGORIES\n meta = make_meta((\"a\", \"category\"))\n assert len(meta.cat.categories) == 1\n assert meta.cat.categories[0] == UNKNOWN_CATEGORIES\n\n # Numpy scalar\n meta = make_meta(np.float64(1.0))\n assert isinstance(meta, np.float64)\n\n # Python scalar\n meta = make_meta(1.0)\n assert isinstance(meta, np.float64)\n\n # Timestamp\n x = pd.Timestamp(2000, 1, 1)\n meta = make_meta(x)\n assert meta is x\n\n # Dtype expressions\n meta = make_meta(\"i8\")\n assert isinstance(meta, np.int64)\n meta = make_meta(float)\n assert isinstance(meta, np.dtype(float).type)\n meta = make_meta(np.dtype(\"bool\"))\n assert isinstance(meta, np.bool_)\n assert pytest.raises(TypeError, lambda: make_meta(None))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_meta_nonempty_test_meta_nonempty.assert_df3_A_s_al": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_meta_nonempty_test_meta_nonempty.assert_df3_A_s_al", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_utils_dataframe.py", "file_name": "test_utils_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 123, "end_line": 159, "span_ids": ["test_meta_nonempty"], "tokens": 465}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_meta_nonempty():\n df1 = pd.DataFrame(\n {\n \"A\": pd.Categorical([\"Alice\", \"Bob\", \"Carol\"]),\n \"B\": list(\"abc\"),\n \"C\": \"bar\",\n \"D\": np.float32(1),\n \"E\": np.int32(1),\n \"F\": pd.Timestamp(\"2016-01-01\"),\n \"G\": pd.date_range(\"2016-01-01\", periods=3, tz=\"America/New_York\"),\n \"H\": pd.Timedelta(\"1 hours\"),\n \"I\": np.void(b\" \"),\n \"J\": pd.Categorical([UNKNOWN_CATEGORIES] * 3),\n \"K\": pd.Categorical([None, None, None]),\n },\n columns=list(\"DCBAHGFEIJK\"),\n )\n df2 = df1.iloc[0:0]\n df3 = meta_nonempty(df2)\n assert (df3.dtypes == df2.dtypes).all()\n assert df3[\"A\"][0] == \"Alice\"\n assert df3[\"B\"][0] == \"foo\"\n assert df3[\"C\"][0] == \"foo\"\n assert df3[\"D\"][0] == np.float32(1)\n assert df3[\"D\"][0].dtype == \"f4\"\n assert df3[\"E\"][0] == np.int32(1)\n assert df3[\"E\"][0].dtype == \"i4\"\n assert df3[\"F\"][0] == pd.Timestamp(\"1970-01-01 00:00:00\")\n assert df3[\"G\"][0] == pd.Timestamp(\"1970-01-01 00:00:00\", tz=\"America/New_York\")\n assert df3[\"H\"][0] == pd.Timedelta(\"1\")\n 
assert df3[\"I\"][0] == \"foo\"\n assert df3[\"J\"][0] == UNKNOWN_CATEGORIES\n assert len(df3[\"K\"].cat.categories) == 0\n\n s = meta_nonempty(df2[\"A\"])\n assert s.dtype == df2[\"A\"].dtype\n assert (df3[\"A\"] == s).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_meta_duplicated_test_meta_nonempty_empty_categories.for_dtype_in_O_f8_.assert_res_name_s_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_meta_duplicated_test_meta_nonempty_empty_categories.for_dtype_in_O_f8_.assert_res_name_s_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_utils_dataframe.py", "file_name": "test_utils_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 160, "end_line": 190, "span_ids": ["test_meta_nonempty_empty_categories", "test_meta_duplicated"], "tokens": 255}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_meta_duplicated():\n df = pd.DataFrame(columns=[\"A\", \"A\", \"B\"])\n res = meta_nonempty(df)\n\n exp = pd.DataFrame(\n [[\"foo\", \"foo\", \"foo\"], [\"foo\", \"foo\", \"foo\"]],\n index=[\"a\", \"b\"],\n columns=[\"A\", \"A\", \"B\"],\n )\n tm.assert_frame_equal(res, exp)\n\n\ndef test_meta_nonempty_empty_categories():\n for dtype in [\"O\", \"f8\", \"M8[ns]\"]:\n # Index\n idx = pd.CategoricalIndex(\n [], pd.Index([], dtype=dtype), ordered=True, name=\"foo\"\n )\n res = meta_nonempty(idx)\n assert type(res) is pd.CategoricalIndex\n assert type(res.categories) is type(idx.categories)\n assert res.ordered == idx.ordered\n assert res.name == idx.name\n # Series\n s = idx.to_series()\n res = meta_nonempty(s)\n assert res.dtype == \"category\"\n assert s.dtype == \"category\"\n assert type(res.cat.categories) is type(s.cat.categories)\n assert res.cat.ordered == s.cat.ordered\n assert res.name == s.name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_meta_nonempty_index_test_meta_nonempty_index.None_26": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_meta_nonempty_index_test_meta_nonempty_index.None_26", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_utils_dataframe.py", "file_name": "test_utils_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 193, "end_line": 273, "span_ids": ["test_meta_nonempty_index"], "tokens": 784}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_meta_nonempty_index():\n idx = pd.RangeIndex(1, name=\"foo\")\n res = meta_nonempty(idx)\n assert type(res) is pd.RangeIndex\n assert res.name == idx.name\n\n idx = pd.Int64Index([1], name=\"foo\")\n res = meta_nonempty(idx)\n assert type(res) is pd.Int64Index\n assert res.name == idx.name\n\n idx = pd.Index([\"a\"], name=\"foo\")\n res = meta_nonempty(idx)\n assert type(res) is pd.Index\n assert res.name == idx.name\n\n idx = pd.DatetimeIndex([\"1970-01-01\"], freq=\"d\", tz=\"America/New_York\", name=\"foo\")\n res = meta_nonempty(idx)\n assert type(res) is pd.DatetimeIndex\n assert res.tz == idx.tz\n assert res.freq == idx.freq\n assert res.name == idx.name\n\n idx = pd.PeriodIndex([\"1970-01-01\"], freq=\"d\", name=\"foo\")\n res = meta_nonempty(idx)\n assert type(res) is pd.PeriodIndex\n assert res.freq == idx.freq\n assert res.name == idx.name\n\n idx = pd.TimedeltaIndex([np.timedelta64(1, \"D\")], freq=\"d\", name=\"foo\")\n res = meta_nonempty(idx)\n assert type(res) is pd.TimedeltaIndex\n assert res.freq == idx.freq\n assert res.name == idx.name\n\n idx = pd.CategoricalIndex([\"xyx\"], [\"xyx\", \"zzz\"], ordered=True, name=\"foo\")\n res = meta_nonempty(idx)\n assert type(res) is pd.CategoricalIndex\n assert (res.categories == idx.categories).all()\n assert res.ordered == idx.ordered\n assert res.name == idx.name\n\n idx = pd.CategoricalIndex([], [UNKNOWN_CATEGORIES], ordered=True, name=\"foo\")\n res = meta_nonempty(idx)\n assert type(res) is pd.CategoricalIndex\n assert res.ordered == idx.ordered\n assert res.name == idx.name\n\n levels = [pd.Int64Index([1], name=\"a\"), pd.Float64Index([1.0], name=\"b\")]\n codes = [[0], [0]]\n if PANDAS_GT_0240:\n kwargs = {\"codes\": codes}\n else:\n kwargs = {\"labels\": codes}\n idx = pd.MultiIndex(levels=levels, names=[\"a\", \"b\"], **kwargs)\n res = meta_nonempty(idx)\n assert type(res) is pd.MultiIndex\n for idx1, idx2 in zip(idx.levels, res.levels):\n assert type(idx1) is type(idx2)\n assert idx1.name == idx2.name\n assert res.names == idx.names\n\n levels = [\n pd.Int64Index([1], name=\"a\"),\n pd.CategoricalIndex(data=[\"xyx\"], categories=[\"xyx\"], name=\"b\"),\n pd.TimedeltaIndex([np.timedelta64(1, \"D\")], name=\"timedelta\"),\n ]\n\n codes = [[0], [0], [0]]\n if PANDAS_GT_0240:\n kwargs = {\"codes\": codes}\n else:\n kwargs = {\"labels\": codes}\n\n idx = pd.MultiIndex(levels=levels, names=[\"a\", \"b\", \"timedelta\"], **kwargs)\n res = meta_nonempty(idx)\n assert type(res) is pd.MultiIndex\n for idx1, idx2 in zip(idx.levels, res.levels):\n assert type(idx1) is type(idx2)\n assert idx1.name == idx2.name\n assert res.names == idx.names", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_meta_nonempty_uint64index_test_raise_on_meta_error.None_1.else_.assert_False_should_hav": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_meta_nonempty_uint64index_test_raise_on_meta_error.None_1.else_.assert_False_should_hav", "embedding": null, "metadata": {"file_path": 
"dask/dataframe/tests/test_utils_dataframe.py", "file_name": "test_utils_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 276, "end_line": 309, "span_ids": ["test_meta_nonempty_uint64index", "test_meta_nonempty_scalar", "test_raise_on_meta_error"], "tokens": 250}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_meta_nonempty_uint64index():\n idx = pd.UInt64Index([1], name=\"foo\")\n res = meta_nonempty(idx)\n assert type(res) is pd.UInt64Index\n assert res.name == idx.name\n\n\ndef test_meta_nonempty_scalar():\n meta = meta_nonempty(np.float64(1.0))\n assert isinstance(meta, np.float64)\n\n x = pd.Timestamp(2000, 1, 1)\n meta = meta_nonempty(x)\n assert meta is x\n\n\ndef test_raise_on_meta_error():\n try:\n with raise_on_meta_error():\n raise RuntimeError(\"Bad stuff\")\n except Exception as e:\n assert e.args[0].startswith(\"Metadata inference failed.\\n\")\n assert \"RuntimeError\" in e.args[0]\n else:\n assert False, \"should have errored\"\n\n try:\n with raise_on_meta_error(\"myfunc\"):\n raise RuntimeError(\"Bad stuff\")\n except Exception as e:\n assert e.args[0].startswith(\"Metadata inference failed in `myfunc`.\\n\")\n assert \"RuntimeError\" in e.args[0]\n else:\n assert False, \"should have errored\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_check_meta_test_check_meta.None_6": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_check_meta_test_check_meta.None_6", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_utils_dataframe.py", "file_name": "test_utils_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 312, "end_line": 370, "span_ids": ["test_check_meta"], "tokens": 575}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_check_meta():\n df = pd.DataFrame(\n {\n \"a\": [\"x\", \"y\", \"z\"],\n \"b\": [True, False, True],\n \"c\": [1, 2.5, 3.5],\n \"d\": [1, 2, 3],\n \"e\": pd.Categorical([\"x\", \"y\", \"z\"]),\n \"f\": pd.Series([1, 2, 3], dtype=np.uint64),\n }\n )\n meta = df.iloc[:0]\n\n # DataFrame metadata passthrough if correct\n assert check_meta(df, meta) is df\n # Series metadata passthrough if correct\n e = df.e\n assert check_meta(e, meta.e) is e\n # numeric_equal means floats and ints are equivalent\n d = df.d\n f = df.f\n assert check_meta(d, meta.d.astype(\"f8\"), numeric_equal=True) is d\n assert check_meta(f, meta.f.astype(\"f8\"), numeric_equal=True) is f\n assert check_meta(f, meta.f.astype(\"i8\"), numeric_equal=True) is f\n\n # Series metadata error\n with pytest.raises(ValueError) as err:\n check_meta(d, 
meta.d.astype(\"f8\"), numeric_equal=False)\n assert str(err.value) == (\n \"Metadata mismatch found.\\n\"\n \"\\n\"\n \"Partition type: `pandas.core.series.Series`\\n\"\n \"+----------+---------+\\n\"\n \"| | dtype |\\n\"\n \"+----------+---------+\\n\"\n \"| Found | int64 |\\n\"\n \"| Expected | float64 |\\n\"\n \"+----------+---------+\"\n )\n\n # DataFrame metadata error\n meta2 = meta.astype({\"a\": \"category\", \"d\": \"f8\"})[[\"a\", \"b\", \"c\", \"d\"]]\n df2 = df[[\"a\", \"b\", \"d\", \"e\"]]\n with pytest.raises(ValueError) as err:\n check_meta(df2, meta2, funcname=\"from_delayed\")\n\n exp = (\n \"Metadata mismatch found in `from_delayed`.\\n\"\n \"\\n\"\n \"Partition type: `pandas.core.frame.DataFrame`\\n\"\n \"+--------+----------+----------+\\n\"\n \"| Column | Found | Expected |\\n\"\n \"+--------+----------+----------+\\n\"\n \"| 'a' | object | category |\\n\"\n \"| 'c' | - | float64 |\\n\"\n \"| 'e' | category | - |\\n\"\n \"+--------+----------+----------+\"\n )\n assert str(err.value) == exp", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_check_matching_columns_raises_appropriate_errors_test_check_meta_typename.assert_pandas_in_str_in": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_check_matching_columns_raises_appropriate_errors_test_check_meta_typename.assert_pandas_in_str_in", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_utils_dataframe.py", "file_name": "test_utils_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 373, "end_line": 397, "span_ids": ["test_check_matching_columns_raises_appropriate_errors", "test_check_meta_typename"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_check_matching_columns_raises_appropriate_errors():\n df = pd.DataFrame(columns=[\"a\", \"b\", \"c\"])\n\n meta = pd.DataFrame(columns=[\"b\", \"a\", \"c\"])\n with pytest.raises(ValueError, match=\"Order of columns does not match\"):\n assert check_matching_columns(meta, df)\n\n meta = pd.DataFrame(columns=[\"a\", \"b\", \"c\", \"d\"])\n with pytest.raises(ValueError, match=\"Missing: \\\\['d'\\\\]\"):\n assert check_matching_columns(meta, df)\n\n meta = pd.DataFrame(columns=[\"a\", \"b\"])\n with pytest.raises(ValueError, match=\"Extra: \\\\['c'\\\\]\"):\n assert check_matching_columns(meta, df)\n\n\ndef test_check_meta_typename():\n df = pd.DataFrame({\"x\": []})\n ddf = dd.from_pandas(df, npartitions=1)\n check_meta(df, df)\n with pytest.raises(Exception) as info:\n check_meta(ddf, df)\n\n assert \"dask\" in str(info.value)\n assert \"pandas\" in str(info.value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_is_dataframe_like_test_is_dataframe_like.None_20": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_is_dataframe_like_test_is_dataframe_like.None_20", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_utils_dataframe.py", "file_name": "test_utils_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 400, "end_line": 432, "span_ids": ["test_is_dataframe_like"], "tokens": 294}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"frame_value_counts\", [True, False])\ndef test_is_dataframe_like(monkeypatch, frame_value_counts):\n # When we drop support for pandas 1.0, this compat check can\n # be dropped\n if frame_value_counts:\n monkeypatch.setattr(pd.DataFrame, \"value_counts\", lambda x: None, raising=False)\n\n df = pd.DataFrame({\"x\": [1, 2, 3]})\n ddf = dd.from_pandas(df, npartitions=1)\n\n assert is_dataframe_like(df)\n assert is_dataframe_like(ddf)\n assert not is_dataframe_like(df.x)\n assert not is_dataframe_like(ddf.x)\n assert not is_dataframe_like(df.index)\n assert not is_dataframe_like(ddf.index)\n assert not is_dataframe_like(pd.DataFrame)\n\n assert not is_series_like(df)\n assert not is_series_like(ddf)\n assert is_series_like(df.x)\n assert is_series_like(ddf.x)\n assert not is_series_like(df.index)\n assert not is_series_like(ddf.index)\n assert not is_series_like(pd.Series)\n\n assert not is_index_like(df)\n assert not is_index_like(ddf)\n assert not is_index_like(df.x)\n assert not is_index_like(ddf.x)\n assert is_index_like(df.index)\n assert is_index_like(ddf.index)\n assert not is_index_like(pd.Index)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_apply_and_enforce_message_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_apply_and_enforce_message_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_utils_dataframe.py", "file_name": "test_utils_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 435, "end_line": 454, "span_ids": ["test_nonempty_series_sparse", "test_apply_and_enforce_message"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_and_enforce_message():\n def func():\n return pd.DataFrame(columns=[\"A\", \"B\", \"C\"], index=[0])\n\n meta = pd.DataFrame(columns=[\"A\", \"D\"], index=[0])\n with pytest.raises(ValueError, 
match=\"Extra: *['B', 'C']\"):\n apply_and_enforce(_func=func, _meta=meta)\n\n with pytest.raises(ValueError, match=re.escape(\"Missing: ['D']\")):\n apply_and_enforce(_func=func, _meta=meta)\n\n\n@pytest.mark.skipif(not PANDAS_GT_100, reason=\"Only pandas>1\")\ndef test_nonempty_series_sparse():\n ser = pd.Series(pd.array([0, 1], dtype=\"Sparse\"))\n with pytest.warns(None) as w:\n dd.utils._nonempty_series(ser)\n\n assert len(w) == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/resample.py_pd__resample_series.return.out_reindex_new_index_fi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/resample.py_pd__resample_series.return.out_reindex_new_index_fi", "embedding": null, "metadata": {"file_path": "dask/dataframe/tseries/resample.py", "file_name": "resample.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 56, "span_ids": ["imports", "getnanos", "_resample_series"], "tokens": 339}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pandas as pd\nimport numpy as np\nfrom pandas.core.resample import Resampler as pd_Resampler\n\nfrom ..core import DataFrame, Series\nfrom ...base import tokenize\nfrom ...utils import derived_from\nfrom ...highlevelgraph import HighLevelGraph\nfrom .._compat import PANDAS_GT_0240\nfrom .. import methods\n\n\ndef getnanos(rule):\n try:\n return getattr(rule, \"nanos\", None)\n except ValueError:\n return None\n\n\ndef _resample_series(\n series,\n start,\n end,\n reindex_closed,\n rule,\n resample_kwargs,\n how,\n fill_value,\n how_args,\n how_kwargs,\n):\n out = getattr(series.resample(rule, **resample_kwargs), how)(\n *how_args, **how_kwargs\n )\n if PANDAS_GT_0240:\n new_index = pd.date_range(\n start.tz_localize(None),\n end.tz_localize(None),\n freq=rule,\n closed=reindex_closed,\n name=out.index.name,\n ).tz_localize(start.tz, nonexistent=\"shift_forward\")\n\n else:\n new_index = pd.date_range(\n start, end, freq=rule, closed=reindex_closed, name=out.index.name\n )\n\n if not out.index.isin(new_index).all():\n raise ValueError(\n \"Index is not contained within new index. 
This can often be \"\n \"resolved by using larger partitions, or unambiguous \"\n \"frequencies: 'Q', 'A'...\"\n )\n\n return out.reindex(new_index, fill_value=fill_value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/resample.py__resample_bin_and_out_divs__resample_bin_and_out_divs.return.tuple_map_pd_Timestamp_n": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/resample.py__resample_bin_and_out_divs__resample_bin_and_out_divs.return.tuple_map_pd_Timestamp_n", "embedding": null, "metadata": {"file_path": "dask/dataframe/tseries/resample.py", "file_name": "resample.py", "file_type": "text/x-python", "category": "implementation", "start_line": 59, "end_line": 96, "span_ids": ["_resample_bin_and_out_divs"], "tokens": 412}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _resample_bin_and_out_divs(divisions, rule, closed=\"left\", label=\"left\"):\n rule = pd.tseries.frequencies.to_offset(rule)\n g = pd.Grouper(freq=rule, how=\"count\", closed=closed, label=label)\n\n # Determine bins to apply `how` to. Disregard labeling scheme.\n divs = pd.Series(range(len(divisions)), index=divisions)\n temp = divs.resample(rule, closed=closed, label=\"left\").count()\n tempdivs = temp.loc[temp > 0].index\n\n # Cleanup closed == 'right' and label == 'right'\n res = pd.offsets.Nano() if hasattr(rule, \"delta\") else pd.offsets.Day()\n if g.closed == \"right\":\n newdivs = tempdivs + res\n else:\n newdivs = tempdivs\n if g.label == \"right\":\n outdivs = tempdivs + rule\n else:\n outdivs = tempdivs\n\n newdivs = methods.tolist(newdivs)\n outdivs = methods.tolist(outdivs)\n\n # Adjust ends\n if newdivs[0] < divisions[0]:\n newdivs[0] = divisions[0]\n if newdivs[-1] < divisions[-1]:\n if len(newdivs) < len(divs):\n setter = lambda a, val: a.append(val)\n else:\n setter = lambda a, val: a.__setitem__(-1, val)\n setter(newdivs, divisions[-1] + res)\n if outdivs[-1] > divisions[-1]:\n setter(outdivs, outdivs[-1])\n elif outdivs[-1] < divisions[-1]:\n setter(outdivs, temp.index[-1])\n\n return tuple(map(pd.Timestamp, newdivs)), tuple(map(pd.Timestamp, outdivs))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/resample.py_Resampler_Resampler.__init__.self._kwargs.kwargs": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/resample.py_Resampler_Resampler.__init__.self._kwargs.kwargs", "embedding": null, "metadata": {"file_path": "dask/dataframe/tseries/resample.py", "file_name": "resample.py", "file_type": "text/x-python", "category": "implementation", "start_line": 99, "end_line": 129, "span_ids": ["Resampler.__init__", "Resampler"], "tokens": 217}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Resampler(object):\n \"\"\"Class for resampling timeseries data.\n\n This class is commonly encountered when using ``obj.resample(...)`` which\n return ``Resampler`` objects.\n\n Parameters\n ----------\n obj : Dask DataFrame or Series\n Data to be resampled.\n rule : str, tuple, datetime.timedelta, DateOffset or None\n The offset string or object representing the target conversion.\n kwargs : optional\n Keyword arguments passed to underlying pandas resampling function.\n\n Returns\n -------\n Resampler instance of the appropriate type\n \"\"\"\n\n def __init__(self, obj, rule, **kwargs):\n if not obj.known_divisions:\n msg = (\n \"Can only resample dataframes with known divisions\\n\"\n \"See https://docs.dask.org/en/latest/dataframe-design.html#partitions\\n\"\n \"for more information.\"\n )\n raise ValueError(msg)\n self.obj = obj\n self._rule = pd.tseries.frequencies.to_offset(rule)\n self._kwargs = kwargs", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/resample.py_Resampler._agg_Resampler._agg.return.Series_graph_name_meta_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/resample.py_Resampler._agg_Resampler._agg.return.Series_graph_name_meta_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tseries/resample.py", "file_name": "resample.py", "file_type": "text/x-python", "category": "implementation", "start_line": 131, "end_line": 190, "span_ids": ["Resampler._agg"], "tokens": 454}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Resampler(object):\n\n def _agg(self, how, meta=None, fill_value=np.nan, how_args=(), how_kwargs={}):\n \"\"\"Aggregate using one or more operations\n\n Parameters\n ----------\n how : str\n Name of aggregation operation\n fill_value : scalar, optional\n Value to use for missing values, applied during upsampling.\n Default is NaN.\n how_args : optional\n Positional arguments for aggregation operation.\n how_kwargs : optional\n Keyword arguments for aggregation operation.\n\n Returns\n -------\n Dask DataFrame or Series\n \"\"\"\n rule = self._rule\n kwargs = self._kwargs\n name = \"resample-\" + tokenize(\n self.obj, rule, kwargs, how, *how_args, **how_kwargs\n )\n\n # Create a grouper to determine closed and label conventions\n newdivs, outdivs = _resample_bin_and_out_divs(\n self.obj.divisions, rule, **kwargs\n )\n\n # Repartition divs into bins. 
These won't match labels after mapping\n partitioned = self.obj.repartition(newdivs, force=True)\n\n keys = partitioned.__dask_keys__()\n dsk = {}\n\n args = zip(keys, outdivs, outdivs[1:], [\"left\"] * (len(keys) - 1) + [None])\n for i, (k, s, e, c) in enumerate(args):\n dsk[(name, i)] = (\n _resample_series,\n k,\n s,\n e,\n c,\n rule,\n kwargs,\n how,\n fill_value,\n list(how_args),\n how_kwargs,\n )\n\n # Infer output metadata\n meta_r = self.obj._meta_nonempty.resample(self._rule, **self._kwargs)\n meta = getattr(meta_r, how)(*how_args, **how_kwargs)\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[partitioned])\n if isinstance(meta, pd.DataFrame):\n return DataFrame(graph, name, meta, outdivs)\n return Series(graph, name, meta, outdivs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/resample.py_Resampler.agg_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/resample.py_Resampler.agg_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tseries/resample.py", "file_name": "resample.py", "file_type": "text/x-python", "category": "implementation", "start_line": 192, "end_line": 259, "span_ids": ["Resampler.sum", "Resampler.agg", "Resampler.min", "Resampler.std", "Resampler.quantile", "Resampler.size", "Resampler.count", "Resampler.max", "Resampler.ohlc", "Resampler.sem", "Resampler.var", "Resampler.mean", "Resampler.first", "Resampler.nunique", "Resampler.prod", "Resampler.last", "Resampler.median"], "tokens": 411}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Resampler(object):\n\n @derived_from(pd_Resampler)\n def agg(self, agg_funcs, *args, **kwargs):\n return self._agg(\"agg\", how_args=(agg_funcs,) + args, how_kwargs=kwargs)\n\n @derived_from(pd_Resampler)\n def count(self):\n return self._agg(\"count\", fill_value=0)\n\n @derived_from(pd_Resampler)\n def first(self):\n return self._agg(\"first\")\n\n @derived_from(pd_Resampler)\n def last(self):\n return self._agg(\"last\")\n\n @derived_from(pd_Resampler)\n def mean(self):\n return self._agg(\"mean\")\n\n @derived_from(pd_Resampler)\n def min(self):\n return self._agg(\"min\")\n\n @derived_from(pd_Resampler)\n def median(self):\n return self._agg(\"median\")\n\n @derived_from(pd_Resampler)\n def max(self):\n return self._agg(\"max\")\n\n @derived_from(pd_Resampler)\n def nunique(self):\n return self._agg(\"nunique\", fill_value=0)\n\n @derived_from(pd_Resampler)\n def ohlc(self):\n return self._agg(\"ohlc\")\n\n @derived_from(pd_Resampler)\n def prod(self):\n return self._agg(\"prod\")\n\n @derived_from(pd_Resampler)\n def sem(self):\n return self._agg(\"sem\")\n\n @derived_from(pd_Resampler)\n def std(self):\n return self._agg(\"std\")\n\n @derived_from(pd_Resampler)\n def size(self):\n return self._agg(\"size\", fill_value=0)\n\n @derived_from(pd_Resampler)\n def sum(self):\n return self._agg(\"sum\", fill_value=0)\n\n @derived_from(pd_Resampler)\n def var(self):\n return 
self._agg(\"var\")\n\n @derived_from(pd_Resampler)\n def quantile(self):\n return self._agg(\"quantile\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_from_itertools_import_pro_test_series_resample.assert_expected_index_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_from_itertools_import_pro_test_series_resample.assert_expected_index_1_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tseries/tests/test_resample.py", "file_name": "test_resample.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 50, "span_ids": ["imports", "test_series_resample", "resample"], "tokens": 415}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from itertools import product\n\nimport pandas as pd\nimport pytest\n\nfrom dask.dataframe.utils import assert_eq, PANDAS_VERSION\nfrom dask.dataframe._compat import PANDAS_GT_0240\nimport dask.dataframe as dd\n\nCHECK_FREQ = {}\nif dd._compat.PANDAS_GT_110:\n CHECK_FREQ[\"check_freq\"] = False\n\n\ndef resample(df, freq, how=\"mean\", **kwargs):\n return getattr(df.resample(freq, **kwargs), how)()\n\n\n@pytest.mark.parametrize(\n [\"obj\", \"method\", \"npartitions\", \"freq\", \"closed\", \"label\"],\n list(\n product(\n [\"series\", \"frame\"],\n [\"count\", \"mean\", \"ohlc\"],\n [2, 5],\n [\"30T\", \"h\", \"d\", \"w\", \"M\"],\n [\"right\", \"left\"],\n [\"right\", \"left\"],\n )\n ),\n)\ndef test_series_resample(obj, method, npartitions, freq, closed, label):\n index = pd.date_range(\"1-1-2000\", \"2-15-2000\", freq=\"h\")\n index = index.union(pd.date_range(\"4-15-2000\", \"5-15-2000\", freq=\"h\"))\n if obj == \"series\":\n ps = pd.Series(range(len(index)), index=index)\n elif obj == \"frame\":\n ps = pd.DataFrame({\"a\": range(len(index))}, index=index)\n ds = dd.from_pandas(ps, npartitions=npartitions)\n # Series output\n\n result = resample(ds, freq, how=method, closed=closed, label=label)\n expected = resample(ps, freq, how=method, closed=closed, label=label)\n\n assert_eq(result, expected, check_dtype=False)\n\n divisions = result.divisions\n\n assert expected.index[0] == divisions[0]\n assert expected.index[-1] == divisions[-1]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_resample_has_correct_fill_value_test_resample_has_correct_fill_value.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_resample_has_correct_fill_value_test_resample_has_correct_fill_value.assert_eq_", "embedding": null, "metadata": {"file_path": 
"dask/dataframe/tseries/tests/test_resample.py", "file_name": "test_resample.py", "file_type": "text/x-python", "category": "test", "start_line": 53, "end_line": 62, "span_ids": ["test_resample_has_correct_fill_value"], "tokens": 139}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"method\", [\"count\", \"nunique\", \"size\", \"sum\"])\ndef test_resample_has_correct_fill_value(method):\n index = pd.date_range(\"2000-01-01\", \"2000-02-15\", freq=\"h\")\n index = index.union(pd.date_range(\"4-15-2000\", \"5-15-2000\", freq=\"h\"))\n ps = pd.Series(range(len(index)), index=index)\n ds = dd.from_pandas(ps, npartitions=2)\n\n assert_eq(\n getattr(ds.resample(\"30min\"), method)(), getattr(ps.resample(\"30min\"), method)()\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_resample_agg_test_resample_agg.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_resample_agg_test_resample_agg.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tseries/tests/test_resample.py", "file_name": "test_resample.py", "file_type": "text/x-python", "category": "test", "start_line": 65, "end_line": 74, "span_ids": ["test_resample_agg"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_resample_agg():\n index = pd.date_range(\"2000-01-01\", \"2000-02-15\", freq=\"h\")\n ps = pd.Series(range(len(index)), index=index)\n ds = dd.from_pandas(ps, npartitions=2)\n\n assert_eq(ds.resample(\"10min\").agg(\"mean\"), ps.resample(\"10min\").agg(\"mean\"))\n assert_eq(\n ds.resample(\"10min\").agg([\"mean\", \"min\"]),\n ps.resample(\"10min\").agg([\"mean\", \"min\"]),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_resample_agg_passes_kwargs_test_resample_agg_passes_kwargs.assert_ds_resample_2h_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_resample_agg_passes_kwargs_test_resample_agg_passes_kwargs.assert_ds_resample_2h_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tseries/tests/test_resample.py", "file_name": "test_resample.py", "file_type": "text/x-python", "category": "test", "start_line": 77, "end_line": 86, "span_ids": ["test_resample_agg_passes_kwargs"], "tokens": 137}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_resample_agg_passes_kwargs():\n index = pd.date_range(\"2000-01-01\", \"2000-02-15\", freq=\"h\")\n ps = pd.Series(range(len(index)), index=index)\n ds = dd.from_pandas(ps, npartitions=2)\n\n def foo(series, bar=1, *args, **kwargs):\n return bar\n\n assert_eq(ds.resample(\"2h\").agg(foo, bar=2), ps.resample(\"2h\").agg(foo, bar=2))\n assert (ds.resample(\"2h\").agg(foo, bar=2) == 2).compute().all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_resample_throws_error_when_parition_index_does_not_match_index_test_resample_throws_error_when_parition_index_does_not_match_index.with_pytest_raises_ValueE.ds_resample_2M_count_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_resample_throws_error_when_parition_index_does_not_match_index_test_resample_throws_error_when_parition_index_does_not_match_index.with_pytest_raises_ValueE.ds_resample_2M_count_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tseries/tests/test_resample.py", "file_name": "test_resample.py", "file_type": "text/x-python", "category": "test", "start_line": 89, "end_line": 95, "span_ids": ["test_resample_throws_error_when_parition_index_does_not_match_index"], "tokens": 126}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_resample_throws_error_when_parition_index_does_not_match_index():\n index = pd.date_range(\"1-1-2000\", \"2-15-2000\", freq=\"D\")\n index = index.union(pd.date_range(\"4-15-2000\", \"5-15-2000\", freq=\"D\"))\n ps = pd.Series(range(len(index)), index=index)\n ds = dd.from_pandas(ps, npartitions=5)\n with pytest.raises(ValueError, match=\"Index is not contained within new index.\"):\n ds.resample(\"2M\").count().compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_resample_pads_last_division_to_avoid_off_by_one_test_resample_pads_last_division_to_avoid_off_by_one.assert_eq_actual_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_resample_pads_last_division_to_avoid_off_by_one_test_resample_pads_last_division_to_avoid_off_by_one.assert_eq_actual_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tseries/tests/test_resample.py", "file_name": "test_resample.py", 
"file_type": "text/x-python", "category": "test", "start_line": 98, "end_line": 132, "span_ids": ["test_resample_pads_last_division_to_avoid_off_by_one"], "tokens": 361}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_resample_pads_last_division_to_avoid_off_by_one():\n # https://github.com/dask/dask/issues/6230\n times = [\n 1545362463409128000,\n 1545362504369352000,\n 1545362545326966000,\n 1545363118769636000,\n 1545363159726490000,\n 1545363200687178000,\n 1545363241648824000,\n 1573318190393973000,\n 1573318231353350000,\n 1573318272313774000,\n 1573318313275299000,\n 1573318354233962000,\n 1573318395195456000,\n 1573318436154609000,\n 1580687544437145000,\n 1580687585394881000,\n 1580687667316809000,\n 1580687708275414000,\n 1580687790195742000,\n 1580687831154951000,\n 1580687872115363000,\n 1580687954035133000,\n 1559127673402811000,\n ]\n\n df = pd.DataFrame({\"Time\": times, \"Counts\": range(len(times))})\n df[\"Time\"] = pd.to_datetime(df[\"Time\"], utc=True)\n expected = df.set_index(\"Time\").resample(\"1Q\").size()\n\n ddf = dd.from_pandas(df, npartitions=2).set_index(\"Time\")\n actual = ddf.resample(\"1Q\").size().compute()\n assert_eq(actual, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_resample_does_not_evenly_divide_day_test_resample_does_not_evenly_divide_day.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_resample_does_not_evenly_divide_day_test_resample_does_not_evenly_divide_day.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tseries/tests/test_resample.py", "file_name": "test_resample.py", "file_type": "text/x-python", "category": "test", "start_line": 135, "end_line": 146, "span_ids": ["test_resample_does_not_evenly_divide_day"], "tokens": 149}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_resample_does_not_evenly_divide_day():\n import numpy as np\n\n index = pd.date_range(\"2012-01-02\", \"2012-02-02\", freq=\"H\")\n index = index.union(pd.date_range(\"2012-03-02\", \"2012-04-02\", freq=\"H\"))\n df = pd.DataFrame({\"p\": np.random.random(len(index))}, index=index)\n ddf = dd.from_pandas(df, npartitions=5)\n # Frequency doesn't evenly divide day\n expected = df.resample(\"2D\").count()\n result = ddf.resample(\"2D\").count().compute()\n\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_series_resample_does_not_evenly_divide_day_test_series_resample_does_not_evenly_divide_day.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_series_resample_does_not_evenly_divide_day_test_series_resample_does_not_evenly_divide_day.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tseries/tests/test_resample.py", "file_name": "test_resample.py", "file_type": "text/x-python", "category": "test", "start_line": 149, "end_line": 160, "span_ids": ["test_series_resample_does_not_evenly_divide_day"], "tokens": 164}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_series_resample_does_not_evenly_divide_day():\n index = pd.date_range(\"2012-01-02 00:00:00\", \"2012-01-02 01:00:00\", freq=\"T\")\n index = index.union(\n pd.date_range(\"2012-01-02 06:00:00\", \"2012-01-02 08:00:00\", freq=\"T\")\n )\n s = pd.Series(range(len(index)), index=index)\n ds = dd.from_pandas(s, npartitions=5)\n # Frequency doesn't evenly divide day\n expected = s.resample(\"57T\").mean()\n result = ds.resample(\"57T\").mean().compute()\n\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_unknown_divisions_error_test_resample_index_name.assert_ddf_resample_D_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_unknown_divisions_error_test_resample_index_name.assert_ddf_resample_D_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tseries/tests/test_resample.py", "file_name": "test_resample.py", "file_type": "text/x-python", "category": "test", "start_line": 163, "end_line": 186, "span_ids": ["test_unknown_divisions_error", "test_resample_index_name"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_unknown_divisions_error():\n df = pd.DataFrame({\"x\": [1, 2, 3]})\n ddf = dd.from_pandas(df, npartitions=2, sort=False)\n try:\n ddf.x.resample(\"1m\").mean()\n assert False\n except ValueError as e:\n assert \"divisions\" in str(e)\n\n\ndef test_resample_index_name():\n import numpy as np\n from datetime import datetime, timedelta\n\n date_today = datetime.now()\n days = pd.date_range(date_today, date_today + timedelta(20), freq=\"D\")\n data = np.random.randint(1, high=100, size=len(days))\n\n df = pd.DataFrame({\"date\": days, \"values\": data})\n df = df.set_index(\"date\")\n\n ddf = dd.from_pandas(df, 
npartitions=4)\n\n assert ddf.resample(\"D\").mean().head().index.name == \"date\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_series_resample_non_existent_datetime_test_series_resample_non_existent_datetime.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_series_resample_non_existent_datetime_test_series_resample_non_existent_datetime.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tseries/tests/test_resample.py", "file_name": "test_resample.py", "file_type": "text/x-python", "category": "test", "start_line": 189, "end_line": 202, "span_ids": ["test_series_resample_non_existent_datetime"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(not PANDAS_GT_0240, reason=\"nonexistent not in 0.23 or older\")\ndef test_series_resample_non_existent_datetime():\n index = [\n pd.Timestamp(\"2016-10-15 00:00:00\"),\n pd.Timestamp(\"2016-10-16 10:00:00\"),\n pd.Timestamp(\"2016-10-17 00:00:00\"),\n ]\n df = pd.DataFrame([[1], [2], [3]], index=index)\n df.index = df.index.tz_localize(\"America/Sao_Paulo\")\n ddf = dd.from_pandas(df, npartitions=1)\n result = ddf.resample(\"1D\").mean()\n expected = df.resample(\"1D\").mean()\n\n assert_eq(result, expected, **CHECK_FREQ)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_common_aggs_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_common_aggs_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tseries/tests/test_resample.py", "file_name": "test_resample.py", "file_type": "text/x-python", "category": "test", "start_line": 205, "end_line": 218, "span_ids": ["test_common_aggs"], "tokens": 156}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(PANDAS_VERSION <= \"0.23.4\", reason=\"quantile not in 0.23\")\n@pytest.mark.parametrize(\"agg\", [\"nunique\", \"mean\", \"count\", \"size\", \"quantile\"])\ndef test_common_aggs(agg):\n index = pd.date_range(\"2000-01-01\", \"2000-02-15\", freq=\"h\")\n ps = pd.Series(range(len(index)), index=index)\n ds = dd.from_pandas(ps, npartitions=2)\n\n f = lambda df: getattr(df, agg)()\n\n res = f(ps.resample(\"1d\"))\n expected = f(ds.resample(\"1d\"))\n\n 
assert_eq(res, expected, check_dtype=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_math_is_integer_na_dtype.return.isinstance_dtype_types_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_math_is_integer_na_dtype.return.isinstance_dtype_types_", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 62, "span_ids": ["imports", "is_integer_na_dtype"], "tokens": 369}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import math\nimport numbers\nimport re\nimport textwrap\nfrom collections.abc import Iterator, Mapping\n\nimport sys\nimport traceback\nfrom contextlib import contextmanager\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import (\n is_categorical_dtype,\n is_scalar,\n is_sparse,\n is_period_dtype,\n is_datetime64tz_dtype,\n is_interval_dtype,\n)\n\n# include these here for compat\nfrom ._compat import ( # noqa: F401\n PANDAS_VERSION,\n PANDAS_GT_0240,\n PANDAS_GT_0250,\n PANDAS_GT_100,\n PANDAS_GT_110,\n HAS_INT_NA,\n tm,\n)\n\nfrom .extensions import make_array_nonempty, make_scalar\nfrom ..base import is_dask_collection\nfrom ..core import get_deps\nfrom ..local import get_sync\nfrom ..utils import asciitable, is_arraylike, Dispatch, typename\nfrom ..utils import is_dataframe_like as dask_is_dataframe_like\nfrom ..utils import is_series_like as dask_is_series_like\nfrom ..utils import is_index_like as dask_is_index_like\n\n# register pandas extension types\nfrom . import _dtypes # noqa: F401\nfrom . 
import methods\n\n\ndef is_integer_na_dtype(t):\n dtype = getattr(t, \"dtype\", t)\n if HAS_INT_NA:\n types = (\n pd.Int8Dtype,\n pd.Int16Dtype,\n pd.Int32Dtype,\n pd.Int64Dtype,\n pd.UInt8Dtype,\n pd.UInt16Dtype,\n pd.UInt32Dtype,\n pd.UInt64Dtype,\n )\n else:\n types = ()\n return isinstance(dtype, types)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_shard_df_on_index_shard_df_on_index.if_not_len_divisions_.else_.yield_df_iloc_indices_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_shard_df_on_index_shard_df_on_index.if_not_len_divisions_.else_.yield_df_iloc_indices_1_", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 65, "end_line": 118, "span_ids": ["shard_df_on_index"], "tokens": 423}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def shard_df_on_index(df, divisions):\n \"\"\"Shard a DataFrame by ranges on its index\n\n Examples\n --------\n\n >>> df = pd.DataFrame({'a': [0, 10, 20, 30, 40], 'b': [5, 4 ,3, 2, 1]})\n >>> df\n a b\n 0 0 5\n 1 10 4\n 2 20 3\n 3 30 2\n 4 40 1\n\n >>> shards = list(shard_df_on_index(df, [2, 4]))\n >>> shards[0]\n a b\n 0 0 5\n 1 10 4\n\n >>> shards[1]\n a b\n 2 20 3\n 3 30 2\n\n >>> shards[2]\n a b\n 4 40 1\n\n >>> list(shard_df_on_index(df, []))[0] # empty case\n a b\n 0 0 5\n 1 10 4\n 2 20 3\n 3 30 2\n 4 40 1\n \"\"\"\n\n if isinstance(divisions, Iterator):\n divisions = list(divisions)\n if not len(divisions):\n yield df\n else:\n divisions = np.array(divisions)\n df = df.sort_index()\n index = df.index\n if is_categorical_dtype(index):\n index = index.as_ordered()\n indices = index.searchsorted(divisions)\n yield df.iloc[: indices[0]]\n for i in range(len(indices) - 1):\n yield df.iloc[indices[i] : indices[i + 1]]\n yield df.iloc[indices[-1] :]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py__META_TYPES__META_DESCRIPTION._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py__META_TYPES__META_DESCRIPTION._", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 121, "end_line": 133, "span_ids": ["impl:4"], "tokens": 191}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "_META_TYPES 
= \"meta : pd.DataFrame, pd.Series, dict, iterable, tuple, optional\"\n_META_DESCRIPTION = \"\"\"\\\nAn empty ``pd.DataFrame`` or ``pd.Series`` that matches the dtypes and\ncolumn names of the output. This metadata is necessary for many algorithms\nin dask dataframe to work. For ease of use, some alternative inputs are\nalso available. Instead of a ``DataFrame``, a ``dict`` of ``{name: dtype}``\nor iterable of ``(name, dtype)`` can be provided (note that the order of\nthe names should match the order of the columns). Instead of a series, a\ntuple of ``(name, dtype)`` can be used. If not provided, dask will try to\ninfer the metadata. This may lead to unexpected results, so providing\n``meta`` is recommended. For more information, see\n``dask.dataframe.utils.make_meta``.\n\"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_insert_meta_param_description_insert_meta_param_description.return.f": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_insert_meta_param_description_insert_meta_param_description.return.f", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 136, "end_line": 160, "span_ids": ["insert_meta_param_description"], "tokens": 282}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def insert_meta_param_description(*args, **kwargs):\n \"\"\"Replace `$META` in docstring with param description.\n\n If pad keyword is provided, will pad description by that number of\n spaces (default is 8).\"\"\"\n if not args:\n return lambda f: insert_meta_param_description(f, **kwargs)\n f = args[0]\n indent = \" \" * kwargs.get(\"pad\", 8)\n body = textwrap.wrap(\n _META_DESCRIPTION, initial_indent=indent, subsequent_indent=indent, width=78\n )\n descr = \"{0}\\n{1}\".format(_META_TYPES, \"\\n\".join(body))\n if f.__doc__:\n if \"$META\" in f.__doc__:\n f.__doc__ = f.__doc__.replace(\"$META\", descr)\n else:\n # Put it at the end of the parameters section\n parameter_header = \"Parameters\\n%s----------\" % indent[4:]\n first, last = re.split(\"Parameters\\\\n[ ]*----------\", f.__doc__)\n parameters, rest = last.split(\"\\n\\n\", 1)\n f.__doc__ = \"{0}{1}{2}\\n{3}{4}\\n\\n{5}\".format(\n first, parameter_header, parameters, indent[4:], descr, rest\n )\n return f", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_raise_on_meta_error_raise_on_meta_error.try_.except_Exception_as_e_.raise_ValueError_msg_fro": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_raise_on_meta_error_raise_on_meta_error.try_.except_Exception_as_e_.raise_ValueError_msg_fro", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 163, "end_line": 195, "span_ids": ["raise_on_meta_error"], "tokens": 281}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@contextmanager\ndef raise_on_meta_error(funcname=None, udf=False):\n \"\"\"Reraise errors in this block to show metadata inference failure.\n\n Parameters\n ----------\n funcname : str, optional\n If provided, will be added to the error message to indicate the\n name of the method that failed.\n \"\"\"\n try:\n yield\n except Exception as e:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n tb = \"\".join(traceback.format_tb(exc_traceback))\n msg = \"Metadata inference failed{0}.\\n\\n\"\n if udf:\n msg += (\n \"You have supplied a custom function and Dask is unable to \\n\"\n \"determine the type of output that that function returns. \\n\\n\"\n \"To resolve this please provide a meta= keyword.\\n\"\n \"The docstring of the Dask function you ran should have more information.\\n\\n\"\n )\n msg += (\n \"Original error is below:\\n\"\n \"------------------------\\n\"\n \"{1}\\n\\n\"\n \"Traceback:\\n\"\n \"---------\\n\"\n \"{2}\"\n )\n msg = msg.format(\" in `{0}`\".format(funcname) if funcname else \"\", repr(e), tb)\n raise ValueError(msg) from e", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_UNKNOWN_CATEGORIES_has_known_categories.raise_TypeError_Expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_UNKNOWN_CATEGORIES_has_known_categories.raise_TypeError_Expected", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 198, "end_line": 213, "span_ids": ["has_known_categories", "impl:8"], "tokens": 109}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "UNKNOWN_CATEGORIES = \"__UNKNOWN_CATEGORIES__\"\n\n\ndef has_known_categories(x):\n \"\"\"Returns whether the categories in `x` are known.\n\n Parameters\n ----------\n x : Series or CategoricalIndex\n \"\"\"\n x = getattr(x, \"_meta\", x)\n if is_series_like(x):\n return UNKNOWN_CATEGORIES not in x.cat.categories\n elif is_index_like(x) and hasattr(x, \"categories\"):\n return UNKNOWN_CATEGORIES not in x.categories\n raise TypeError(\"Expected Series or CategoricalIndex\")", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_strip_unknown_categories_strip_unknown_categories.return.x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_strip_unknown_categories_strip_unknown_categories.return.x", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 216, "end_line": 242, "span_ids": ["strip_unknown_categories"], "tokens": 234}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def strip_unknown_categories(x, just_drop_unknown=False):\n \"\"\"Replace any unknown categoricals with empty categoricals.\n\n Useful for preventing ``UNKNOWN_CATEGORIES`` from leaking into results.\n \"\"\"\n if isinstance(x, (pd.Series, pd.DataFrame)):\n x = x.copy()\n if isinstance(x, pd.DataFrame):\n cat_mask = x.dtypes == \"category\"\n if cat_mask.any():\n cats = cat_mask[cat_mask].index\n for c in cats:\n if not has_known_categories(x[c]):\n if just_drop_unknown:\n x[c].cat.remove_categories(UNKNOWN_CATEGORIES, inplace=True)\n else:\n x[c].cat.set_categories([], inplace=True)\n elif isinstance(x, pd.Series):\n if is_categorical_dtype(x.dtype) and not has_known_categories(x):\n x.cat.set_categories([], inplace=True)\n if isinstance(x.index, pd.CategoricalIndex) and not has_known_categories(\n x.index\n ):\n x.index = x.index.set_categories([])\n elif isinstance(x, pd.CategoricalIndex) and not has_known_categories(x):\n x = x.set_categories([])\n return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_clear_known_categories_clear_known_categories.return.x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_clear_known_categories_clear_known_categories.return.x", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 245, "end_line": 275, "span_ids": ["clear_known_categories"], "tokens": 276}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def clear_known_categories(x, cols=None, index=True):\n \"\"\"Set categories to be unknown.\n\n Parameters\n ----------\n x : DataFrame, Series, Index\n cols : iterable, optional\n If x is a DataFrame, set only categoricals in these columns to unknown.\n By default, all categorical columns are set to unknown categoricals\n index : bool, optional\n If True and x 
is a Series or DataFrame, set the clear known categories\n in the index as well.\n \"\"\"\n if isinstance(x, (pd.Series, pd.DataFrame)):\n x = x.copy()\n if isinstance(x, pd.DataFrame):\n mask = x.dtypes == \"category\"\n if cols is None:\n cols = mask[mask].index\n elif not mask.loc[cols].all():\n raise ValueError(\"Not all columns are categoricals\")\n for c in cols:\n x[c].cat.set_categories([UNKNOWN_CATEGORIES], inplace=True)\n elif isinstance(x, pd.Series):\n if is_categorical_dtype(x.dtype):\n x.cat.set_categories([UNKNOWN_CATEGORIES], inplace=True)\n if index and isinstance(x.index, pd.CategoricalIndex):\n x.index = x.index.set_categories([UNKNOWN_CATEGORIES])\n elif isinstance(x, pd.CategoricalIndex):\n x = x.set_categories([UNKNOWN_CATEGORIES])\n return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py__empty_series_make_meta_index.return.x_0_0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py__empty_series_make_meta_index.return.x_0_0_", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 278, "end_line": 296, "span_ids": ["make_meta_pandas", "impl:10", "_empty_series", "make_meta_index"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _empty_series(name, dtype, index=None):\n if isinstance(dtype, str) and dtype == \"category\":\n return pd.Series(\n pd.Categorical([UNKNOWN_CATEGORIES]), name=name, index=index\n ).iloc[:0]\n return pd.Series([], dtype=dtype, name=name, index=index)\n\n\nmake_meta = Dispatch(\"make_meta\")\n\n\n@make_meta.register((pd.Series, pd.DataFrame))\ndef make_meta_pandas(x, index=None):\n return x.iloc[:0]\n\n\n@make_meta.register(pd.Index)\ndef make_meta_index(x, index=None):\n return x[0:0]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_make_meta_object_make_meta_object.raise_TypeError_Don_t_kn": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_make_meta_object_make_meta_object.raise_TypeError_Don_t_kn", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 299, "end_line": 365, "span_ids": ["make_meta_object"], "tokens": 620}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, 
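A sketch of the round trip between `clear_known_categories` and `strip_unknown_categories` shown above; note these helpers use the `inplace=` categorical API of the older pandas versions (<1.2) this snapshot targets:

import pandas as pd
from dask.dataframe.utils import (
    clear_known_categories,
    strip_unknown_categories,
    has_known_categories,
)

s = pd.Series(["a", "b", "a"], dtype="category")
unknown = clear_known_categories(s)        # categories replaced by the sentinel
print(has_known_categories(unknown))       # False
print(list(unknown.cat.categories))        # ['__UNKNOWN_CATEGORIES__']

clean = strip_unknown_categories(unknown)  # sentinel removed -> empty categories
print(list(clean.cat.categories))          # []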
"text": "@make_meta.register(object)\ndef make_meta_object(x, index=None):\n \"\"\"Create an empty pandas object containing the desired metadata.\n\n Parameters\n ----------\n x : dict, tuple, list, pd.Series, pd.DataFrame, pd.Index, dtype, scalar\n To create a DataFrame, provide a `dict` mapping of `{name: dtype}`, or\n an iterable of `(name, dtype)` tuples. To create a `Series`, provide a\n tuple of `(name, dtype)`. If a pandas object, names, dtypes, and index\n should match the desired output. If a dtype or scalar, a scalar of the\n same dtype is returned.\n index : pd.Index, optional\n Any pandas index to use in the metadata. If none provided, a\n `RangeIndex` will be used.\n\n Examples\n --------\n\n >>> make_meta([('a', 'i8'), ('b', 'O')]) # doctest: +SKIP\n Empty DataFrame\n Columns: [a, b]\n Index: []\n >>> make_meta(('a', 'f8')) # doctest: +SKIP\n Series([], Name: a, dtype: float64)\n >>> make_meta('i8') # doctest: +SKIP\n 1\n \"\"\"\n if hasattr(x, \"_meta\"):\n return x._meta\n elif is_arraylike(x) and x.shape:\n return x[:0]\n\n if index is not None:\n index = make_meta(index)\n\n if isinstance(x, dict):\n return pd.DataFrame(\n {c: _empty_series(c, d, index=index) for (c, d) in x.items()}, index=index\n )\n if isinstance(x, tuple) and len(x) == 2:\n return _empty_series(x[0], x[1], index=index)\n elif isinstance(x, (list, tuple)):\n if not all(isinstance(i, tuple) and len(i) == 2 for i in x):\n raise ValueError(\n \"Expected iterable of tuples of (name, dtype), got {0}\".format(x)\n )\n return pd.DataFrame(\n {c: _empty_series(c, d, index=index) for (c, d) in x},\n columns=[c for c, d in x],\n index=index,\n )\n elif not hasattr(x, \"dtype\") and x is not None:\n # could be a string, a dtype object, or a python type. Skip `None`,\n # because it is implictly converted to `dtype('f8')`, which we don't\n # want here.\n try:\n dtype = np.dtype(x)\n return _scalar_from_dtype(dtype)\n except Exception:\n # Continue on to next check\n pass\n\n if is_scalar(x):\n return _nonempty_scalar(x)\n\n raise TypeError(\"Don't know how to create metadata from {0}\".format(x))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py__numeric_index_types_meta_nonempty_object.if_is_scalar_x_.else_.raise_TypeError_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py__numeric_index_types_meta_nonempty_object.if_is_scalar_x_.else_.raise_TypeError_", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 367, "end_line": 385, "span_ids": ["meta_nonempty_object", "impl:12"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "_numeric_index_types = (pd.Int64Index, pd.Float64Index, pd.UInt64Index)\n\nmeta_nonempty = Dispatch(\"meta_nonempty\")\n\n\n@meta_nonempty.register(object)\ndef meta_nonempty_object(x):\n \"\"\"Create a nonempty pandas object from the given 
metadata.\n\n Returns a pandas DataFrame, Series, or Index that contains two rows\n of fake data.\n \"\"\"\n if is_scalar(x):\n return _nonempty_scalar(x)\n else:\n raise TypeError(\n \"Expected Pandas-like Index, Series, DataFrame, or scalar, \"\n \"got {0}\".format(typename(type(x)))\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_meta_nonempty_dataframe_meta_nonempty_dataframe.return.res": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_meta_nonempty_dataframe_meta_nonempty_dataframe.return.res", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 389, "end_line": 404, "span_ids": ["meta_nonempty_dataframe"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@meta_nonempty.register(pd.DataFrame)\ndef meta_nonempty_dataframe(x):\n idx = meta_nonempty(x.index)\n dt_s_dict = dict()\n data = dict()\n for i, c in enumerate(x.columns):\n series = x.iloc[:, i]\n dt = series.dtype\n if dt not in dt_s_dict:\n dt_s_dict[dt] = _nonempty_series(x.iloc[:, i], idx=idx)\n data[i] = dt_s_dict[dt]\n res = pd.DataFrame(data, index=idx, columns=np.arange(len(x.columns)))\n res.columns = x.columns\n if PANDAS_GT_100:\n res.attrs = x.attrs\n return res", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py__nonempty_index__nonempty_index.raise_TypeError_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py__nonempty_index__nonempty_index.raise_TypeError_", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 404, "end_line": 463, "span_ids": ["_nonempty_index"], "tokens": 654}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@meta_nonempty.register(pd.Index)\ndef _nonempty_index(idx):\n typ = type(idx)\n if typ is pd.RangeIndex:\n return pd.RangeIndex(2, name=idx.name)\n elif typ in _numeric_index_types:\n return typ([1, 2], name=idx.name)\n elif typ is pd.Index:\n return pd.Index([\"a\", \"b\"], name=idx.name)\n elif typ is pd.DatetimeIndex:\n start = \"1970-01-01\"\n # Need a non-monotonic decreasing index to avoid issues with\n # partial string indexing see https://github.com/dask/dask/issues/2389\n # and 
https://github.com/pandas-dev/pandas/issues/16515\n # This doesn't mean `_meta_nonempty` should ever rely on\n # `self.monotonic_increasing` or `self.monotonic_decreasing`\n try:\n return pd.date_range(\n start=start, periods=2, freq=idx.freq, tz=idx.tz, name=idx.name\n )\n except ValueError: # older pandas versions\n data = [start, \"1970-01-02\"] if idx.freq is None else None\n return pd.DatetimeIndex(\n data, start=start, periods=2, freq=idx.freq, tz=idx.tz, name=idx.name\n )\n elif typ is pd.PeriodIndex:\n return pd.period_range(\n start=\"1970-01-01\", periods=2, freq=idx.freq, name=idx.name\n )\n elif typ is pd.TimedeltaIndex:\n start = np.timedelta64(1, \"D\")\n try:\n return pd.timedelta_range(\n start=start, periods=2, freq=idx.freq, name=idx.name\n )\n except ValueError: # older pandas versions\n start = np.timedelta64(1, \"D\")\n data = [start, start + 1] if idx.freq is None else None\n return pd.TimedeltaIndex(\n data, start=start, periods=2, freq=idx.freq, name=idx.name\n )\n elif typ is pd.CategoricalIndex:\n if len(idx.categories) == 0:\n data = pd.Categorical(_nonempty_index(idx.categories), ordered=idx.ordered)\n else:\n data = pd.Categorical.from_codes(\n [-1, 0], categories=idx.categories, ordered=idx.ordered\n )\n return pd.CategoricalIndex(data, name=idx.name)\n elif typ is pd.MultiIndex:\n levels = [_nonempty_index(l) for l in idx.levels]\n codes = [[0, 0] for i in idx.levels]\n try:\n return pd.MultiIndex(levels=levels, codes=codes, names=idx.names)\n except TypeError: # older pandas versions\n return pd.MultiIndex(levels=levels, labels=codes, names=idx.names)\n\n raise TypeError(\n \"Don't know how to handle index of type {0}\".format(typename(type(idx)))\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_hash_object_dispatch_group_split_pandas.return.dict_zip_range_k_parts_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_hash_object_dispatch_group_split_pandas.return.dict_zip_range_k_parts_", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 466, "end_line": 492, "span_ids": ["hash_object_pandas", "impl:16", "group_split_pandas", "impl:18"], "tokens": 221}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "hash_object_dispatch = Dispatch(\"hash_object_dispatch\")\n\n\n@hash_object_dispatch.register((pd.DataFrame, pd.Series, pd.Index))\ndef hash_object_pandas(\n obj, index=True, encoding=\"utf8\", hash_key=None, categorize=True\n):\n return pd.util.hash_pandas_object(\n obj, index=index, encoding=encoding, hash_key=hash_key, categorize=categorize\n )\n\n\ngroup_split_dispatch = Dispatch(\"group_split_dispatch\")\n\n\n@group_split_dispatch.register((pd.DataFrame, pd.Series, pd.Index))\ndef group_split_pandas(df, c, k, ignore_index=False):\n indexer, locations = pd._libs.algos.groupsort_indexer(\n c.astype(np.int64, copy=False), k\n 
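A sketch of `meta_nonempty` producing two rows of dtype-appropriate fake data from empty metadata (dask ~2.x; the timezone-aware index is just one example input for the `_nonempty_index` paths above):

import pandas as pd
from dask.dataframe.utils import make_meta, meta_nonempty

meta = make_meta({"a": "i8", "b": "O"})
fake = meta_nonempty(meta)
print(len(fake))                      # 2
print([str(t) for t in fake.dtypes])  # ['int64', 'object']

idx = pd.DatetimeIndex([], tz="UTC", name="t")
print(meta_nonempty(idx))             # two UTC timestamps starting at 1970-01-01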
)\n df2 = df.take(indexer)\n locations = locations.cumsum()\n parts = [\n df2.iloc[a:b].reset_index(drop=True) if ignore_index else df2.iloc[a:b]\n for a, b in zip(locations[:-1], locations[1:])\n ]\n return dict(zip(range(k), parts))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py__simple_fake_mapping__nonempty_scalar.raise_TypeError_Can_t_ha": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py__simple_fake_mapping__nonempty_scalar.raise_TypeError_Can_t_ha", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 495, "end_line": 540, "span_ids": ["_scalar_from_dtype", "__18", "_nonempty_scalar", "impl:20", "_"], "tokens": 328}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "_simple_fake_mapping = {\n \"b\": np.bool_(True),\n \"V\": np.void(b\" \"),\n \"M\": np.datetime64(\"1970-01-01\"),\n \"m\": np.timedelta64(1),\n \"S\": np.str_(\"foo\"),\n \"a\": np.str_(\"foo\"),\n \"U\": np.unicode_(\"foo\"),\n \"O\": \"foo\",\n}\n\n\ndef _scalar_from_dtype(dtype):\n if dtype.kind in (\"i\", \"f\", \"u\"):\n return dtype.type(1)\n elif dtype.kind == \"c\":\n return dtype.type(complex(1, 0))\n elif dtype.kind in _simple_fake_mapping:\n o = _simple_fake_mapping[dtype.kind]\n return o.astype(dtype) if dtype.kind in (\"m\", \"M\") else o\n else:\n raise TypeError(\"Can't handle dtype: {0}\".format(dtype))\n\n\n@make_scalar.register(np.dtype)\ndef _(dtype):\n return _scalar_from_dtype(dtype)\n\n\n@make_scalar.register(pd.Timestamp)\n@make_scalar.register(pd.Timedelta)\n@make_scalar.register(pd.Period)\n@make_scalar.register(pd.Interval)\ndef _(x):\n return x\n\n\ndef _nonempty_scalar(x):\n if type(x) in make_scalar._lookup:\n return make_scalar(x)\n\n if np.isscalar(x):\n dtype = x.dtype if hasattr(x, \"dtype\") else np.dtype(type(x))\n return make_scalar(dtype)\n\n raise TypeError(\"Can't handle meta of type '{0}'\".format(typename(type(x))))\n\n\n###############################################################", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py__nonempty_series_is_index_like.return.dask_is_index_like_s_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py__nonempty_series_is_index_like.return.dask_is_index_like_s_", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 546, "end_line": 607, "span_ids": ["is_series_like", "is_index_like", "is_dataframe_like", "_nonempty_series"], "tokens": 536}, 
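A sketch of hash-based partitioning with the two dispatches above; the modulus-3 group assignment is an arbitrary illustration, not how dask derives partition numbers in general:

import numpy as np
import pandas as pd
from dask.dataframe.utils import hash_object_pandas, group_split_pandas

df = pd.DataFrame({"x": range(6)})
h = hash_object_pandas(df, index=False)      # one uint64 hash per row
codes = (h % 3).to_numpy().astype(np.int64)  # map each row to a group in [0, 3)
parts = group_split_pandas(df, codes, 3, ignore_index=True)
print(sorted(parts))                         # [0, 1, 2]
print(sum(len(p) for p in parts.values()))   # 6 (every row lands in one group)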
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@meta_nonempty.register(pd.Series)\ndef _nonempty_series(s, idx=None):\n # TODO: Use register dtypes with make_array_nonempty\n if idx is None:\n idx = _nonempty_index(s.index)\n dtype = s.dtype\n if is_datetime64tz_dtype(dtype):\n entry = pd.Timestamp(\"1970-01-01\", tz=dtype.tz)\n data = [entry, entry]\n elif is_categorical_dtype(dtype):\n if len(s.cat.categories):\n data = [s.cat.categories[0]] * 2\n cats = s.cat.categories\n else:\n data = _nonempty_index(s.cat.categories)\n cats = s.cat.categories[:0]\n data = pd.Categorical(data, categories=cats, ordered=s.cat.ordered)\n elif is_integer_na_dtype(dtype):\n data = pd.array([1, None], dtype=dtype)\n elif is_period_dtype(dtype):\n # pandas 0.24.0+ should infer this to be Series[Period[freq]]\n freq = dtype.freq\n data = [pd.Period(\"2000\", freq), pd.Period(\"2001\", freq)]\n elif is_sparse(dtype):\n # TODO: pandas <0.24\n # Pandas <= 0.23.4:\n if PANDAS_GT_0240:\n entry = _scalar_from_dtype(dtype.subtype)\n else:\n entry = _scalar_from_dtype(dtype.subtype)\n if PANDAS_GT_100:\n data = pd.array([entry, entry], dtype=dtype)\n else:\n data = pd.SparseArray([entry, entry], dtype=dtype)\n elif is_interval_dtype(dtype):\n entry = _scalar_from_dtype(dtype.subtype)\n if PANDAS_GT_0240:\n data = pd.array([entry, entry], dtype=dtype)\n else:\n data = np.array([entry, entry], dtype=dtype)\n elif type(dtype) in make_array_nonempty._lookup:\n data = make_array_nonempty(dtype)\n else:\n entry = _scalar_from_dtype(dtype)\n data = np.array([entry, entry], dtype=dtype)\n\n out = pd.Series(data, name=s.name, index=idx)\n if PANDAS_GT_100:\n out.attrs = s.attrs\n return out\n\n\ndef is_dataframe_like(df):\n return dask_is_dataframe_like(df)\n\n\ndef is_series_like(s):\n return dask_is_series_like(s)\n\n\ndef is_index_like(s):\n return dask_is_index_like(s)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_check_meta_check_meta.raise_ValueError_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_check_meta_check_meta.raise_ValueError_", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 604, "end_line": 676, "span_ids": ["check_meta"], "tokens": 653}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def check_meta(x, meta, funcname=None, numeric_equal=True):\n \"\"\"Check that the dask metadata matches the result.\n\n If metadata matches, ``x`` is passed through unchanged. 
A nice error is\n raised if metadata doesn't match.\n\n Parameters\n ----------\n x : DataFrame, Series, or Index\n meta : DataFrame, Series, or Index\n The expected metadata that ``x`` should match\n funcname : str, optional\n The name of the function in which the metadata was specified. If\n provided, the function name will be included in the error message to be\n more helpful to users.\n numeric_equal : bool, optionl\n If True, integer and floating dtypes compare equal. This is useful due\n to panda's implicit conversion of integer to floating upon encountering\n missingness, which is hard to infer statically.\n \"\"\"\n eq_types = {\"i\", \"f\", \"u\"} if numeric_equal else set()\n\n def equal_dtypes(a, b):\n if is_categorical_dtype(a) != is_categorical_dtype(b):\n return False\n if isinstance(a, str) and a == \"-\" or isinstance(b, str) and b == \"-\":\n return False\n if is_categorical_dtype(a) and is_categorical_dtype(b):\n if UNKNOWN_CATEGORIES in a.categories or UNKNOWN_CATEGORIES in b.categories:\n return True\n return a == b\n return (a.kind in eq_types and b.kind in eq_types) or (a == b)\n\n if not (\n is_dataframe_like(meta) or is_series_like(meta) or is_index_like(meta)\n ) or is_dask_collection(meta):\n raise TypeError(\n \"Expected partition to be DataFrame, Series, or \"\n \"Index, got `%s`\" % typename(type(meta))\n )\n\n if type(x) != type(meta):\n errmsg = \"Expected partition of type `%s` but got `%s`\" % (\n typename(type(meta)),\n typename(type(x)),\n )\n elif is_dataframe_like(meta):\n dtypes = pd.concat([x.dtypes, meta.dtypes], axis=1, sort=True)\n bad_dtypes = [\n (repr(col), a, b)\n for col, a, b in dtypes.fillna(\"-\").itertuples()\n if not equal_dtypes(a, b)\n ]\n if bad_dtypes:\n errmsg = \"Partition type: `%s`\\n%s\" % (\n typename(type(meta)),\n asciitable([\"Column\", \"Found\", \"Expected\"], bad_dtypes),\n )\n else:\n check_matching_columns(meta, x)\n return x\n else:\n if equal_dtypes(x.dtype, meta.dtype):\n return x\n errmsg = \"Partition type: `%s`\\n%s\" % (\n typename(type(meta)),\n asciitable([\"\", \"dtype\"], [(\"Found\", x.dtype), (\"Expected\", meta.dtype)]),\n )\n\n raise ValueError(\n \"Metadata mismatch found%s.\\n\\n\"\n \"%s\" % ((\" in `%s`\" % funcname if funcname else \"\"), errmsg)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_check_matching_columns_index_summary.return._entries_format": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_check_matching_columns_index_summary.return._entries_format", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 679, "end_line": 707, "span_ids": ["check_matching_columns", "index_summary"], "tokens": 229}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def check_matching_columns(meta, actual):\n # Need nan_to_num otherwise nan comparison gives False\n if not 
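A sketch of `check_meta` accepting a matching partition and rejecting a dtype mismatch; `my_op` is a hypothetical function name used only for the error message:

import pandas as pd
from dask.dataframe.utils import check_meta, make_meta

meta = make_meta({"a": "i8", "b": "O"})
ok = pd.DataFrame({"a": [1], "b": ["x"]})
check_meta(ok, meta)                        # passes; returns `ok` unchanged

bad = pd.DataFrame({"a": [1.5], "b": [2]})  # 'a' passes (numeric_equal), 'b' does not
try:
    check_meta(bad, meta, funcname="my_op")
except ValueError as e:
    print(str(e).splitlines()[0])           # Metadata mismatch found in `my_op`.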
np.array_equal(np.nan_to_num(meta.columns), np.nan_to_num(actual.columns)):\n extra = methods.tolist(actual.columns.difference(meta.columns))\n missing = methods.tolist(meta.columns.difference(actual.columns))\n if extra or missing:\n extra_info = f\" Extra: {extra}\\n Missing: {missing}\"\n else:\n extra_info = \"Order of columns does not match\"\n raise ValueError(\n \"The columns in the computed data do not match\"\n \" the columns in the provided metadata\\n\"\n f\"{extra_info}\"\n )\n\n\ndef index_summary(idx, name=None):\n \"\"\"Summarized representation of an Index.\"\"\"\n n = len(idx)\n if name is None:\n name = idx.__class__.__name__\n if n:\n head = idx[0]\n tail = idx[-1]\n summary = \", {} to {}\".format(head, tail)\n else:\n summary = \"\"\n\n return \"{}: {} entries{}\".format(name, n, summary)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py____check_dask.return.dsk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py____check_dask.return.dsk", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 710, "end_line": 773, "span_ids": ["index_summary", "_check_dask"], "tokens": 559}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@make_scalar.register(np.dtype)\ndef _(dtype):\n # ... 
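Small sketches of the two helpers above, `check_matching_columns` and `index_summary`:

import pandas as pd
from dask.dataframe.utils import check_matching_columns, index_summary

print(index_summary(pd.RangeIndex(4)))  # RangeIndex: 4 entries, 0 to 3

meta = pd.DataFrame(columns=["a", "b"])
actual = pd.DataFrame(columns=["b", "a"])
try:
    check_matching_columns(meta, actual)  # same names, wrong order
except ValueError as e:
    print(str(e).splitlines()[-1])        # Order of columns does not match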
other code\n\n\n###############################################################\n# Testing\n###############################################################\n\n\ndef _check_dask(dsk, check_names=True, check_dtypes=True, result=None):\n import dask.dataframe as dd\n\n if hasattr(dsk, \"__dask_graph__\"):\n graph = dsk.__dask_graph__()\n if hasattr(graph, \"validate\"):\n graph.validate()\n if result is None:\n result = dsk.compute(scheduler=\"sync\")\n if isinstance(dsk, dd.Index):\n assert \"Index\" in type(result).__name__, type(result)\n # assert type(dsk._meta) == type(result), type(dsk._meta)\n if check_names:\n assert dsk.name == result.name\n assert dsk._meta.name == result.name\n if isinstance(result, pd.MultiIndex):\n assert result.names == dsk._meta.names\n if check_dtypes:\n assert_dask_dtypes(dsk, result)\n elif isinstance(dsk, dd.Series):\n assert \"Series\" in type(result).__name__, type(result)\n assert type(dsk._meta) == type(result), type(dsk._meta)\n if check_names:\n assert dsk.name == result.name, (dsk.name, result.name)\n assert dsk._meta.name == result.name\n if check_dtypes:\n assert_dask_dtypes(dsk, result)\n _check_dask(\n dsk.index,\n check_names=check_names,\n check_dtypes=check_dtypes,\n result=result.index,\n )\n elif isinstance(dsk, dd.DataFrame):\n assert \"DataFrame\" in type(result).__name__, type(result)\n assert isinstance(dsk.columns, pd.Index), type(dsk.columns)\n assert type(dsk._meta) == type(result), type(dsk._meta)\n if check_names:\n tm.assert_index_equal(dsk.columns, result.columns)\n tm.assert_index_equal(dsk._meta.columns, result.columns)\n if check_dtypes:\n assert_dask_dtypes(dsk, result)\n _check_dask(\n dsk.index,\n check_names=check_names,\n check_dtypes=check_dtypes,\n result=result.index,\n )\n elif isinstance(dsk, dd.core.Scalar):\n assert np.isscalar(result) or isinstance(\n result, (pd.Timestamp, pd.Timedelta)\n )\n if check_dtypes:\n assert_dask_dtypes(dsk, result)\n else:\n msg = \"Unsupported dask instance {0} found\".format(type(dsk))\n raise AssertionError(msg)\n return result\n return dsk", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py__maybe_sort_assert_eq.return.True": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py__maybe_sort_assert_eq.return.True", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 776, "end_line": 837, "span_ids": ["_maybe_sort", "assert_eq"], "tokens": 492}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _maybe_sort(a):\n # sort by value, then index\n try:\n if is_dataframe_like(a):\n if set(a.index.names) & set(a.columns):\n a.index.names = [\n \"-overlapped-index-name-%d\" % i for i in range(len(a.index.names))\n ]\n a = a.sort_values(by=methods.tolist(a.columns))\n else:\n a = a.sort_values()\n except (TypeError, IndexError, ValueError):\n pass\n return 
a.sort_index()\n\n\ndef assert_eq(\n a,\n b,\n check_names=True,\n check_dtypes=True,\n check_divisions=True,\n check_index=True,\n **kwargs,\n):\n if check_divisions:\n assert_divisions(a)\n assert_divisions(b)\n if hasattr(a, \"divisions\") and hasattr(b, \"divisions\"):\n at = type(np.asarray(a.divisions).tolist()[0]) # numpy to python\n bt = type(np.asarray(b.divisions).tolist()[0]) # scalar conversion\n assert at == bt, (at, bt)\n assert_sane_keynames(a)\n assert_sane_keynames(b)\n a = _check_dask(a, check_names=check_names, check_dtypes=check_dtypes)\n b = _check_dask(b, check_names=check_names, check_dtypes=check_dtypes)\n if not check_index:\n a = a.reset_index(drop=True)\n b = b.reset_index(drop=True)\n if hasattr(a, \"to_pandas\"):\n a = a.to_pandas()\n if hasattr(b, \"to_pandas\"):\n b = b.to_pandas()\n if isinstance(a, pd.DataFrame):\n a = _maybe_sort(a)\n b = _maybe_sort(b)\n tm.assert_frame_equal(a, b, **kwargs)\n elif isinstance(a, pd.Series):\n a = _maybe_sort(a)\n b = _maybe_sort(b)\n tm.assert_series_equal(a, b, check_names=check_names, **kwargs)\n elif isinstance(a, pd.Index):\n tm.assert_index_equal(a, b, **kwargs)\n else:\n if a == b:\n return True\n else:\n if np.isnan(a):\n assert np.isnan(b)\n else:\n assert np.allclose(a, b)\n return True", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_assert_dask_graph_assert_sane_keynames.for_k_in_ddf_dask_keys_.assert_k_split_0_is": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_assert_dask_graph_assert_sane_keynames.for_k_in_ddf_dask_keys_.assert_k_split_0_is", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 840, "end_line": 888, "span_ids": ["assert_dask_graph", "assert_divisions", "assert_sane_keynames"], "tokens": 359}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def assert_dask_graph(dask, label):\n if hasattr(dask, \"dask\"):\n dask = dask.dask\n assert isinstance(dask, Mapping)\n for k in dask:\n if isinstance(k, tuple):\n k = k[0]\n if k.startswith(label):\n return True\n raise AssertionError(\n \"given dask graph doesn't contain label: {label}\".format(label=label)\n )\n\n\ndef assert_divisions(ddf):\n if not hasattr(ddf, \"divisions\"):\n return\n if not getattr(ddf, \"known_divisions\", False):\n return\n\n def index(x):\n if is_index_like(x):\n return x\n try:\n return x.index.get_level_values(0)\n except AttributeError:\n return x.index\n\n results = get_sync(ddf.dask, ddf.__dask_keys__())\n for i, df in enumerate(results[:-1]):\n if len(df):\n assert index(df).min() >= ddf.divisions[i]\n assert index(df).max() < ddf.divisions[i + 1]\n\n if len(results[-1]):\n assert index(results[-1]).min() >= ddf.divisions[-2]\n assert index(results[-1]).max() <= ddf.divisions[-1]\n\n\ndef assert_sane_keynames(ddf):\n if not hasattr(ddf, \"dask\"):\n return\n for k in ddf.dask.keys():\n 
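`assert_eq` is the workhorse of dask's dataframe test suite; a sketch of typical use, assuming dask ~2.x:

import pandas as pd
import dask.dataframe as dd
from dask.dataframe.utils import assert_eq

pdf = pd.DataFrame({"x": [1, 2, 3, 4]})
ddf = dd.from_pandas(pdf, npartitions=2)

assert_eq(ddf, pdf)         # validates graph/divisions/meta, then compares values
assert_eq(ddf.x.sum(), 10)  # scalars are compared after computing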
while isinstance(k, tuple):\n k = k[0]\n assert isinstance(k, (str, bytes))\n assert len(k) < 100\n assert \" \" not in k\n assert k.split(\"-\")[0].isidentifier()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_assert_dask_dtypes_assert_dask_dtypes.if_not_is_dask_collection.else_.if_hasattr_ddf__meta_dt.else_.assert_type_ddf__meta_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_assert_dask_dtypes_assert_dask_dtypes.if_not_is_dask_collection.else_.if_hasattr_ddf__meta_dt.else_.assert_type_ddf__meta_", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 891, "end_line": 924, "span_ids": ["assert_dask_dtypes"], "tokens": 336}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def assert_dask_dtypes(ddf, res, numeric_equal=True):\n \"\"\"Check that the dask metadata matches the result.\n\n If `numeric_equal`, integer and floating dtypes compare equal. This is\n useful due to the implicit conversion of integer to floating upon\n encountering missingness, which is hard to infer statically.\"\"\"\n\n eq_type_sets = [{\"O\", \"S\", \"U\", \"a\"}] # treat object and strings alike\n if numeric_equal:\n eq_type_sets.append({\"i\", \"f\", \"u\"})\n\n def eq_dtypes(a, b):\n return any(\n a.kind in eq_types and b.kind in eq_types for eq_types in eq_type_sets\n ) or (a == b)\n\n if not is_dask_collection(res) and is_dataframe_like(res):\n for col, a, b in pd.concat([ddf._meta.dtypes, res.dtypes], axis=1).itertuples():\n assert eq_dtypes(a, b)\n elif not is_dask_collection(res) and (is_index_like(res) or is_series_like(res)):\n a = ddf._meta.dtype\n b = res.dtype\n assert eq_dtypes(a, b)\n else:\n if hasattr(ddf._meta, \"dtype\"):\n a = ddf._meta.dtype\n if not hasattr(res, \"dtype\"):\n assert np.isscalar(res)\n b = np.dtype(type(res))\n else:\n b = res.dtype\n assert eq_dtypes(a, b)\n else:\n assert type(ddf._meta) == type(res)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_assert_max_deps_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_assert_max_deps_", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 927, "end_line": 979, "span_ids": ["valid_divisions", "drop_by_shallow_copy", "assert_max_deps"], "tokens": 347}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def assert_max_deps(x, n, eq=True):\n dependencies, dependents = get_deps(x.dask)\n if eq:\n assert max(map(len, dependencies.values())) == n\n else:\n assert max(map(len, dependencies.values())) <= n\n\n\ndef valid_divisions(divisions):\n \"\"\"Are the provided divisions valid?\n\n Examples\n --------\n >>> valid_divisions([1, 2, 3])\n True\n >>> valid_divisions([3, 2, 1])\n False\n >>> valid_divisions([1, 1, 1])\n False\n >>> valid_divisions([0, 1, 1])\n True\n >>> valid_divisions(123)\n False\n >>> valid_divisions([0, float('nan'), 1])\n False\n \"\"\"\n if not isinstance(divisions, (tuple, list)):\n return False\n\n for i, x in enumerate(divisions[:-2]):\n if x >= divisions[i + 1]:\n return False\n if isinstance(x, numbers.Number) and math.isnan(x):\n return False\n\n for x in divisions[-2:]:\n if isinstance(x, numbers.Number) and math.isnan(x):\n return False\n\n if divisions[-2] > divisions[-1]:\n return False\n\n return True\n\n\ndef drop_by_shallow_copy(df, columns, errors=\"raise\"):\n \"\"\"Use shallow copy to drop columns in place\"\"\"\n df2 = df.copy(deep=False)\n if not pd.api.types.is_list_like(columns):\n columns = [columns]\n df2.drop(columns=columns, inplace=True, errors=errors)\n return df2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/datasets.py_random_timeseries.return.make_timeseries_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/datasets.py_random_timeseries.return.make_timeseries_", "embedding": null, "metadata": {"file_path": "dask/datasets.py", "file_name": "datasets.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 64, "span_ids": ["imports", "timeseries"], "tokens": 602}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import random\n\nfrom .utils import import_required\n\n\ndef timeseries(\n start=\"2000-01-01\",\n end=\"2000-01-31\",\n freq=\"1s\",\n partition_freq=\"1d\",\n dtypes={\"name\": str, \"id\": int, \"x\": float, \"y\": float},\n seed=None,\n **kwargs\n):\n \"\"\"Create timeseries dataframe with random data\n\n Parameters\n ----------\n start : datetime (or datetime-like string)\n Start of time series\n end : datetime (or datetime-like string)\n End of time series\n dtypes : dict\n Mapping of column names to types.\n Valid types include {float, int, str, 'category'}\n freq : string\n String like '2s' or '1H' or '12W' for the time series frequency\n partition_freq : string\n String like '1M' or '2Y' to divide the dataframe into partitions\n seed : int (optional)\n Randomstate seed\n kwargs:\n Keywords to pass down to individual column creation functions.\n Keywords should be prefixed by the column name and then an underscore.\n\n Examples\n --------\n >>> import dask\n >>> df = dask.datasets.timeseries()\n >>> df.head() # doctest: +SKIP\n timestamp id name x y\n 
2000-01-01 00:00:00 967 Jerry -0.031348 -0.040633\n 2000-01-01 00:00:01 1066 Michael -0.262136 0.307107\n 2000-01-01 00:00:02 988 Wendy -0.526331 0.128641\n 2000-01-01 00:00:03 1016 Yvonne 0.620456 0.767270\n 2000-01-01 00:00:04 998 Ursula 0.684902 -0.463278\n >>> df = dask.datasets.timeseries(\n ... '2000', '2010',\n ... freq='2H', partition_freq='1D', seed=1, # data frequency\n ... dtypes={'value': float, 'name': str, 'id': int}, # data types\n ... id_lam=1000 # control number of items in id column\n ... )\n \"\"\"\n from dask.dataframe.io.demo import make_timeseries\n\n return make_timeseries(\n start=start,\n end=end,\n freq=freq,\n partition_freq=partition_freq,\n seed=seed,\n dtypes=dtypes,\n **kwargs\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/datasets.py__generate_mimesis__make_mimesis.return.db_Bag_dsk_name_npartit": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/datasets.py__generate_mimesis__make_mimesis.return.db_Bag_dsk_name_npartit", "embedding": null, "metadata": {"file_path": "dask/datasets.py", "file_name": "datasets.py", "file_type": "text/x-python", "category": "implementation", "start_line": 67, "end_line": 121, "span_ids": ["_generate_mimesis", "_make_mimesis"], "tokens": 367}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _generate_mimesis(field, schema_description, records_per_partition, seed):\n \"\"\"Generate data for a single partition of a dask bag\n\n See Also\n --------\n _make_mimesis\n \"\"\"\n from mimesis.schema import Schema, Field\n\n field = Field(seed=seed, **field)\n schema = Schema(schema=lambda: schema_description(field))\n for i in range(records_per_partition):\n yield schema.create(iterations=1)[0]\n\n\ndef _make_mimesis(field, schema, npartitions, records_per_partition, seed=None):\n \"\"\"\n Make a Dask Bag filled with data randomly generated by the mimesis projet\n\n Parameters\n ----------\n field: dict\n keyword arguments to pass to ``mimesis.Field``\n schema: Callable[Field] -> dict\n The schema to use to generate the data\n npartitions: int\n records_per_partition: int\n seed: int, None\n Seed for random data\n\n Returns\n -------\n Dask Bag\n\n See Also\n --------\n make_people\n \"\"\"\n import dask.bag as db\n from dask.base import tokenize\n\n field = field or {}\n\n random_state = random.Random(seed)\n seeds = [random_state.randint(0, 1 << 32) for _ in range(npartitions)]\n\n name = \"mimesis-\" + tokenize(\n field, schema, npartitions, records_per_partition, seed\n )\n dsk = {\n (name, i): (_generate_mimesis, field, schema, records_per_partition, seed)\n for i, seed in enumerate(seeds)\n }\n\n return db.Bag(dsk, name, npartitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/datasets.py_make_people_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/datasets.py_make_people_", "embedding": null, "metadata": {"file_path": "dask/datasets.py", "file_name": "datasets.py", "file_type": "text/x-python", "category": "implementation", "start_line": 124, "end_line": 166, "span_ids": ["make_people"], "tokens": 319}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def make_people(npartitions=10, records_per_partition=1000, seed=None, locale=\"en\"):\n \"\"\"Make a dataset of random people\n\n This makes a Dask Bag with dictionary records of randomly generated people.\n This requires the optional library ``mimesis`` to generate records.\n\n Parameters\n ----------\n npartitions : int\n Number of partitions\n records_per_partition : int\n Number of records in each partition\n seed : int, (optional)\n Random seed\n locale : str\n Language locale, like 'en', 'fr', 'zh', or 'ru'\n\n Returns\n -------\n b: Dask Bag\n \"\"\"\n import_required(\n \"mimesis\",\n \"The mimesis module is required for this function. Try:\\n\"\n \" python -m pip install mimesis\",\n )\n\n schema = lambda field: {\n \"age\": field(\"person.age\"),\n \"name\": (field(\"person.name\"), field(\"person.surname\")),\n \"occupation\": field(\"person.occupation\"),\n \"telephone\": field(\"person.telephone\"),\n \"address\": {\"address\": field(\"address.address\"), \"city\": field(\"address.city\")},\n \"credit-card\": {\n \"number\": field(\"payment.credit_card_number\"),\n \"expiration-date\": field(\"payment.credit_card_expiration_date\"),\n },\n }\n\n return _make_mimesis(\n {\"locale\": locale}, schema, npartitions, records_per_partition, seed\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_operator_finalize.return.Delayed_name_graph_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_operator_finalize.return.Delayed_name_graph_", "embedding": null, "metadata": {"file_path": "dask/delayed.py", "file_name": "delayed.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 39, "span_ids": ["finalize", "imports", "unzip"], "tokens": 270}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import operator\nimport types\nimport uuid\nimport warnings\nfrom collections.abc import Iterator\n\nfrom tlz import curry, concat, unique, merge\n\nfrom . 
import config, threaded\nfrom .base import is_dask_collection, dont_optimize, DaskMethodsMixin\nfrom .base import tokenize as _tokenize\nfrom .compatibility import is_dataclass, dataclass_fields\n\nfrom .core import quote\nfrom .context import globalmethod\nfrom .optimization import cull\nfrom .utils import funcname, methodcaller, OperatorMethodMixin, ensure_dict, apply\nfrom .highlevelgraph import HighLevelGraph\n\n__all__ = [\"Delayed\", \"delayed\"]\n\n\ndef unzip(ls, nout):\n \"\"\"Unzip a list of lists into ``nout`` outputs.\"\"\"\n out = list(zip(*ls))\n if not out:\n out = [()] * nout\n return out\n\n\ndef finalize(collection):\n assert is_dask_collection(collection)\n\n name = \"finalize-\" + tokenize(collection)\n keys = collection.__dask_keys__()\n finalize, args = collection.__dask_postcompute__()\n layer = {name: (finalize, keys) + args}\n graph = HighLevelGraph.from_collections(name, layer, dependencies=[collection])\n return Delayed(name, graph)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_unpack_collections_unpack_collections.return.expr_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_unpack_collections_unpack_collections.return.expr_", "embedding": null, "metadata": {"file_path": "dask/delayed.py", "file_name": "delayed.py", "file_type": "text/x-python", "category": "implementation", "start_line": 42, "end_line": 113, "span_ids": ["unpack_collections"], "tokens": 527}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def unpack_collections(expr):\n \"\"\"Normalize a python object and merge all sub-graphs.\n\n - Replace ``Delayed`` with their keys\n - Convert literals to things the schedulers can handle\n - Extract dask graphs from all enclosed values\n\n Parameters\n ----------\n expr : object\n The object to be normalized. 
This function knows how to handle\n dask collections, as well as most builtin python types.\n\n Returns\n -------\n task : normalized task to be run\n collections : a tuple of collections\n\n Examples\n --------\n >>> import dask\n >>> a = delayed(1, 'a')\n >>> b = delayed(2, 'b')\n >>> task, collections = unpack_collections([a, b, 3])\n >>> task # doctest: +SKIP\n ['a', 'b', 3]\n >>> collections # doctest: +SKIP\n (a, b)\n\n >>> task, collections = unpack_collections({a: 1, b: 2})\n >>> task # doctest: +SKIP\n (dict, [['a', 1], ['b', 2]])\n >>> collections # doctest: +SKIP\n {a, b}\n \"\"\"\n if isinstance(expr, Delayed):\n return expr._key, (expr,)\n\n if is_dask_collection(expr):\n finalized = finalize(expr)\n return finalized._key, (finalized,)\n\n if isinstance(expr, Iterator):\n expr = tuple(expr)\n\n typ = type(expr)\n\n if typ in (list, tuple, set):\n args, collections = unzip((unpack_collections(e) for e in expr), 2)\n args = list(args)\n collections = tuple(unique(concat(collections), key=id))\n # Ensure output type matches input type\n if typ is not list:\n args = (typ, args)\n return args, collections\n\n if typ is dict:\n args, collections = unpack_collections([[k, v] for k, v in expr.items()])\n return (dict, args), collections\n\n if typ is slice:\n args, collections = unpack_collections([expr.start, expr.stop, expr.step])\n return (slice,) + tuple(args), collections\n\n if is_dataclass(expr):\n args, collections = unpack_collections(\n [[f.name, getattr(expr, f.name)] for f in dataclass_fields(expr)]\n )\n\n return (apply, typ, (), (dict, args)), collections\n\n return expr, ()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_to_task_dask_to_task_dask.return.expr_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_to_task_dask_to_task_dask.return.expr_", "embedding": null, "metadata": {"file_path": "dask/delayed.py", "file_name": "delayed.py", "file_type": "text/x-python", "category": "implementation", "start_line": 116, "end_line": 195, "span_ids": ["to_task_dask"], "tokens": 698}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_task_dask(expr):\n \"\"\"Normalize a python object and merge all sub-graphs.\n\n - Replace ``Delayed`` with their keys\n - Convert literals to things the schedulers can handle\n - Extract dask graphs from all enclosed values\n\n Parameters\n ----------\n expr : object\n The object to be normalized. 
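A sketch of `unpack_collections` on a mixed list, following the doctest above:

from dask.delayed import delayed, unpack_collections

a = delayed(1, name="a")
b = delayed(2, name="b")
task, collections = unpack_collections([a, b, 3])
print(task)                          # ['a', 'b', 3]
print([c.key for c in collections])  # ['a', 'b']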
This function knows how to handle\n ``Delayed``s, as well as most builtin python types.\n\n Returns\n -------\n task : normalized task to be run\n dask : a merged dask graph that forms the dag for this task\n\n Examples\n --------\n >>> import dask\n >>> a = delayed(1, 'a')\n >>> b = delayed(2, 'b')\n >>> task, dask = to_task_dask([a, b, 3]) # doctest: +SKIP\n >>> task # doctest: +SKIP\n ['a', 'b', 3]\n >>> dict(dask) # doctest: +SKIP\n {'a': 1, 'b': 2}\n\n >>> task, dasks = to_task_dask({a: 1, b: 2}) # doctest: +SKIP\n >>> task # doctest: +SKIP\n (dict, [['a', 1], ['b', 2]])\n >>> dict(dask) # doctest: +SKIP\n {'a': 1, 'b': 2}\n \"\"\"\n warnings.warn(\n \"The dask.delayed.to_dask_dask function has been \"\n \"Deprecated in favor of unpack_collections\",\n stacklevel=2,\n )\n\n if isinstance(expr, Delayed):\n return expr.key, expr.dask\n\n if is_dask_collection(expr):\n name = \"finalize-\" + tokenize(expr, pure=True)\n keys = expr.__dask_keys__()\n opt = getattr(expr, \"__dask_optimize__\", dont_optimize)\n finalize, args = expr.__dask_postcompute__()\n dsk = {name: (finalize, keys) + args}\n dsk.update(opt(expr.__dask_graph__(), keys))\n return name, dsk\n\n if isinstance(expr, Iterator):\n expr = list(expr)\n typ = type(expr)\n\n if typ in (list, tuple, set):\n args, dasks = unzip((to_task_dask(e) for e in expr), 2)\n args = list(args)\n dsk = merge(dasks)\n # Ensure output type matches input type\n return (args, dsk) if typ is list else ((typ, args), dsk)\n\n if typ is dict:\n args, dsk = to_task_dask([[k, v] for k, v in expr.items()])\n return (dict, args), dsk\n\n if is_dataclass(expr):\n args, dsk = to_task_dask(\n [[f.name, getattr(expr, f.name)] for f in dataclass_fields(expr)]\n )\n\n return (apply, typ, (), (dict, args)), dsk\n\n if typ is slice:\n args, dsk = to_task_dask([expr.start, expr.stop, expr.step])\n return (slice,) + tuple(args), dsk\n\n return expr, {}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_tokenize_delayed": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_tokenize_delayed", "embedding": null, "metadata": {"file_path": "dask/delayed.py", "file_name": "delayed.py", "file_type": "text/x-python", "category": "implementation", "start_line": 196, "end_line": 445, "span_ids": ["tokenize", "delayed"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def tokenize(*args, **kwargs):\n \"\"\"Mapping function from task -> consistent name.\n\n Parameters\n ----------\n args : object\n Python objects that summarize the task.\n pure : boolean, optional\n If True, a consistent hash function is tried on the input. If this\n fails, then a unique identifier is used. 
If False (default), then a\n unique identifier is always used.\n \"\"\"\n pure = kwargs.pop(\"pure\", None)\n if pure is None:\n pure = config.get(\"delayed_pure\", False)\n\n if pure:\n return _tokenize(*args, **kwargs)\n else:\n return str(uuid.uuid4())\n\n\n@curry\ndef delayed(obj, name=None, pure=None, nout=None, traverse=True):\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_delayed._Wraps_a_function_or_ob_delayed._Wraps_a_function_or_ob": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_delayed._Wraps_a_function_or_ob_delayed._Wraps_a_function_or_ob", "embedding": null, "metadata": {"file_path": "dask/delayed.py", "file_name": "delayed.py", "file_type": "text/x-python", "category": "implementation", "start_line": 222, "end_line": 424, "span_ids": ["delayed"], "tokens": 1990}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@curry\ndef delayed(obj, name=None, pure=None, nout=None, traverse=True):\n \"\"\"Wraps a function or object to produce a ``Delayed``.\n\n ``Delayed`` objects act as proxies for the object they wrap, but all\n operations on them are done lazily by building up a dask graph internally.\n\n Parameters\n ----------\n obj : object\n The function or object to wrap\n name : string or hashable, optional\n The key to use in the underlying graph for the wrapped object. Defaults\n to hashing content. Note that this only affects the name of the object\n wrapped by this call to delayed, and *not* the output of delayed\n function calls - for that use ``dask_key_name=`` as described below.\n\n .. note::\n\n Because this ``name`` is used as the key in task graphs, you should\n ensure that it uniquely identifies ``obj``. If you'd like to provide\n a descriptive name that is still unique, combine the descriptive name\n with :func:`dask.base.tokenize` of the ``array_like``. See\n :ref:`graphs` for more.\n\n pure : bool, optional\n Indicates whether calling the resulting ``Delayed`` object is a pure\n operation. If True, arguments to the call are hashed to produce\n deterministic keys. If not provided, the default is to check the global\n ``delayed_pure`` setting, and fallback to ``False`` if unset.\n nout : int, optional\n The number of outputs returned from calling the resulting ``Delayed``\n object. If provided, the ``Delayed`` output of the call can be iterated\n into ``nout`` objects, allowing for unpacking of results. By default\n iteration over ``Delayed`` objects will error. Note, that ``nout=1``\n expects ``obj`` to return a tuple of length 1, and consequently for\n ``nout=0``, ``obj`` should return an empty tuple.\n traverse : bool, optional\n By default dask traverses builtin python collections looking for dask\n objects passed to ``delayed``. For large collections this can be\n expensive. 
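A sketch of how `pure` changes key generation in `tokenize`/`delayed` above; the lambdas stand in for any callables:

from dask.delayed import delayed

inc = delayed(lambda x: x + 1, pure=True)
print(inc(10).key == inc(10).key)    # True: arguments hash to a deterministic key

impure = delayed(lambda: 42, pure=False)
print(impure().key == impure().key)  # False: each call gets a fresh uuid4 key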
If ``obj`` doesn't contain any dask objects, set\n ``traverse=False`` to avoid doing this traversal.\n\n Examples\n --------\n Apply to functions to delay execution:\n\n >>> from dask import delayed\n >>> def inc(x):\n ... return x + 1\n\n >>> inc(10)\n 11\n\n >>> x = delayed(inc, pure=True)(10)\n >>> type(x) == Delayed # doctest: +SKIP\n True\n >>> x.compute()\n 11\n\n Can be used as a decorator:\n\n >>> @delayed(pure=True)\n ... def add(a, b):\n ... return a + b\n >>> add(1, 2).compute()\n 3\n\n ``delayed`` also accepts an optional keyword ``pure``. If False, then\n subsequent calls will always produce a different ``Delayed``. This is\n useful for non-pure functions (such as ``time`` or ``random``).\n\n >>> from random import random\n >>> out1 = delayed(random, pure=False)()\n >>> out2 = delayed(random, pure=False)()\n >>> out1.key == out2.key\n False\n\n If you know a function is pure (output only depends on the input, with no\n global state), then you can set ``pure=True``. This will attempt to apply a\n consistent name to the output, but will fallback on the same behavior of\n ``pure=False`` if this fails.\n\n >>> @delayed(pure=True)\n ... def add(a, b):\n ... return a + b\n >>> out1 = add(1, 2)\n >>> out2 = add(1, 2)\n >>> out1.key == out2.key\n True\n\n Instead of setting ``pure`` as a property of the callable, you can also set\n it contextually using the ``delayed_pure`` setting. Note that this\n influences the *call* and not the *creation* of the callable:\n\n >>> @delayed\n ... def mul(a, b):\n ... return a * b\n >>> import dask\n >>> with dask.config.set(delayed_pure=True):\n ... print(mul(1, 2).key == mul(1, 2).key)\n True\n >>> with dask.config.set(delayed_pure=False):\n ... print(mul(1, 2).key == mul(1, 2).key)\n False\n\n The key name of the result of calling a delayed object is determined by\n hashing the arguments by default. To explicitly set the name, you can use\n the ``dask_key_name`` keyword when calling the function:\n\n >>> add(1, 2) # doctest: +SKIP\n Delayed('add-3dce7c56edd1ac2614add714086e950f')\n >>> add(1, 2, dask_key_name='three')\n Delayed('three')\n\n Note that objects with the same key name are assumed to have the same\n result. If you set the names explicitly you should make sure your key names\n are different for different results.\n\n >>> add(1, 2, dask_key_name='three')\n Delayed('three')\n >>> add(2, 1, dask_key_name='three')\n Delayed('three')\n >>> add(2, 2, dask_key_name='four')\n Delayed('four')\n\n ``delayed`` can also be applied to objects to make operations on them lazy:\n\n >>> a = delayed([1, 2, 3])\n >>> isinstance(a, Delayed) # doctest: +SKIP\n True\n >>> a.compute()\n [1, 2, 3]\n\n The key name of a delayed object is hashed by default if ``pure=True`` or\n is generated randomly if ``pure=False`` (default). To explicitly set the\n name, you can use the ``name`` keyword. To ensure that the key is unique\n you should include the tokenized value as well, or otherwise ensure that\n it's unique:\n\n >>> from dask.base import tokenize\n >>> data = [1, 2, 3]\n >>> a = delayed(data, name='mylist-' + tokenize(data))\n >>> a # doctest: +SKIP\n Delayed('mylist-55af65871cb378a4fa6de1660c3e8fb7')\n\n Delayed results act as a proxy to the underlying object. 
Many operators\n are supported:\n\n >>> (a + [1, 2]).compute() # doctest: +SKIP\n [1, 2, 3, 1, 2]\n >>> a[1].compute() # doctest: +SKIP\n 2\n\n Method and attribute access also works:\n\n >>> a.count(2).compute() # doctest: +SKIP\n 1\n\n Note that if a method doesn't exist, no error will be thrown until runtime:\n\n >>> res = a.not_a_real_method() # doctest: +SKIP\n >>> res.compute() # doctest: +SKIP\n AttributeError(\"'list' object has no attribute 'not_a_real_method'\")\n\n \"Magic\" methods (e.g. operators and attribute access) are assumed to be\n pure, meaning that subsequent calls must return the same results. This\n behavior is not overrideable through the ``delayed`` call, but can be\n modified using other ways as described below.\n\n To invoke an impure attribute or operator, you'd need to use it in a\n delayed function with ``pure=False``:\n\n >>> class Incrementer(object):\n ... def __init__(self):\n ... self._n = 0\n ... @property\n ... def n(self):\n ... self._n += 1\n ... return self._n\n ...\n >>> x = delayed(Incrementer())\n >>> x.n.key == x.n.key\n True\n >>> get_n = delayed(lambda x: x.n, pure=False)\n >>> get_n(x).key == get_n(x).key\n False\n\n In contrast, methods are assumed to be impure by default, meaning that\n subsequent calls may return different results. To assume purity, set\n `pure=True`. This allows sharing of any intermediate values.\n\n >>> a.count(2, pure=True).key == a.count(2, pure=True).key\n True\n\n As with function calls, method calls also respect the global\n ``delayed_pure`` setting and support the ``dask_key_name`` keyword:\n\n >>> a.count(2, dask_key_name=\"count_2\")\n Delayed('count_2')\n >>> import dask\n >>> with dask.config.set(delayed_pure=True):\n ... print(a.count(2).key == a.count(2).key)\n True\n \"\"\"\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_delayed.if_isinstance_obj_Delaye_rebuild.return.Delayed_key_dsk_length_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_delayed.if_isinstance_obj_Delaye_rebuild.return.Delayed_key_dsk_length_", "embedding": null, "metadata": {"file_path": "dask/delayed.py", "file_name": "delayed.py", "file_type": "text/x-python", "category": "implementation", "start_line": 418, "end_line": 464, "span_ids": ["right", "delayed", "optimize", "rebuild"], "tokens": 355}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@curry\ndef delayed(obj, name=None, pure=None, nout=None, traverse=True):\n if isinstance(obj, Delayed):\n return obj\n\n if is_dask_collection(obj) or traverse:\n task, collections = unpack_collections(obj)\n else:\n task = quote(obj)\n collections = set()\n\n if task is obj:\n if not (nout is None or (type(nout) is int and nout >= 0)):\n raise ValueError(\n \"nout must be None or a non-negative integer, got %s\" % nout\n )\n if not name:\n try:\n prefix = obj.__name__\n except AttributeError:\n prefix = type(obj).__name__\n token = tokenize(obj, nout, pure=pure)\n name = \"%s-%s\" % (prefix, token)\n return DelayedLeaf(obj, name, pure=pure, nout=nout)\n else:\n if not name:\n name = \"%s-%s\" % (type(obj).__name__, tokenize(task, pure=pure))\n layer = {name: task}\n graph = HighLevelGraph.from_collections(name, layer, dependencies=collections)\n return Delayed(name, graph)\n\n\ndef right(method):\n \"\"\"Wrapper to create 'right' version of operator given left version\"\"\"\n\n def _inner(self, other):\n return method(other, self)\n\n return _inner\n\n\ndef optimize(dsk, keys, **kwargs):\n dsk = ensure_dict(dsk)\n dsk2, _ = cull(dsk, keys)\n return dsk2\n\n\ndef rebuild(dsk, key, length):\n return Delayed(key, dsk, length)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_Delayed_Delayed._get_unary_operator._get_binary_operator": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_Delayed_Delayed._get_unary_operator._get_binary_operator", "embedding": null, "metadata": {"file_path": "dask/delayed.py", "file_name": "delayed.py", "file_type": "text/x-python", "category": "implementation", "start_line": 467, "end_line": 587, "span_ids": ["Delayed.__bool__", "Delayed.__setstate__", "Delayed.__repr__", "Delayed.__hash__", "Delayed.__dask_keys__", "Delayed.__dask_postcompute__", "Delayed.__iter__", "Delayed.__dask_tokenize__", "Delayed.__len__", "Delayed:5", "Delayed.__init__", "Delayed:11", "Delayed.__dask_layers__", "Delayed.__dir__", "Delayed:9", "Delayed._get_binary_operator", "Delayed.__get__", 
"Delayed.__dask_postpersist__", "Delayed.__call__", "Delayed.__getattr__", "Delayed.__getstate__", "Delayed.__dask_graph__", "Delayed", "Delayed.__setitem__", "Delayed.__setattr__", "Delayed.key"], "tokens": 911}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Delayed(DaskMethodsMixin, OperatorMethodMixin):\n \"\"\"Represents a value to be computed by dask.\n\n Equivalent to the output from a single key in a dask graph.\n \"\"\"\n\n __slots__ = (\"_key\", \"dask\", \"_length\", \"_dask_layers\")\n\n def __init__(self, key, dsk, length=None):\n self._key = key\n self.dask = dsk\n self._length = length\n\n # `__dask_layers__` of an one-layered HLG is the layer name\n # TODO: what is the name of a multi-layered HLG?\n if isinstance(dsk, HighLevelGraph) and len(dsk.layers) == 1:\n self._dask_layers = set(iter(dsk.layers))\n else:\n self._dask_layers = (self.key,)\n\n def __dask_graph__(self):\n return self.dask\n\n def __dask_keys__(self):\n return [self.key]\n\n def __dask_layers__(self):\n if hasattr(self, \"_dask_layers\"):\n return self._dask_layers\n else:\n return (self.key,)\n\n def __dask_tokenize__(self):\n return self.key\n\n __dask_scheduler__ = staticmethod(threaded.get)\n __dask_optimize__ = globalmethod(optimize, key=\"delayed_optimize\")\n\n def __dask_postcompute__(self):\n return single_key, ()\n\n def __dask_postpersist__(self):\n return rebuild, (self._key, getattr(self, \"_length\", None))\n\n def __getstate__(self):\n return tuple(getattr(self, i) for i in self.__slots__)\n\n def __setstate__(self, state):\n for k, v in zip(self.__slots__, state):\n setattr(self, k, v)\n\n @property\n def key(self):\n return self._key\n\n def __repr__(self):\n return \"Delayed({0})\".format(repr(self.key))\n\n def __hash__(self):\n return hash(self.key)\n\n def __dir__(self):\n return dir(type(self))\n\n def __getattr__(self, attr):\n if attr.startswith(\"_\"):\n raise AttributeError(\"Attribute {0} not found\".format(attr))\n\n if attr == \"visualise\":\n # added to warn users in case of spelling error\n # for more details: https://github.com/dask/dask/issues/5721\n warnings.warn(\n \"dask.delayed objects have no `visualise` method. 
Perhaps you meant `visualize`?\"\n )\n\n return DelayedAttr(self, attr)\n\n def __setattr__(self, attr, val):\n if attr in self.__slots__:\n object.__setattr__(self, attr, val)\n else:\n raise TypeError(\"Delayed objects are immutable\")\n\n def __setitem__(self, index, val):\n raise TypeError(\"Delayed objects are immutable\")\n\n def __iter__(self):\n if getattr(self, \"_length\", None) is None:\n raise TypeError(\"Delayed objects of unspecified length are not iterable\")\n for i in range(self._length):\n yield self[i]\n\n def __len__(self):\n if getattr(self, \"_length\", None) is None:\n raise TypeError(\"Delayed objects of unspecified length have no len()\")\n return self._length\n\n def __call__(self, *args, **kwargs):\n pure = kwargs.pop(\"pure\", None)\n name = kwargs.pop(\"dask_key_name\", None)\n func = delayed(apply, pure=pure)\n if name is not None:\n return func(self, args, kwargs, dask_key_name=name)\n return func(self, args, kwargs)\n\n def __bool__(self):\n raise TypeError(\"Truth of Delayed objects is not supported\")\n\n __nonzero__ = __bool__\n\n def __get__(self, instance, cls):\n if instance is None:\n return self\n return types.MethodType(self, instance)\n\n @classmethod\n def _get_binary_operator(cls, op, inv=False):\n method = delayed(right(op) if inv else op, pure=True)\n return lambda *args, **kwargs: method(*args, **kwargs)\n\n _get_unary_operator = _get_binary_operator", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_call_function_call_function.return.Delayed_name_graph_leng": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_call_function_call_function.return.Delayed_name_graph_leng", "embedding": null, "metadata": {"file_path": "dask/delayed.py", "file_name": "delayed.py", "file_type": "text/x-python", "category": "implementation", "start_line": 590, "end_line": 616, "span_ids": ["call_function"], "tokens": 224}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def call_function(func, func_token, args, kwargs, pure=None, nout=None):\n dask_key_name = kwargs.pop(\"dask_key_name\", None)\n pure = kwargs.pop(\"pure\", pure)\n\n if dask_key_name is None:\n name = \"%s-%s\" % (\n funcname(func),\n tokenize(func_token, *args, pure=pure, **kwargs),\n )\n else:\n name = dask_key_name\n\n args2, collections = unzip(map(unpack_collections, args), 2)\n collections = list(concat(collections))\n\n if kwargs:\n dask_kwargs, collections2 = unpack_collections(kwargs)\n collections.extend(collections2)\n task = (apply, func, list(args2), dask_kwargs)\n else:\n task = (func,) + args2\n\n graph = HighLevelGraph.from_collections(\n name, {name: task}, dependencies=collections\n )\n nout = nout if nout is not None else None\n return Delayed(name, graph, length=nout)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
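
The `delayed`/`DelayedLeaf`/`call_function` path serialized above is easiest to see end to end in use. The following is a minimal usage sketch, not part of the docstore itself: it assumes only a standard dask installation, and the names `add`, `divmod_`, and the key "total" are illustrative, not anything defined in the repository.

from dask import delayed

@delayed(pure=True)
def add(a, b):
    # pure=True: identical arguments hash to the same key, so repeated
    # calls with the same inputs share one task in the graph.
    return a + b

@delayed(nout=2)
def divmod_(a, b):
    # nout=2: the single Delayed result can be unpacked into two objects,
    # matching the tuple this function returns.
    return a // b, a % b

total = add(1, 2, dask_key_name="total")  # explicit key, as documented above
q, r = divmod_(10, 3)                     # unpacking enabled by nout=2
assert total.key == "total"
assert (total.compute(), q.compute(), r.compute()) == (3, 3, 1)
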
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_DelayedLeaf_DelayedLeaf.__call__.return.call_function_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_DelayedLeaf_DelayedLeaf.__call__.return.call_function_", "embedding": null, "metadata": {"file_path": "dask/delayed.py", "file_name": "delayed.py", "file_type": "text/x-python", "category": "implementation", "start_line": 619, "end_line": 637, "span_ids": ["DelayedLeaf.dask", "DelayedLeaf.__call__", "DelayedLeaf.__init__", "DelayedLeaf"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DelayedLeaf(Delayed):\n __slots__ = (\"_obj\", \"_key\", \"_pure\", \"_nout\")\n\n def __init__(self, obj, key, pure=None, nout=None):\n self._obj = obj\n self._key = key\n self._pure = pure\n self._nout = nout\n\n @property\n def dask(self):\n return HighLevelGraph.from_collections(\n self._key, {self._key: self._obj}, dependencies=()\n )\n\n def __call__(self, *args, **kwargs):\n return call_function(\n self._obj, self._key, args, kwargs, pure=self._pure, nout=self._nout\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_DelayedAttr_DelayedAttr.__call__.return.call_function_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_DelayedAttr_DelayedAttr.__call__.return.call_function_", "embedding": null, "metadata": {"file_path": "dask/delayed.py", "file_name": "delayed.py", "file_type": "text/x-python", "category": "implementation", "start_line": 647, "end_line": 675, "span_ids": ["DelayedAttr.dask", "DelayedAttr.__init__", "DelayedAttr.__call__", "DelayedAttr.__getattr__", "DelayedAttr"], "tokens": 310}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DelayedAttr(Delayed):\n __slots__ = (\"_obj\", \"_attr\", \"_key\")\n\n def __init__(self, obj, attr):\n self._obj = obj\n self._attr = attr\n self._key = \"getattr-%s\" % tokenize(obj, attr, pure=True)\n\n def __getattr__(self, attr):\n # Calling np.dtype(dask.delayed(...)) used to result in a segfault, as\n # numpy recursively tries to get `dtype` from the object. This is\n # likely a bug in numpy. For now, we can do a dumb for if\n # `x.dtype().dtype()` is called (which shouldn't ever show up in real\n # code). 
See https://github.com/dask/dask/pull/4374#issuecomment-454381465\n if attr == \"dtype\" and self._attr == \"dtype\":\n raise AttributeError(\"Attribute %s not found\" % attr)\n return super().__getattr__(attr)\n\n @property\n def dask(self):\n layer = {self._key: (getattr, self._obj._key, self._attr)}\n return HighLevelGraph.from_collections(\n self._key, layer, dependencies=[self._obj]\n )\n\n def __call__(self, *args, **kwargs):\n return call_function(\n methodcaller(self._attr), self._attr, (self._obj,) + args, kwargs\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_for_op_in__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_for_op_in__", "embedding": null, "metadata": {"file_path": "dask/delayed.py", "file_name": "delayed.py", "file_type": "text/x-python", "category": "implementation", "start_line": 671, "end_line": 708, "span_ids": ["impl:4", "single_key"], "tokens": 159}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "for op in [\n operator.abs,\n operator.neg,\n operator.pos,\n operator.invert,\n operator.add,\n operator.sub,\n operator.mul,\n operator.floordiv,\n operator.truediv,\n operator.mod,\n operator.pow,\n operator.and_,\n operator.or_,\n operator.xor,\n operator.lshift,\n operator.rshift,\n operator.eq,\n operator.ge,\n operator.gt,\n operator.ne,\n operator.le,\n operator.lt,\n operator.getitem,\n]:\n Delayed._bind_operator(op)\n\n\ntry:\n Delayed._bind_operator(operator.matmul)\nexcept AttributeError:\n pass\n\n\ndef single_key(seq):\n \"\"\" Pick out the only element of this list, a list of keys \"\"\"\n return seq[0]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/__init__.py__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/__init__.py__", "embedding": null, "metadata": {"file_path": "dask/diagnostics/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 5, "span_ids": ["imports"], "tokens": 32}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from ..callbacks import Callback\nfrom .profile import Profiler, ResourceProfiler, CacheProfiler\nfrom .progress import ProgressBar\nfrom .profile_visualize import visualize", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", 
"class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile.py_from_collections_import_n_Profiler.clear.self._dsk._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile.py_from_collections_import_n_Profiler.clear.self._dsk._", "embedding": null, "metadata": {"file_path": "dask/diagnostics/profile.py", "file_name": "profile.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 103, "span_ids": ["Profiler._posttask", "Profiler.visualize", "Profiler._finish", "imports", "Profiler.__enter__", "Profiler._pretask", "Profiler.clear", "Profiler", "Profiler._plot", "Profiler._start", "Profiler.__init__"], "tokens": 744}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from collections import namedtuple\nfrom itertools import starmap\nfrom timeit import default_timer\nfrom time import sleep\nfrom multiprocessing import Process, Pipe, current_process\n\nfrom ..callbacks import Callback\nfrom ..utils import import_required\n\n\n# Stores execution data for each task\nTaskData = namedtuple(\n \"TaskData\", (\"key\", \"task\", \"start_time\", \"end_time\", \"worker_id\")\n)\n\n\nclass Profiler(Callback):\n \"\"\"A profiler for dask execution at the task level.\n\n Records the following information for each task:\n 1. Key\n 2. Task\n 3. Start time in seconds since the epoch\n 4. Finish time in seconds since the epoch\n 5. Worker id\n\n Examples\n --------\n\n >>> from operator import add, mul\n >>> from dask.threaded import get\n >>> from dask.diagnostics import Profiler\n >>> dsk = {'x': 1, 'y': (add, 'x', 10), 'z': (mul, 'y', 2)}\n >>> with Profiler() as prof:\n ... get(dsk, 'z')\n 22\n\n >>> prof.results # doctest: +SKIP\n [('y', (add, 'x', 10), 1435352238.48039, 1435352238.480655, 140285575100160),\n ('z', (mul, 'y', 2), 1435352238.480657, 1435352238.480803, 140285566707456)]\n\n These results can be visualized in a bokeh plot using the ``visualize``\n method. 
Note that this requires bokeh to be installed.\n\n >>> prof.visualize() # doctest: +SKIP\n\n You can activate the profiler globally\n\n >>> prof.register() # doctest: +SKIP\n\n If you use the profiler globally you will need to clear out old results\n manually.\n\n >>> prof.clear()\n\n \"\"\"\n\n def __init__(self):\n self._results = {}\n self.results = []\n self._dsk = {}\n\n def __enter__(self):\n self.clear()\n return super().__enter__()\n\n def _start(self, dsk):\n self._dsk.update(dsk)\n\n def _pretask(self, key, dsk, state):\n start = default_timer()\n self._results[key] = (key, dsk[key], start)\n\n def _posttask(self, key, value, dsk, state, id):\n end = default_timer()\n self._results[key] += (end, id)\n\n def _finish(self, dsk, state, failed):\n results = dict((k, v) for k, v in self._results.items() if len(v) == 5)\n self.results += list(starmap(TaskData, results.values()))\n self._results.clear()\n\n def _plot(self, **kwargs):\n from .profile_visualize import plot_tasks\n\n return plot_tasks(self.results, self._dsk, **kwargs)\n\n def visualize(self, **kwargs):\n \"\"\"Visualize the profiling run in a bokeh plot.\n\n See also\n --------\n dask.diagnostics.profile_visualize.visualize\n \"\"\"\n from .profile_visualize import visualize\n\n return visualize(self, **kwargs)\n\n def clear(self):\n \"\"\"Clear out old results from profiler\"\"\"\n self._results.clear()\n del self.results[:]\n self._dsk = {}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile.py_ResourceData_ResourceProfiler.visualize.return.visualize_self_kwargs_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile.py_ResourceData_ResourceProfiler.visualize.return.visualize_self_kwargs_", "embedding": null, "metadata": {"file_path": "dask/diagnostics/profile.py", "file_name": "profile.py", "file_type": "text/x-python", "category": "implementation", "start_line": 106, "end_line": 210, "span_ids": ["impl:3", "ResourceProfiler", "ResourceProfiler._is_running", "ResourceProfiler.__enter__", "ResourceProfiler.__exit__", "ResourceProfiler._start_collect", "ResourceProfiler._stop_collect", "ResourceProfiler:3", "ResourceProfiler.clear", "ResourceProfiler.visualize", "ResourceProfiler._finish", "ResourceProfiler.__init__", "ResourceProfiler.close", "ResourceProfiler._plot", "ResourceProfiler._start"], "tokens": 685}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "ResourceData = namedtuple(\"ResourceData\", (\"time\", \"mem\", \"cpu\"))\n\n\nclass ResourceProfiler(Callback):\n \"\"\"A profiler for resource use.\n\n Records the following each timestep\n 1. Time in seconds since the epoch\n 2. Memory usage in MB\n 3. % CPU usage\n\n Examples\n --------\n\n >>> from operator import add, mul\n >>> from dask.threaded import get\n >>> dsk = {'x': 1, 'y': (add, 'x', 10), 'z': (mul, 'y', 2)}\n >>> with ResourceProfiler() as prof: # doctest: +SKIP\n ... 
get(dsk, 'z')\n 22\n\n These results can be visualized in a bokeh plot using the ``visualize``\n method. Note that this requires bokeh to be installed.\n\n >>> prof.visualize() # doctest: +SKIP\n\n You can activate the profiler globally\n\n >>> prof.register() # doctest: +SKIP\n\n If you use the profiler globally you will need to clear out old results\n manually.\n\n >>> prof.clear() # doctest: +SKIP\n\n Note that when used as a context manager data will be collected throughout\n the duration of the enclosed block. In contrast, when registered globally\n data will only be collected while a dask scheduler is active.\n \"\"\"\n\n def __init__(self, dt=1):\n self._dt = dt\n self._entered = False\n self._tracker = None\n self.results = []\n\n def _is_running(self):\n return self._tracker is not None and self._tracker.is_alive()\n\n def _start_collect(self):\n if not self._is_running():\n self._tracker = _Tracker(self._dt)\n self._tracker.start()\n self._tracker.parent_conn.send(\"collect\")\n\n def _stop_collect(self):\n if self._is_running():\n self._tracker.parent_conn.send(\"send_data\")\n self.results.extend(starmap(ResourceData, self._tracker.parent_conn.recv()))\n\n def __enter__(self):\n self._entered = True\n self.clear()\n self._start_collect()\n return super().__enter__()\n\n def __exit__(self, *args):\n self._entered = False\n self._stop_collect()\n self.close()\n super().__exit__(*args)\n\n def _start(self, dsk):\n self._start_collect()\n\n def _finish(self, dsk, state, failed):\n if not self._entered:\n self._stop_collect()\n\n def close(self):\n \"\"\"Shutdown the resource tracker process\"\"\"\n if self._is_running():\n self._tracker.shutdown()\n self._tracker = None\n\n __del__ = close\n\n def clear(self):\n self.results = []\n\n def _plot(self, **kwargs):\n from .profile_visualize import plot_resources\n\n return plot_resources(self.results, **kwargs)\n\n def visualize(self, **kwargs):\n \"\"\"Visualize the profiling run in a bokeh plot.\n\n See also\n --------\n dask.diagnostics.profile_visualize.visualize\n \"\"\"\n from .profile_visualize import visualize\n\n return visualize(self, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile.py__Tracker__Tracker._update_pids.return._self_parent_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile.py__Tracker__Tracker._update_pids.return._self_parent_", "embedding": null, "metadata": {"file_path": "dask/diagnostics/profile.py", "file_name": "profile.py", "file_type": "text/x-python", "category": "implementation", "start_line": 212, "end_line": 231, "span_ids": ["_Tracker._update_pids", "_Tracker.shutdown", "_Tracker.__init__", "_Tracker"], "tokens": 140}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Tracker(Process):\n \"\"\"Background process for tracking resource usage\"\"\"\n\n def __init__(self, dt=1):\n Process.__init__(self)\n self.daemon = True\n self.dt = dt\n self.parent_pid = 
current_process().pid\n self.parent_conn, self.child_conn = Pipe()\n\n def shutdown(self):\n if not self.parent_conn.closed:\n self.parent_conn.send(\"shutdown\")\n self.parent_conn.close()\n self.join()\n\n def _update_pids(self, pid):\n return [self.parent] + [\n p for p in self.parent.children() if p.pid != pid and p.status() != \"zombie\"\n ]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile.py__Tracker.run_CacheData.namedtuple_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile.py__Tracker.run_CacheData.namedtuple_", "embedding": null, "metadata": {"file_path": "dask/diagnostics/profile.py", "file_name": "profile.py", "file_type": "text/x-python", "category": "implementation", "start_line": 233, "end_line": 274, "span_ids": ["_Tracker.run", "impl:5"], "tokens": 272}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Tracker(Process):\n\n def run(self):\n\n psutil = import_required(\n \"psutil\", \"Tracking resource usage requires `psutil` to be installed\"\n )\n self.parent = psutil.Process(self.parent_pid)\n\n pid = current_process()\n data = []\n while True:\n try:\n msg = self.child_conn.recv()\n except KeyboardInterrupt:\n continue\n if msg == \"shutdown\":\n break\n elif msg == \"collect\":\n ps = self._update_pids(pid)\n while not data or not self.child_conn.poll():\n tic = default_timer()\n mem = cpu = 0\n for p in ps:\n try:\n mem2 = p.memory_info().rss\n cpu2 = p.cpu_percent()\n except Exception: # could be a few different exceptions\n pass\n else:\n # Only increment if both were successful\n mem += mem2\n cpu += cpu2\n data.append((tic, mem / 1e6, cpu))\n sleep(self.dt)\n elif msg == \"send_data\":\n self.child_conn.send(data)\n data = []\n self.child_conn.close()\n\n\nCacheData = namedtuple(\n \"CacheData\", (\"key\", \"task\", \"metric\", \"cache_time\", \"free_time\")\n)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile.py_CacheProfiler_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile.py_CacheProfiler_", "embedding": null, "metadata": {"file_path": "dask/diagnostics/profile.py", "file_name": "profile.py", "file_type": "text/x-python", "category": "implementation", "start_line": 278, "end_line": 384, "span_ids": ["CacheProfiler", "CacheProfiler._posttask", "CacheProfiler._finish", "CacheProfiler._plot", "CacheProfiler.__init__", "CacheProfiler.visualize", "CacheProfiler.clear", "CacheProfiler.__enter__", "CacheProfiler._start"], "tokens": 870}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class CacheProfiler(Callback):\n \"\"\"A profiler for dask execution at the scheduler cache level.\n\n Records the following information for each task:\n 1. Key\n 2. Task\n 3. Size metric\n 4. Cache entry time in seconds since the epoch\n 5. Cache exit time in seconds since the epoch\n\n Examples\n --------\n\n >>> from operator import add, mul\n >>> from dask.threaded import get\n >>> from dask.diagnostics import CacheProfiler\n >>> dsk = {'x': 1, 'y': (add, 'x', 10), 'z': (mul, 'y', 2)}\n >>> with CacheProfiler() as prof:\n ... get(dsk, 'z')\n 22\n\n >>> prof.results # doctest: +SKIP\n [CacheData('y', (add, 'x', 10), 1, 1435352238.48039, 1435352238.480655),\n CacheData('z', (mul, 'y', 2), 1, 1435352238.480657, 1435352238.480803)]\n\n The default is to count each task (``metric`` is 1 for all tasks). Other\n functions may used as a metric instead through the ``metric`` keyword. For\n example, the ``nbytes`` function found in ``cachey`` can be used to measure\n the number of bytes in the cache.\n\n >>> from cachey import nbytes # doctest: +SKIP\n >>> with CacheProfiler(metric=nbytes) as prof: # doctest: +SKIP\n ... get(dsk, 'z')\n\n The profiling results can be visualized in a bokeh plot using the\n ``visualize`` method. Note that this requires bokeh to be installed.\n\n >>> prof.visualize() # doctest: +SKIP\n\n You can activate the profiler globally\n\n >>> prof.register() # doctest: +SKIP\n\n If you use the profiler globally you will need to clear out old results\n manually.\n\n >>> prof.clear()\n\n \"\"\"\n\n def __init__(self, metric=None, metric_name=None):\n self.clear()\n self._metric = metric if metric else lambda value: 1\n if metric_name:\n self._metric_name = metric_name\n elif metric:\n self._metric_name = metric.__name__\n else:\n self._metric_name = \"count\"\n\n def __enter__(self):\n self.clear()\n return super().__enter__()\n\n def _start(self, dsk):\n self._dsk.update(dsk)\n if not self._start_time:\n self._start_time = default_timer()\n\n def _posttask(self, key, value, dsk, state, id):\n t = default_timer()\n self._cache[key] = (self._metric(value), t)\n for k in state[\"released\"].intersection(self._cache):\n metric, start = self._cache.pop(k)\n self.results.append(CacheData(k, dsk[k], metric, start, t))\n\n def _finish(self, dsk, state, failed):\n t = default_timer()\n for k, (metric, start) in self._cache.items():\n self.results.append(CacheData(k, dsk[k], metric, start, t))\n self._cache.clear()\n\n def _plot(self, **kwargs):\n from .profile_visualize import plot_cache\n\n return plot_cache(\n self.results, self._dsk, self._start_time, self._metric_name, **kwargs\n )\n\n def visualize(self, **kwargs):\n \"\"\"Visualize the profiling run in a bokeh plot.\n\n See also\n --------\n dask.diagnostics.profile_visualize.visualize\n \"\"\"\n from .profile_visualize import visualize\n\n return visualize(self, **kwargs)\n\n def clear(self):\n \"\"\"Clear out old results from profiler\"\"\"\n self.results = []\n self._cache = {}\n self._dsk = {}\n self._start_time = None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_random_unquote.return.expr": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_random_unquote.return.expr", "embedding": null, "metadata": {"file_path": "dask/diagnostics/profile_visualize.py", "file_name": "profile_visualize.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 25, "span_ids": ["imports", "unquote"], "tokens": 174}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import random\nfrom bisect import bisect_left\nfrom distutils.version import LooseVersion\nfrom itertools import cycle\nfrom operator import itemgetter, add\n\nfrom ..utils import funcname, import_required, apply\nfrom ..core import istask\n\n\n_BOKEH_MISSING_MSG = \"Diagnostics plots require `bokeh` to be installed\"\n_TOOLZ_MISSING_MSG = \"Diagnostics plots require `toolz` to be installed\"\n\n\ndef unquote(expr):\n if istask(expr):\n if expr[0] in (tuple, list, set):\n return expr[0](map(unquote, expr[1]))\n elif (\n expr[0] == dict\n and isinstance(expr[1], list)\n and isinstance(expr[1][0], list)\n ):\n return dict(map(unquote, expr[1]))\n return expr", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_pprint_task_pprint_task.if_istask_task_.else_.try_.except_TypeError_.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_pprint_task_pprint_task.if_istask_task_.else_.try_.except_TypeError_.return._", "embedding": null, "metadata": {"file_path": "dask/diagnostics/profile_visualize.py", "file_name": "profile_visualize.py", "file_type": "text/x-python", "category": "implementation", "start_line": 28, "end_line": 117, "span_ids": ["pprint_task"], "tokens": 727}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pprint_task(task, keys, label_size=60):\n \"\"\"Return a nicely formatted string for a task.\n\n Parameters\n ----------\n task:\n Value within dask graph to render as text\n keys: iterable\n List of keys within dask graph\n label_size: int (optional)\n Maximum size of output label, defaults to 60\n\n Examples\n --------\n >>> from operator import add, mul\n >>> dsk = {'a': 1,\n ... 'b': 2,\n ... 'c': (add, 'a', 'b'),\n ... 'd': (add, (mul, 'a', 'b'), 'c'),\n ... 'e': (sum, ['a', 'b', 5]),\n ... 'f': (add,),\n ... 
'g': []}\n\n >>> pprint_task(dsk['c'], dsk)\n 'add(_, _)'\n >>> pprint_task(dsk['d'], dsk)\n 'add(mul(_, _), _)'\n >>> pprint_task(dsk['e'], dsk)\n 'sum([_, _, *])'\n >>> pprint_task(dsk['f'], dsk)\n 'add()'\n >>> pprint_task(dsk['g'], dsk)\n '[]'\n \"\"\"\n if istask(task):\n func = task[0]\n if func is apply:\n head = funcname(task[1])\n tail = \")\"\n args = unquote(task[2]) if len(task) > 2 else ()\n kwargs = unquote(task[3]) if len(task) > 3 else {}\n else:\n if hasattr(func, \"funcs\"):\n head = \"(\".join(funcname(f) for f in func.funcs)\n tail = \")\" * len(func.funcs)\n else:\n head = funcname(task[0])\n tail = \")\"\n args = task[1:]\n kwargs = {}\n if args or kwargs:\n label_size2 = int(\n (label_size - len(head) - len(tail)) // (len(args) + len(kwargs))\n )\n pprint = lambda t: pprint_task(t, keys, label_size2)\n if args:\n if label_size2 > 5:\n args = \", \".join(pprint(t) for t in args)\n else:\n args = \"...\"\n else:\n args = \"\"\n if kwargs:\n if label_size2 > 5:\n kwargs = \", \" + \", \".join(\n \"{0}={1}\".format(k, pprint(v)) for k, v in sorted(kwargs.items())\n )\n else:\n kwargs = \", ...\"\n else:\n kwargs = \"\"\n return \"{0}({1}{2}{3}\".format(head, args, kwargs, tail)\n elif isinstance(task, list):\n if not task:\n return \"[]\"\n elif len(task) > 3:\n result = pprint_task(task[:3], keys, label_size)\n return result[:-1] + \", ...]\"\n else:\n label_size2 = int((label_size - 2 - 2 * len(task)) // len(task))\n args = \", \".join(pprint_task(t, keys, label_size2) for t in task)\n return \"[{0}]\".format(args)\n else:\n try:\n if task in keys:\n return \"_\"\n else:\n return \"*\"\n except TypeError:\n return \"*\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_get_colors_get_colors.return._color_lookup_n_for_n_in": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_get_colors_get_colors.return._color_lookup_n_for_n_in", "embedding": null, "metadata": {"file_path": "dask/diagnostics/profile_visualize.py", "file_name": "profile_visualize.py", "file_type": "text/x-python", "category": "implementation", "start_line": 120, "end_line": 146, "span_ids": ["get_colors"], "tokens": 259}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_colors(palette, funcs):\n \"\"\"Get a dict mapping funcs to colors from palette.\n\n Parameters\n ----------\n palette : string\n Name of the bokeh palette to use, must be a member of\n bokeh.palettes.all_palettes.\n funcs : iterable\n Iterable of function names\n \"\"\"\n palettes = import_required(\"bokeh.palettes\", _BOKEH_MISSING_MSG)\n tz = import_required(\"tlz\", _TOOLZ_MISSING_MSG)\n\n unique_funcs = list(sorted(tz.unique(funcs)))\n n_funcs = len(unique_funcs)\n palette_lookup = palettes.all_palettes[palette]\n keys = list(sorted(palette_lookup.keys()))\n index = keys[min(bisect_left(keys, n_funcs), len(keys) - 1)]\n palette = palette_lookup[index]\n # Some bokeh palettes repeat colors, 
we want just the unique set\n palette = list(tz.unique(palette))\n if len(palette) > n_funcs:\n # Consistently shuffle palette - prevents just using low-range\n random.Random(42).shuffle(palette)\n color_lookup = dict(zip(unique_funcs, cycle(palette)))\n return [color_lookup[n] for n in funcs]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_visualize__get_figure_keywords.return.o": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_visualize__get_figure_keywords.return.o", "embedding": null, "metadata": {"file_path": "dask/diagnostics/profile_visualize.py", "file_name": "profile_visualize.py", "file_type": "text/x-python", "category": "implementation", "start_line": 149, "end_line": 220, "span_ids": ["_get_figure_keywords", "visualize"], "tokens": 528}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def visualize(profilers, file_path=None, show=True, save=True, **kwargs):\n \"\"\"Visualize the results of profiling in a bokeh plot.\n\n If multiple profilers are passed in, the plots are stacked vertically.\n\n Parameters\n ----------\n profilers : profiler or list\n Profiler or list of profilers.\n file_path : string, optional\n Name of the plot output file.\n show : boolean, optional\n If True (default), the plot is opened in a browser.\n save : boolean, optional\n If True (default), the plot is saved to disk.\n **kwargs\n Other keyword arguments, passed to bokeh.figure. 
These will override\n all defaults set by visualize.\n\n Returns\n -------\n The completed bokeh plot object.\n \"\"\"\n bp = import_required(\"bokeh.plotting\", _BOKEH_MISSING_MSG)\n import bokeh\n\n if LooseVersion(bokeh.__version__) >= \"0.12.10\":\n from bokeh.io import state\n\n in_notebook = state.curstate().notebook\n else:\n from bokeh.io import _state\n\n in_notebook = _state._notebook\n\n if not in_notebook:\n file_path = file_path or \"profile.html\"\n bp.output_file(file_path)\n\n if not isinstance(profilers, list):\n profilers = [profilers]\n figs = [prof._plot(**kwargs) for prof in profilers]\n # Stack the plots\n if len(figs) == 1:\n p = figs[0]\n else:\n top = figs[0]\n for f in figs[1:]:\n f.x_range = top.x_range\n f.title = None\n f.min_border_top = 20\n f.plot_height -= 30\n for f in figs[:-1]:\n f.xaxis.axis_label = None\n f.min_border_bottom = 20\n f.plot_height -= 30\n for f in figs:\n f.min_border_left = 75\n f.min_border_right = 75\n p = bp.gridplot([[f] for f in figs])\n if show:\n bp.show(p)\n if file_path and save:\n bp.save(p)\n return p\n\n\ndef _get_figure_keywords():\n bp = import_required(\"bokeh.plotting\", _BOKEH_MISSING_MSG)\n o = bp.Figure.properties()\n o.add(\"tools\")\n return o", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_plot_tasks_plot_tasks.hover.p_select_HoverTool_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_plot_tasks_plot_tasks.hover.p_select_HoverTool_", "embedding": null, "metadata": {"file_path": "dask/diagnostics/profile_visualize.py", "file_name": "profile_visualize.py", "file_type": "text/x-python", "category": "implementation", "start_line": 223, "end_line": 309, "span_ids": ["plot_tasks"], "tokens": 717}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def plot_tasks(results, dsk, palette=\"Viridis\", label_size=60, **kwargs):\n \"\"\"Visualize the results of profiling in a bokeh plot.\n\n Parameters\n ----------\n results : sequence\n Output of Profiler.results\n dsk : dict\n The dask graph being profiled.\n palette : string, optional\n Name of the bokeh palette to use, must be a member of\n bokeh.palettes.all_palettes.\n label_size: int (optional)\n Maximum size of output labels in plot, defaults to 60\n **kwargs\n Other keyword arguments, passed to bokeh.figure. 
These will override\n all defaults set by visualize.\n\n Returns\n -------\n The completed bokeh plot object.\n \"\"\"\n bp = import_required(\"bokeh.plotting\", _BOKEH_MISSING_MSG)\n from bokeh.models import HoverTool\n\n tz = import_required(\"tlz\", _TOOLZ_MISSING_MSG)\n\n defaults = dict(\n title=\"Profile Results\",\n tools=\"hover,save,reset,xwheel_zoom,xpan\",\n toolbar_location=\"above\",\n plot_width=800,\n plot_height=300,\n )\n defaults.update((k, v) for (k, v) in kwargs.items() if k in _get_figure_keywords())\n\n if results:\n keys, tasks, starts, ends, ids = zip(*results)\n\n id_group = tz.groupby(itemgetter(4), results)\n timings = dict(\n (k, [i.end_time - i.start_time for i in v]) for (k, v) in id_group.items()\n )\n id_lk = dict(\n (t[0], n)\n for (n, t) in enumerate(\n sorted(timings.items(), key=itemgetter(1), reverse=True)\n )\n )\n\n left = min(starts)\n right = max(ends)\n\n p = bp.figure(\n y_range=[str(i) for i in range(len(id_lk))],\n x_range=[0, right - left],\n **defaults\n )\n\n data = {}\n data[\"width\"] = width = [e - s for (s, e) in zip(starts, ends)]\n data[\"x\"] = [w / 2 + s - left for (w, s) in zip(width, starts)]\n data[\"y\"] = [id_lk[i] + 1 for i in ids]\n data[\"function\"] = funcs = [pprint_task(i, dsk, label_size) for i in tasks]\n data[\"color\"] = get_colors(palette, funcs)\n data[\"key\"] = [str(i) for i in keys]\n\n source = bp.ColumnDataSource(data=data)\n\n p.rect(\n source=source,\n x=\"x\",\n y=\"y\",\n height=1,\n width=\"width\",\n color=\"color\",\n line_color=\"gray\",\n )\n else:\n p = bp.figure(y_range=[str(i) for i in range(8)], x_range=[0, 10], **defaults)\n p.grid.grid_line_color = None\n p.axis.axis_line_color = None\n p.axis.major_tick_line_color = None\n p.yaxis.axis_label = \"Worker ID\"\n p.xaxis.axis_label = \"Time (s)\"\n\n hover = p.select(HoverTool)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_plot_tasks.hover.tooltips_plot_tasks.return.p": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_plot_tasks.hover.tooltips_plot_tasks.return.p", "embedding": null, "metadata": {"file_path": "dask/diagnostics/profile_visualize.py", "file_name": "profile_visualize.py", "file_type": "text/x-python", "category": "implementation", "start_line": 310, "end_line": 322, "span_ids": ["plot_tasks"], "tokens": 159}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def plot_tasks(results, dsk, palette=\"Viridis\", label_size=60, **kwargs):\n # ... other code\n hover.tooltips = \"\"\"\n
<div>\n <span style=\"font-size: 14px; font-weight: bold;\">Key:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@key</span>\n </div>\n <div>\n <span style=\"font-size: 14px; font-weight: bold;\">Task:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@function</span>\n </div>
\n \"\"\"\n hover.point_policy = \"follow_mouse\"\n\n return p", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_plot_resources_fix_bounds.return.start_max_end_start_m": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_plot_resources_fix_bounds.return.start_max_end_start_m", "embedding": null, "metadata": {"file_path": "dask/diagnostics/profile_visualize.py", "file_name": "profile_visualize.py", "file_type": "text/x-python", "category": "implementation", "start_line": 325, "end_line": 405, "span_ids": ["fix_bounds", "plot_resources"], "tokens": 613}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def plot_resources(results, palette=\"Viridis\", **kwargs):\n \"\"\"Plot resource usage in a bokeh plot.\n\n Parameters\n ----------\n results : sequence\n Output of ResourceProfiler.results\n palette : string, optional\n Name of the bokeh palette to use, must be a member of\n bokeh.palettes.all_palettes.\n **kwargs\n Other keyword arguments, passed to bokeh.figure. These will override\n all defaults set by plot_resources.\n\n Returns\n -------\n The completed bokeh plot object.\n \"\"\"\n bp = import_required(\"bokeh.plotting\", _BOKEH_MISSING_MSG)\n import bokeh\n from bokeh import palettes\n from bokeh.models import LinearAxis, Range1d\n\n defaults = dict(\n title=\"Profile Results\",\n tools=\"save,reset,xwheel_zoom,xpan\",\n toolbar_location=\"above\",\n plot_width=800,\n plot_height=300,\n )\n defaults.update((k, v) for (k, v) in kwargs.items() if k in _get_figure_keywords())\n if results:\n t, mem, cpu = zip(*results)\n left, right = min(t), max(t)\n t = [i - left for i in t]\n p = bp.figure(\n y_range=fix_bounds(0, max(cpu), 100),\n x_range=fix_bounds(0, right - left, 1),\n **defaults\n )\n else:\n t = mem = cpu = []\n p = bp.figure(y_range=(0, 100), x_range=(0, 1), **defaults)\n colors = palettes.all_palettes[palette][6]\n p.line(\n t,\n cpu,\n color=colors[0],\n line_width=4,\n **{\n \"legend_label\"\n if LooseVersion(bokeh.__version__) >= \"1.4\"\n else \"legend\": \"% CPU\"\n }\n )\n p.yaxis.axis_label = \"% CPU\"\n p.extra_y_ranges = {\n \"memory\": Range1d(\n *fix_bounds(min(mem) if mem else 0, max(mem) if mem else 100, 100)\n )\n }\n p.line(\n t,\n mem,\n color=colors[2],\n y_range_name=\"memory\",\n line_width=4,\n **{\n \"legend_label\"\n if LooseVersion(bokeh.__version__) >= \"1.4\"\n else \"legend\": \"Memory\"\n }\n )\n p.add_layout(LinearAxis(y_range_name=\"memory\", axis_label=\"Memory (MB)\"), \"right\")\n p.xaxis.axis_label = \"Time (s)\"\n return p\n\n\ndef fix_bounds(start, end, min_span):\n \"\"\"Adjust end point to ensure span of at least `min_span`\"\"\"\n return start, max(end, start + min_span)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
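
The ``visualize`` function serialized above stacks multiple profiler plots on a shared x-range when given a list. A sketch of that workflow, assuming ``bokeh`` and ``psutil`` are installed (this example is not part of the docstore):

from operator import add, mul
from dask.threaded import get
from dask.diagnostics import Profiler, ResourceProfiler, CacheProfiler, visualize

dsk = {"x": 1, "y": (add, "x", 10), "z": (mul, "y", 2)}

# Run all three profilers over the same computation.
with Profiler() as tprof, ResourceProfiler(dt=0.25) as rprof, CacheProfiler() as cprof:
    get(dsk, "z")

# Three vertically stacked bokeh figures; outside a notebook this writes
# profile.html by default. show=False skips opening a browser tab.
p = visualize([tprof, rprof, cprof], show=False, save=True)
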
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_plot_cache_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_plot_cache_", "embedding": null, "metadata": {"file_path": "dask/diagnostics/profile_visualize.py", "file_name": "profile_visualize.py", "file_type": "text/x-python", "category": "implementation", "start_line": 408, "end_line": 489, "span_ids": ["plot_cache"], "tokens": 692}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def plot_cache(\n results, dsk, start_time, metric_name, palette=\"Viridis\", label_size=60, **kwargs\n):\n \"\"\"Visualize the results of profiling in a bokeh plot.\n\n Parameters\n ----------\n results : sequence\n Output of CacheProfiler.results\n dsk : dict\n The dask graph being profiled.\n start_time : float\n Start time of the profile.\n metric_name : string\n Metric used to measure cache size\n palette : string, optional\n Name of the bokeh palette to use, must be a member of\n bokeh.palettes.all_palettes.\n label_size: int (optional)\n Maximum size of output labels in plot, defaults to 60\n **kwargs\n Other keyword arguments, passed to bokeh.figure. These will override\n all defaults set by visualize.\n\n Returns\n -------\n The completed bokeh plot object.\n \"\"\"\n bp = import_required(\"bokeh.plotting\", _BOKEH_MISSING_MSG)\n from bokeh.models import HoverTool\n\n tz = import_required(\"tlz\", _TOOLZ_MISSING_MSG)\n\n defaults = dict(\n title=\"Profile Results\",\n tools=\"hover,save,reset,wheel_zoom,xpan\",\n toolbar_location=\"above\",\n plot_width=800,\n plot_height=300,\n )\n defaults.update((k, v) for (k, v) in kwargs.items() if k in _get_figure_keywords())\n\n if results:\n starts, ends = list(zip(*results))[3:]\n tics = list(sorted(tz.unique(starts + ends)))\n groups = tz.groupby(lambda d: pprint_task(d[1], dsk, label_size), results)\n data = {}\n for k, vals in groups.items():\n cnts = dict.fromkeys(tics, 0)\n for v in vals:\n cnts[v.cache_time] += v.metric\n cnts[v.free_time] -= v.metric\n data[k] = [0] + list(tz.accumulate(add, tz.pluck(1, sorted(cnts.items()))))\n\n tics = [0] + [i - start_time for i in tics]\n p = bp.figure(x_range=[0, max(tics)], **defaults)\n\n for (key, val), color in zip(data.items(), get_colors(palette, data.keys())):\n p.line(\n \"x\",\n \"y\",\n line_color=color,\n line_width=3,\n source=bp.ColumnDataSource(\n {\"x\": tics, \"y\": val, \"label\": [key for i in val]}\n ),\n )\n\n else:\n p = bp.figure(y_range=[0, 10], x_range=[0, 10], **defaults)\n p.yaxis.axis_label = \"Cache Size ({0})\".format(metric_name)\n p.xaxis.axis_label = \"Time (s)\"\n\n hover = p.select(HoverTool)\n hover.tooltips = \"\"\"\n
 <div>\n <span style=\"font-size: 14px; font-weight: bold;\">Task:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@label</span>\n </div>
\n \"\"\"\n return p", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/progress.py_sys_format_time.if_h_.else_.return._0_4_1f_s_format_s_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/progress.py_sys_format_time.if_h_.else_.return._0_4_1f_s_format_s_", "embedding": null, "metadata": {"file_path": "dask/diagnostics/progress.py", "file_name": "progress.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 25, "span_ids": ["imports", "format_time"], "tokens": 191}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import sys\nimport threading\nimport time\nfrom timeit import default_timer\n\nfrom ..callbacks import Callback\nfrom ..utils import ignoring\n\n\ndef format_time(t):\n \"\"\"Format seconds into a human readable form.\n\n >>> format_time(10.4)\n '10.4s'\n >>> format_time(1000.4)\n '16min 40.4s'\n \"\"\"\n m, s = divmod(t, 60)\n h, m = divmod(m, 60)\n if h:\n return \"{0:2.0f}hr {1:2.0f}min {2:4.1f}s\".format(h, m, s)\n elif m:\n return \"{0:2.0f}min {1:4.1f}s\".format(m, s)\n else:\n return \"{0:4.1f}s\".format(s)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/progress.py_ProgressBar_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/progress.py_ProgressBar_", "embedding": null, "metadata": {"file_path": "dask/diagnostics/progress.py", "file_name": "progress.py", "file_type": "text/x-python", "category": "implementation", "start_line": 28, "end_line": 138, "span_ids": ["ProgressBar._finish", "ProgressBar._pretask", "ProgressBar._timer_func", "ProgressBar._update_bar", "ProgressBar", "ProgressBar.__init__", "ProgressBar._draw_bar", "ProgressBar._start"], "tokens": 881}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ProgressBar(Callback):\n \"\"\"A progress bar for dask.\n\n Parameters\n ----------\n minimum : int, optional\n Minimum time threshold in seconds before displaying a progress bar.\n Default is 0 (always display)\n width : int, optional\n Width of the bar\n dt : float, optional\n Update resolution in seconds, default is 0.1 seconds\n\n Examples\n --------\n\n Below we create a progress bar with a minimum threshold of 1 second before\n displaying. For cheap computations nothing is shown:\n\n >>> with ProgressBar(minimum=1.0): # doctest: +SKIP\n ... 
out = some_fast_computation.compute()\n\n But for expensive computations a full progress bar is displayed:\n\n >>> with ProgressBar(minimum=1.0): # doctest: +SKIP\n ... out = some_slow_computation.compute()\n [########################################] | 100% Completed | 10.4 s\n\n The duration of the last computation is available as an attribute\n\n >>> pbar = ProgressBar() # doctest: +SKIP\n >>> with pbar: # doctest: +SKIP\n ... out = some_computation.compute()\n [########################################] | 100% Completed | 10.4 s\n >>> pbar.last_duration # doctest: +SKIP\n 10.4\n\n You can also register a progress bar so that it displays for all\n computations:\n\n >>> pbar = ProgressBar() # doctest: +SKIP\n >>> pbar.register() # doctest: +SKIP\n >>> some_slow_computation.compute() # doctest: +SKIP\n [########################################] | 100% Completed | 10.4 s\n \"\"\"\n\n def __init__(self, minimum=0, width=40, dt=0.1, out=None):\n if out is None:\n out = sys.stdout\n self._minimum = minimum\n self._width = width\n self._dt = dt\n self._file = out\n self.last_duration = 0\n\n def _start(self, dsk):\n self._state = None\n self._start_time = default_timer()\n # Start background thread\n self._running = True\n self._timer = threading.Thread(target=self._timer_func)\n self._timer.daemon = True\n self._timer.start()\n\n def _pretask(self, key, dsk, state):\n self._state = state\n self._file.flush()\n\n def _finish(self, dsk, state, errored):\n self._running = False\n self._timer.join()\n elapsed = default_timer() - self._start_time\n self.last_duration = elapsed\n if elapsed < self._minimum:\n return\n if not errored:\n self._draw_bar(1, elapsed)\n else:\n self._update_bar(elapsed)\n self._file.write(\"\\n\")\n self._file.flush()\n\n def _timer_func(self):\n \"\"\"Background thread for updating the progress bar\"\"\"\n while self._running:\n elapsed = default_timer() - self._start_time\n if elapsed > self._minimum:\n self._update_bar(elapsed)\n time.sleep(self._dt)\n\n def _update_bar(self, elapsed):\n s = self._state\n if not s:\n self._draw_bar(0, elapsed)\n return\n ndone = len(s[\"finished\"])\n ntasks = sum(len(s[k]) for k in [\"ready\", \"waiting\", \"running\"]) + ndone\n if ndone < ntasks:\n self._draw_bar(ndone / ntasks if ntasks else 0, elapsed)\n\n def _draw_bar(self, frac, elapsed):\n bar = \"#\" * int(self._width * frac)\n percent = int(100 * frac)\n elapsed = format_time(elapsed)\n msg = \"\\r[{0:<{1}}] | {2}% Completed | {3}\".format(\n bar, self._width, percent, elapsed\n )\n with ignoring(ValueError):\n self._file.write(msg)\n self._file.flush()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_from_operator_import_add__ignore_abc_warning.pytest_mark_filterwarning": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_from_operator_import_add__ignore_abc_warning.pytest_mark_filterwarning", "embedding": null, "metadata": {"file_path": "dask/diagnostics/tests/test_profiler.py", "file_name": "test_profiler.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 30, "span_ids": ["imports"], "tokens": 237}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from operator import add, mul\nimport os\nfrom time import sleep\nfrom distutils.version import LooseVersion\n\nfrom dask.diagnostics import Profiler, ResourceProfiler, CacheProfiler\nfrom dask.threaded import get\nfrom dask.utils import ignoring, tmpfile, apply\nimport pytest\n\ntry:\n import bokeh\nexcept ImportError:\n bokeh = None\ntry:\n import psutil\nexcept ImportError:\n psutil = None\n\n\nprof = Profiler()\n\n\ndsk = {\"a\": 1, \"b\": 2, \"c\": (add, \"a\", \"b\"), \"d\": (mul, \"a\", \"b\"), \"e\": (mul, \"c\", \"d\")}\n\ndsk2 = {\"a\": 1, \"b\": 2, \"c\": (lambda a, b: sleep(0.1) or (a + b), \"a\", \"b\")}\n# Bokeh, via jinja https://github.com/pallets/jinja/issues/998\nignore_abc_warning = pytest.mark.filterwarnings(\n \"ignore:Using or importing:DeprecationWarning\"\n)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_profiler_test_profiler.assert_prof_results_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_profiler_test_profiler.assert_prof_results_", "embedding": null, "metadata": {"file_path": "dask/diagnostics/tests/test_profiler.py", "file_name": "test_profiler.py", "file_type": "text/x-python", "category": "test", "start_line": 33, "end_line": 43, "span_ids": ["test_profiler"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_profiler():\n with prof:\n out = get(dsk, \"e\")\n assert out == 6\n prof_data = sorted(prof.results, key=lambda d: d.key)\n keys = [i.key for i in prof_data]\n assert keys == [\"c\", \"d\", \"e\"]\n tasks = [i.task for i in prof_data]\n assert tasks == [(add, \"a\", \"b\"), (mul, \"a\", \"b\"), (mul, \"c\", \"d\")]\n prof.clear()\n assert prof.results == []", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_profiler_works_under_error_test_two_gets.assert_len_prof_results_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_profiler_works_under_error_test_two_gets.assert_len_prof_results_", "embedding": null, "metadata": {"file_path": "dask/diagnostics/tests/test_profiler.py", "file_name": "test_profiler.py", "file_type": "text/x-python", "category": "test", "start_line": 46, "end_line": 74, "span_ids": ["test_two_gets", "test_profiler_works_under_error"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_profiler_works_under_error():\n div = lambda x, y: x / y\n dsk = {\"x\": (div, 1, 1), \"y\": (div, \"x\", 2), \"z\": (div, \"y\", 0)}\n\n with ignoring(ZeroDivisionError):\n with prof:\n get(dsk, \"z\")\n\n assert all(len(v) == 5 for v in prof.results)\n assert len(prof.results) == 2\n\n\ndef test_two_gets():\n with prof:\n get(dsk, \"e\")\n n = len(prof.results)\n\n dsk2 = {\"x\": (add, 1, 2), \"y\": (add, \"x\", \"x\")}\n\n with prof:\n get(dsk2, \"y\")\n m = len(prof.results)\n\n with prof:\n get(dsk, \"e\")\n get(dsk2, \"y\")\n get(dsk, \"e\")\n\n assert len(prof.results) == n + m + n", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_resource_profiler_test_resource_profiler.assert_len_rprof_results_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_resource_profiler_test_resource_profiler.assert_len_rprof_results_", "embedding": null, "metadata": {"file_path": "dask/diagnostics/tests/test_profiler.py", "file_name": "test_profiler.py", "file_type": "text/x-python", "category": "test", "start_line": 77, "end_line": 98, "span_ids": ["test_resource_profiler"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not psutil\")\ndef test_resource_profiler():\n with ResourceProfiler(dt=0.01) as rprof:\n get(dsk2, \"c\")\n results = rprof.results\n assert len(results) > 0\n assert all(isinstance(i, tuple) and len(i) == 3 for i in results)\n\n # Tracker stopped on exit\n assert not rprof._is_running()\n\n rprof.clear()\n assert rprof.results == []\n\n # Close is idempotent\n rprof.close()\n assert not rprof._is_running()\n\n # Restarts tracker if already closed\n with rprof:\n get(dsk2, \"c\")\n assert len(rprof.results) > 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_resource_profiler_multiple_gets_test_resource_profiler_multiple_gets.assert_not_rprof__is_runn": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_resource_profiler_multiple_gets_test_resource_profiler_multiple_gets.assert_not_rprof__is_runn", "embedding": null, "metadata": {"file_path": "dask/diagnostics/tests/test_profiler.py", "file_name": "test_profiler.py", "file_type": "text/x-python", "category": "test", "start_line": 101, "end_line": 121, "span_ids": 
["test_resource_profiler_multiple_gets"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not psutil\")\ndef test_resource_profiler_multiple_gets():\n with ResourceProfiler(dt=0.01) as rprof:\n get(dsk2, \"c\")\n assert len(rprof.results) == 0\n get(dsk2, \"c\")\n results = rprof.results\n assert all(isinstance(i, tuple) and len(i) == 3 for i in results)\n\n rprof.clear()\n rprof.register()\n get(dsk2, \"c\")\n assert len(rprof.results) > 0\n get(dsk2, \"c\")\n rprof.unregister()\n\n results = rprof.results\n assert all(isinstance(i, tuple) and len(i) == 3 for i in results)\n\n rprof.close()\n assert not rprof._is_running()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_cache_profiler_test_cache_profiler.assert_CacheProfiler_metr": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_cache_profiler_test_cache_profiler.assert_CacheProfiler_metr", "embedding": null, "metadata": {"file_path": "dask/diagnostics/tests/test_profiler.py", "file_name": "test_profiler.py", "file_type": "text/x-python", "category": "test", "start_line": 124, "end_line": 146, "span_ids": ["test_cache_profiler"], "tokens": 176}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_cache_profiler():\n with CacheProfiler() as cprof:\n get(dsk2, \"c\")\n results = cprof.results\n assert all(isinstance(i, tuple) and len(i) == 5 for i in results)\n\n cprof.clear()\n assert cprof.results == []\n\n tics = [0]\n\n def nbytes(res):\n tics[0] += 1\n return tics[0]\n\n with CacheProfiler(nbytes) as cprof:\n get(dsk2, \"c\")\n\n results = cprof.results\n assert tics[-1] == len(results)\n assert tics[-1] == results[-1].metric\n assert cprof._metric_name == \"nbytes\"\n assert CacheProfiler(metric=nbytes, metric_name=\"foo\")._metric_name == \"foo\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_register_test_register.try_.finally_.prof_unregister_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_register_test_register.try_.finally_.prof_unregister_", "embedding": null, "metadata": {"file_path": "dask/diagnostics/tests/test_profiler.py", "file_name": "test_profiler.py", "file_type": "text/x-python", "category": "test", "start_line": 149, "end_line": 169, "span_ids": 
["test_register"], "tokens": 118}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"profiler\",\n [\n Profiler,\n pytest.param(\n lambda: ResourceProfiler(dt=0.01), marks=pytest.mark.skipif(\"not psutil\")\n ),\n CacheProfiler,\n ],\n)\ndef test_register(profiler):\n prof = profiler()\n try:\n prof.register()\n get(dsk2, \"c\")\n n = len(prof.results)\n assert n > 0\n get(dsk2, \"c\")\n assert len(prof.results) > n\n finally:\n prof.unregister()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_unquote_test_unquote.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_unquote_test_unquote.None_2", "embedding": null, "metadata": {"file_path": "dask/diagnostics/tests/test_profiler.py", "file_name": "test_profiler.py", "file_type": "text/x-python", "category": "test", "start_line": 172, "end_line": 187, "span_ids": ["test_unquote"], "tokens": 197}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not bokeh\")\n@ignore_abc_warning\ndef test_unquote():\n from dask.diagnostics.profile_visualize import unquote\n\n t = {\"a\": 1, \"b\": 2, \"c\": 3}\n task_dask = (dict, [[\"a\", 1], [\"b\", 2], [\"c\", 3]])\n assert unquote(task_dask) == t\n\n t = {\"a\": [1, 2, 3], \"b\": 2, \"c\": 3}\n task_dask = (dict, [[\"a\", [1, 2, 3]], [\"b\", 2], [\"c\", 3]])\n assert unquote(task_dask) == t\n\n t = [1, 2, 3]\n task_dask = [1, 2, 3]\n assert unquote(task_dask) == t", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_pprint_task_test_pprint_task.None_9": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_pprint_task_test_pprint_task.None_9", "embedding": null, "metadata": {"file_path": "dask/diagnostics/tests/test_profiler.py", "file_name": "test_profiler.py", "file_type": "text/x-python", "category": "test", "start_line": 190, "end_line": 220, "span_ids": ["test_pprint_task"], "tokens": 483}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not 
bokeh\")\n@ignore_abc_warning\ndef test_pprint_task():\n from dask.diagnostics.profile_visualize import pprint_task\n\n keys = set([\"a\", \"b\", \"c\", \"d\", \"e\"])\n assert pprint_task((add, \"a\", 1), keys) == \"add(_, *)\"\n assert pprint_task((add, (add, \"a\", 1)), keys) == \"add(add(_, *))\"\n res = \"sum([*, _, add(_, *)])\"\n assert pprint_task((sum, [1, \"b\", (add, \"a\", 1)]), keys) == res\n assert pprint_task((sum, (1, 2, 3, 4, 5, 6, 7)), keys) == \"sum(*)\"\n\n assert len(pprint_task((sum, list(keys) * 100), keys)) < 100\n assert pprint_task((sum, list(keys) * 100), keys) == \"sum([_, _, _, ...])\"\n assert (\n pprint_task((sum, [1, 2, (sum, [\"a\", 4]), 5, 6] * 100), keys)\n == \"sum([*, *, sum([_, *]), ...])\"\n )\n assert (\n pprint_task((sum, [1, 2, (sum, [\"a\", (sum, [1, 2, 3])]), 5, 6]), keys)\n == \"sum([*, *, sum([_, sum(...)]), ...])\"\n )\n\n # With kwargs\n def foo(w, x, y=(), z=3):\n return w + x + sum(y) + z\n\n task = (apply, foo, (tuple, [\"a\", \"b\"]), (dict, [[\"y\", [\"a\", \"b\"]], [\"z\", \"c\"]]))\n assert pprint_task(task, keys) == \"foo(_, _, y=[_, _], z=_)\"\n task = (apply, foo, (tuple, [\"a\", \"b\"]), (dict, [[\"y\", [\"a\", 1]], [\"z\", 1]]))\n assert pprint_task(task, keys) == \"foo(_, _, y=[_, *], z=*)\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_check_title_test_profiler_plot.assert_len_record_0": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_check_title_test_profiler_plot.assert_len_record_0", "embedding": null, "metadata": {"file_path": "dask/diagnostics/tests/test_profiler.py", "file_name": "test_profiler.py", "file_type": "text/x-python", "category": "test", "start_line": 223, "end_line": 251, "span_ids": ["test_profiler_plot", "check_title"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def check_title(p, title):\n # bokeh 0.12 changed the title attribute to not a string\n return getattr(p.title, \"text\", p.title) == title\n\n\n@pytest.mark.skipif(\"not bokeh\")\n@ignore_abc_warning\ndef test_profiler_plot():\n with prof:\n get(dsk, \"e\")\n p = prof.visualize(\n plot_width=500,\n plot_height=300,\n tools=\"hover\",\n title=\"Not the default\",\n show=False,\n save=False,\n )\n assert p.plot_width == 500\n assert p.plot_height == 300\n assert len(p.tools) == 1\n assert isinstance(p.tools[0], bokeh.models.HoverTool)\n assert check_title(p, \"Not the default\")\n # Test empty, checking for errors\n prof.clear()\n with pytest.warns(None) as record:\n prof.visualize(show=False, save=False)\n\n assert len(record) == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_resource_profiler_plot_test_resource_profiler_plot.for_results_in_1_0.None_6": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_resource_profiler_plot_test_resource_profiler_plot.for_results_in_1_0.None_6", "embedding": null, "metadata": {"file_path": "dask/diagnostics/tests/test_profiler.py", "file_name": "test_profiler.py", "file_type": "text/x-python", "category": "test", "start_line": 254, "end_line": 287, "span_ids": ["test_resource_profiler_plot"], "tokens": 299}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not bokeh\")\n@pytest.mark.skipif(\"not psutil\")\n@ignore_abc_warning\ndef test_resource_profiler_plot():\n with ResourceProfiler(dt=0.01) as rprof:\n get(dsk2, \"c\")\n p = rprof.visualize(\n plot_width=500,\n plot_height=300,\n tools=\"hover\",\n title=\"Not the default\",\n show=False,\n save=False,\n )\n assert p.plot_width == 500\n assert p.plot_height == 300\n assert len(p.tools) == 1\n assert isinstance(p.tools[0], bokeh.models.HoverTool)\n assert check_title(p, \"Not the default\")\n\n # Test with empty and one point, checking for errors\n rprof.clear()\n for results in [[], [(1.0, 0, 0)]]:\n rprof.results = results\n with pytest.warns(None) as record:\n p = rprof.visualize(show=False, save=False)\n assert len(record) == 0\n # Check bounds are valid\n assert p.x_range.start == 0\n assert p.x_range.end == 1\n assert p.y_range.start == 0\n assert p.y_range.end == 100\n assert p.extra_y_ranges[\"memory\"].start == 0\n assert p.extra_y_ranges[\"memory\"].end == 100", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_cache_profiler_plot_test_cache_profiler_plot.assert_len_record_0": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_cache_profiler_plot_test_cache_profiler_plot.assert_len_record_0", "embedding": null, "metadata": {"file_path": "dask/diagnostics/tests/test_profiler.py", "file_name": "test_profiler.py", "file_type": "text/x-python", "category": "test", "start_line": 290, "end_line": 314, "span_ids": ["test_cache_profiler_plot"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not bokeh\")\n@ignore_abc_warning\ndef test_cache_profiler_plot():\n with CacheProfiler(metric_name=\"non-standard\") as cprof:\n get(dsk, \"e\")\n p = cprof.visualize(\n plot_width=500,\n plot_height=300,\n tools=\"hover\",\n title=\"Not the default\",\n show=False,\n 
save=False,\n )\n assert p.plot_width == 500\n assert p.plot_height == 300\n assert len(p.tools) == 1\n assert isinstance(p.tools[0], bokeh.models.HoverTool)\n assert check_title(p, \"Not the default\")\n assert p.axis[1].axis_label == \"Cache Size (non-standard)\"\n # Test empty, checking for errors\n cprof.clear()\n with pytest.warns(None) as record:\n cprof.visualize(show=False, save=False)\n\n assert len(record) == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_plot_multiple_test_plot_multiple.visualize_prof_rprof_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_plot_multiple_test_plot_multiple.visualize_prof_rprof_", "embedding": null, "metadata": {"file_path": "dask/diagnostics/tests/test_profiler.py", "file_name": "test_profiler.py", "file_type": "text/x-python", "category": "test", "start_line": 317, "end_line": 344, "span_ids": ["test_plot_multiple"], "tokens": 293}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not bokeh\")\n@pytest.mark.skipif(\"not psutil\")\n@ignore_abc_warning\ndef test_plot_multiple():\n from dask.diagnostics.profile_visualize import visualize\n\n with ResourceProfiler(dt=0.01) as rprof:\n with prof:\n get(dsk2, \"c\")\n p = visualize(\n [prof, rprof], label_size=50, title=\"Not the default\", show=False, save=False\n )\n bokeh_version = LooseVersion(bokeh.__version__)\n if bokeh_version >= \"1.1.0\":\n figures = [r[0] for r in p.children[1].children]\n elif bokeh_version >= \"0.12.0\":\n figures = [r.children[0] for r in p.children[1].children]\n else:\n figures = [r[0] for r in p.children]\n assert len(figures) == 2\n assert check_title(figures[0], \"Not the default\")\n assert figures[0].xaxis[0].axis_label is None\n assert figures[1].title is None\n assert figures[1].xaxis[0].axis_label == \"Time (s)\"\n # Test empty, checking for errors\n prof.clear()\n rprof.clear()\n visualize([prof, rprof], show=False, save=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_saves_file_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_saves_file_", "embedding": null, "metadata": {"file_path": "dask/diagnostics/tests/test_profiler.py", "file_name": "test_profiler.py", "file_type": "text/x-python", "category": "test", "start_line": 347, "end_line": 392, "span_ids": ["test_get_colors", "test_saves_file"], "tokens": 380}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not bokeh\")\n@ignore_abc_warning\ndef test_saves_file():\n with tmpfile(\"html\") as fn:\n with prof:\n get(dsk, \"e\")\n # Run just to see that it doesn't error\n prof.visualize(show=False, file_path=fn)\n\n assert os.path.exists(fn)\n with open(fn) as f:\n assert \"html\" in f.read().lower()\n\n\n@pytest.mark.skipif(\"not bokeh\")\n@ignore_abc_warning\ndef test_get_colors():\n from dask.diagnostics.profile_visualize import get_colors\n from bokeh.palettes import Blues5, Viridis\n\n # 256-color palettes were added in bokeh 1.4.0\n if LooseVersion(bokeh.__version__) >= \"1.4.0\":\n from bokeh.palettes import Blues256\n\n funcs = list(range(11))\n cmap = get_colors(\"Blues\", funcs)\n assert set(cmap) < set(Blues256)\n assert len(set(cmap)) == 11\n\n funcs = list(range(5))\n cmap = get_colors(\"Blues\", funcs)\n lk = dict(zip(funcs, Blues5))\n assert cmap == [lk[i] for i in funcs]\n\n funcs = [0, 1, 0, 1, 0, 1]\n cmap = get_colors(\"BrBG\", funcs)\n assert len(set(cmap)) == 2\n\n funcs = list(range(100))\n cmap = get_colors(\"Viridis\", funcs)\n assert len(set(cmap)) == 100\n\n funcs = list(range(300))\n cmap = get_colors(\"Viridis\", funcs)\n assert len(set(cmap)) == len(set(Viridis[256]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_progress.py_from_operator_import_add__test_no_tasks.check_bar_completed_capsy": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_progress.py_from_operator_import_add__test_no_tasks.check_bar_completed_capsy", "embedding": null, "metadata": {"file_path": "dask/diagnostics/tests/test_progress.py", "file_name": "test_progress.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 90, "span_ids": ["test_register", "imports", "test_minimum_time", "test_array_compute", "test_progressbar", "test_clean_exit", "test_no_tasks", "check_bar_completed", "test_format_time"], "tokens": 653}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from operator import add, mul\n\nimport pytest\n\nfrom dask.callbacks import Callback\nfrom dask.local import get_sync\nfrom dask.diagnostics import ProgressBar\nfrom dask.diagnostics.progress import format_time\nfrom dask.threaded import get as get_threaded\n\n\ndsk = {\"a\": 1, \"b\": 2, \"c\": (add, \"a\", \"b\"), \"d\": (mul, \"a\", \"b\"), \"e\": (mul, \"c\", \"d\")}\n\n\ndef check_bar_completed(capsys, width=40):\n out, err = capsys.readouterr()\n assert out.count(\"100% Completed\") == 1\n bar, percent, time = [i.strip() for i in out.split(\"\\r\")[-1].split(\"|\")]\n assert bar == \"[\" + \"#\" * width + \"]\"\n assert percent == \"100% Completed\"\n\n\ndef test_array_compute(capsys):\n from dask.array import ones\n\n data = ones((100, 100), dtype=\"f4\", chunks=(100, 100))\n with ProgressBar():\n 
out = data.sum().compute()\n assert out == 10000\n check_bar_completed(capsys)\n\n\ndef test_progressbar(capsys):\n with ProgressBar():\n out = get_threaded(dsk, \"e\")\n assert out == 6\n check_bar_completed(capsys)\n with ProgressBar(width=20):\n out = get_threaded(dsk, \"e\")\n check_bar_completed(capsys, 20)\n\n\ndef test_minimum_time(capsys):\n with ProgressBar(1.0):\n out = get_threaded(dsk, \"e\")\n out, err = capsys.readouterr()\n assert out == \"\" and err == \"\"\n\n\n@pytest.mark.parametrize(\"get\", [get_threaded, get_sync])\ndef test_clean_exit(get):\n dsk = {\"a\": (lambda: 1 / 0,)}\n try:\n with ProgressBar() as pbar:\n get_threaded(dsk, \"a\")\n except ZeroDivisionError:\n pass\n assert not pbar._running\n assert not pbar._timer.is_alive()\n\n\ndef test_format_time():\n assert format_time(1.4) == \" 1.4s\"\n assert format_time(10.4) == \"10.4s\"\n assert format_time(100.4) == \" 1min 40.4s\"\n assert format_time(1000.4) == \"16min 40.4s\"\n assert format_time(10000.4) == \" 2hr 46min 40.4s\"\n\n\ndef test_register(capsys):\n try:\n p = ProgressBar()\n p.register()\n\n assert Callback.active\n\n get_threaded(dsk, \"e\")\n check_bar_completed(capsys)\n\n p.unregister()\n\n assert not Callback.active\n finally:\n Callback.active.clear()\n\n\ndef test_no_tasks(capsys):\n with ProgressBar():\n get_threaded({\"x\": 1}, \"x\")\n check_bar_completed(capsys)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_progress.py_test_with_cache_test_with_cache.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_progress.py_test_with_cache_test_with_cache.None_1", "embedding": null, "metadata": {"file_path": "dask/diagnostics/tests/test_progress.py", "file_name": "test_progress.py", "file_type": "text/x-python", "category": "test", "start_line": 93, "end_line": 109, "span_ids": ["test_with_cache"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_with_cache(capsys):\n cachey = pytest.importorskip(\"cachey\")\n from dask.cache import Cache\n\n c = cachey.Cache(10000)\n cc = Cache(c)\n\n with cc:\n with ProgressBar():\n assert get_threaded({\"x\": (mul, 1, 2)}, \"x\") == 2\n check_bar_completed(capsys)\n assert c.data[\"x\"] == 2\n\n with cc:\n with ProgressBar():\n assert get_threaded({\"x\": (mul, 1, 2), \"y\": (mul, \"x\", 3)}, \"y\") == 6\n check_bar_completed(capsys)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_progress.py_test_with_alias_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_progress.py_test_with_alias_", "embedding": null, "metadata": {"file_path": 
"dask/diagnostics/tests/test_progress.py", "file_name": "test_progress.py", "file_type": "text/x-python", "category": "test", "start_line": 112, "end_line": 132, "span_ids": ["test_store_time", "test_with_alias"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_with_alias(capsys):\n dsk = {\n \"a\": 1,\n \"b\": 2,\n \"c\": (add, \"a\", \"b\"),\n \"d\": (add, 1, 2),\n \"e\": \"d\",\n \"f\": (mul, \"e\", \"c\"),\n }\n with ProgressBar():\n get_threaded(dsk, \"f\")\n check_bar_completed(capsys)\n\n\ndef test_store_time():\n p = ProgressBar()\n with p:\n get_threaded({\"x\": 1}, \"x\")\n\n assert isinstance(p.last_duration, float)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/distributed.py__flake8_noqa_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/distributed.py__flake8_noqa_", "embedding": null, "metadata": {"file_path": "dask/distributed.py", "file_name": "distributed.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 12, "span_ids": ["docstring"], "tokens": 104}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# flake8: noqa\ntry:\n from distributed import *\nexcept ImportError as e:\n msg = (\n \"Dask's distributed scheduler is not installed.\\n\\n\"\n \"Please either conda or pip install dask distributed:\\n\\n\"\n \" conda install dask distributed # either conda install\\n\"\n ' python -m pip install \"dask[distributed]\" --upgrade # or python -m pip install'\n )\n raise ImportError(msg) from e", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_re_task_label.if_any_has_sub_tasks_i_f.else_.return.head": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_re_task_label.if_any_has_sub_tasks_i_f.else_.return.head", "embedding": null, "metadata": {"file_path": "dask/dot.py", "file_name": "dot.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 42, "span_ids": ["imports", "task_label"], "tokens": 267}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import re\nimport os\nfrom functools import partial\n\nfrom .core import istask, 
get_dependencies, ishashable\nfrom .utils import funcname, import_required, key_split, apply\n\n\ngraphviz = import_required(\n \"graphviz\",\n \"Drawing dask graphs requires the \"\n \"`graphviz` python library and the \"\n \"`graphviz` system library to be \"\n \"installed.\",\n)\n\n\ndef task_label(task):\n \"\"\"Label for a task on a dot graph.\n\n Examples\n --------\n >>> from operator import add\n >>> task_label((add, 1, 2))\n 'add'\n >>> task_label((add, (add, 1, 2), 3))\n 'add(...)'\n \"\"\"\n func = task[0]\n if func is apply:\n func = task[1]\n if hasattr(func, \"funcs\"):\n if len(func.funcs) > 1:\n return \"{0}(...)\".format(funcname(func.funcs[0]))\n else:\n head = funcname(func.funcs[0])\n else:\n head = funcname(func)\n if any(has_sub_tasks(i) for i in task[1:]):\n return \"{0}(...)\".format(head)\n else:\n return head", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_has_sub_tasks__UUIDPAT.re_compile_0_9a_z_8_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_has_sub_tasks__UUIDPAT.re_compile_0_9a_z_8_", "embedding": null, "metadata": {"file_path": "dask/dot.py", "file_name": "dot.py", "file_type": "text/x-python", "category": "implementation", "start_line": 45, "end_line": 63, "span_ids": ["name", "has_sub_tasks", "impl:3"], "tokens": 147}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def has_sub_tasks(task):\n \"\"\"Returns True if the task has sub tasks\"\"\"\n if istask(task):\n return True\n elif isinstance(task, list):\n return any(has_sub_tasks(i) for i in task)\n else:\n return False\n\n\ndef name(x):\n try:\n return str(hash(x))\n except TypeError:\n return str(hash(str(x)))\n\n\n_HASHPAT = re.compile(\"([0-9a-z]{32})\")\n_UUIDPAT = re.compile(\"([0-9a-z]{8}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{12})\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_label_label.return.s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_label_label.return.s", "embedding": null, "metadata": {"file_path": "dask/dot.py", "file_name": "dot.py", "file_type": "text/x-python", "category": "implementation", "start_line": 66, "end_line": 104, "span_ids": ["label"], "tokens": 289}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def label(x, cache=None):\n \"\"\"\n\n >>> label('x')\n 'x'\n\n >>> label(('x', 1))\n \"('x', 1)\"\n\n >>> from hashlib import md5\n >>> x = 'x-%s-hello' % 
md5(b'1234').hexdigest()\n >>> x\n 'x-81dc9bdb52d04dc20036dbd8313ed055-hello'\n\n >>> label(x)\n 'x-#-hello'\n\n >>> from uuid import uuid1\n >>> x = 'x-%s-hello' % uuid1()\n >>> x # doctest: +SKIP\n 'x-4c1a3d7e-0b45-11e6-8334-54ee75105593-hello'\n\n >>> label(x)\n 'x-#-hello'\n \"\"\"\n s = str(x)\n for pattern in (_HASHPAT, _UUIDPAT):\n m = re.search(pattern, s)\n if m is not None:\n for h in m.groups():\n if cache is not None:\n n = cache.get(h, len(cache))\n label = \"#{0}\".format(n)\n # cache will be overwritten destructively\n cache[h] = n\n else:\n label = \"#\"\n s = s.replace(h, label)\n return s", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_box_label_box_label.if_isinstance_key_tuple_.else_.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_box_label_box_label.if_isinstance_key_tuple_.else_.return._", "embedding": null, "metadata": {"file_path": "dask/dot.py", "file_name": "dot.py", "file_type": "text/x-python", "category": "implementation", "start_line": 107, "end_line": 125, "span_ids": ["box_label"], "tokens": 119}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def box_label(key, verbose=False):\n \"\"\"Label boxes in graph by chunk index\n\n >>> box_label(('x', 1, 2, 3))\n '(1, 2, 3)'\n >>> box_label(('x', 123))\n '123'\n >>> box_label('x')\n ''\n \"\"\"\n if isinstance(key, tuple):\n key = key[1:]\n if len(key) == 1:\n [key] = key\n return str(key)\n elif verbose:\n return str(key)\n else:\n return \"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_to_graphviz_to_graphviz.return.g": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_to_graphviz_to_graphviz.return.g", "embedding": null, "metadata": {"file_path": "dask/dot.py", "file_name": "dot.py", "file_type": "text/x-python", "category": "implementation", "start_line": 128, "end_line": 196, "span_ids": ["to_graphviz"], "tokens": 481}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_graphviz(\n dsk,\n data_attributes=None,\n function_attributes=None,\n rankdir=\"BT\",\n graph_attr=None,\n node_attr=None,\n edge_attr=None,\n collapse_outputs=False,\n verbose=False,\n **kwargs,\n):\n if data_attributes is None:\n data_attributes = {}\n if function_attributes is None:\n function_attributes = {}\n if graph_attr is None:\n graph_attr = {}\n\n graph_attr = graph_attr or {}\n graph_attr[\"rankdir\"] = rankdir\n 
graph_attr.update(kwargs)\n g = graphviz.Digraph(\n graph_attr=graph_attr, node_attr=node_attr, edge_attr=edge_attr\n )\n\n seen = set()\n connected = set()\n\n for k, v in dsk.items():\n k_name = name(k)\n if istask(v):\n func_name = name((k, \"function\")) if not collapse_outputs else k_name\n if collapse_outputs or func_name not in seen:\n seen.add(func_name)\n attrs = function_attributes.get(k, {}).copy()\n attrs.setdefault(\"label\", key_split(k))\n attrs.setdefault(\"shape\", \"circle\")\n g.node(func_name, **attrs)\n if not collapse_outputs:\n g.edge(func_name, k_name)\n connected.add(func_name)\n connected.add(k_name)\n\n for dep in get_dependencies(dsk, k):\n dep_name = name(dep)\n if dep_name not in seen:\n seen.add(dep_name)\n attrs = data_attributes.get(dep, {}).copy()\n attrs.setdefault(\"label\", box_label(dep, verbose))\n attrs.setdefault(\"shape\", \"box\")\n g.node(dep_name, **attrs)\n g.edge(dep_name, func_name)\n connected.add(dep_name)\n connected.add(func_name)\n\n elif ishashable(v) and v in dsk:\n v_name = name(v)\n g.edge(v_name, k_name)\n connected.add(v_name)\n connected.add(k_name)\n\n if (not collapse_outputs or k_name in connected) and k_name not in seen:\n seen.add(k_name)\n attrs = data_attributes.get(k, {}).copy()\n attrs.setdefault(\"label\", box_label(k, verbose))\n attrs.setdefault(\"shape\", \"box\")\n g.node(k_name, **attrs)\n return g", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_IPYTHON_IMAGE_FORMATS__get_display_cls.if_format_in_IPYTHON_NO_D.else_.raise_ValueError_Unknown": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_IPYTHON_IMAGE_FORMATS__get_display_cls.if_format_in_IPYTHON_NO_D.else_.raise_ValueError_Unknown", "embedding": null, "metadata": {"file_path": "dask/dot.py", "file_name": "dot.py", "file_type": "text/x-python", "category": "implementation", "start_line": 199, "end_line": 230, "span_ids": ["_get_display_cls", "impl:7"], "tokens": 263}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "IPYTHON_IMAGE_FORMATS = frozenset([\"jpeg\", \"png\"])\nIPYTHON_NO_DISPLAY_FORMATS = frozenset([\"dot\", \"pdf\"])\n\n\ndef _get_display_cls(format):\n \"\"\"\n Get the appropriate IPython display class for `format`.\n\n Returns `IPython.display.SVG` if format=='svg', otherwise\n `IPython.display.Image`.\n\n If IPython is not importable, return dummy function that swallows its\n arguments and returns None.\n \"\"\"\n dummy = lambda *args, **kwargs: None\n try:\n import IPython.display as display\n except ImportError:\n # Can't return a display object if no IPython.\n return dummy\n\n if format in IPYTHON_NO_DISPLAY_FORMATS:\n # IPython can't display this format natively, so just return None.\n return dummy\n elif format in IPYTHON_IMAGE_FORMATS:\n # Partially apply `format` so that `Image` and `SVG` supply a uniform\n # interface to the caller.\n return partial(display.Image, format=format)\n elif format == \"svg\":\n return display.SVG\n else:\n raise 
ValueError(\"Unknown format '%s' passed to `dot_graph`\" % format)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_dot_graph_dot_graph.return.graphviz_to_file_g_filen": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_dot_graph_dot_graph.return.graphviz_to_file_g_filen", "embedding": null, "metadata": {"file_path": "dask/dot.py", "file_name": "dot.py", "file_type": "text/x-python", "category": "implementation", "start_line": 233, "end_line": 272, "span_ids": ["dot_graph"], "tokens": 358}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def dot_graph(dsk, filename=\"mydask\", format=None, **kwargs):\n \"\"\"\n Render a task graph using dot.\n\n If `filename` is not None, write a file to disk with the specified name and extension.\n If no extension is specified, '.png' will be used by default.\n\n Parameters\n ----------\n dsk : dict\n The graph to display.\n filename : str or None, optional\n The name of the file to write to disk. If the provided `filename`\n doesn't include an extension, '.png' will be used by default.\n If `filename` is None, no file will be written, and we communicate\n with dot using only pipes. Default is 'mydask'.\n format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional\n Format in which to write output file. Default is 'png'.\n **kwargs\n Additional keyword arguments to forward to `to_graphviz`.\n\n Returns\n -------\n result : None or IPython.display.Image or IPython.display.SVG (See below.)\n\n Notes\n -----\n If IPython is installed, we return an IPython.display object in the\n requested format. If IPython is not installed, we just return None.\n\n We always return None if format is 'pdf' or 'dot', because IPython can't\n display these formats natively. 
Passing these formats with filename=None\n will not produce any useful output.\n\n See Also\n --------\n dask.dot.to_graphviz\n \"\"\"\n g = to_graphviz(dsk, **kwargs)\n return graphviz_to_file(g, filename, format)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_graphviz_to_file_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_graphviz_to_file_", "embedding": null, "metadata": {"file_path": "dask/dot.py", "file_name": "dot.py", "file_type": "text/x-python", "category": "implementation", "start_line": 275, "end_line": 304, "span_ids": ["graphviz_to_file"], "tokens": 222}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def graphviz_to_file(g, filename, format):\n fmts = [\".png\", \".pdf\", \".dot\", \".svg\", \".jpeg\", \".jpg\"]\n if format is None and any(filename.lower().endswith(fmt) for fmt in fmts):\n filename, format = os.path.splitext(filename)\n format = format[1:].lower()\n\n if format is None:\n format = \"png\"\n\n data = g.pipe(format=format)\n if not data:\n raise RuntimeError(\n \"Graphviz failed to properly produce an image. \"\n \"This probably means your installation of graphviz \"\n \"is missing png support. See: \"\n \"https://github.com/ContinuumIO/anaconda-issues/\"\n \"issues/485 for more information.\"\n )\n\n display_cls = _get_display_cls(format)\n\n if not filename:\n return display_cls(data=data)\n\n full_filename = \".\".join([filename, format])\n with open(full_filename, \"wb\") as f:\n f.write(data)\n\n return display_cls(filename=full_filename)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/hashing.py_binascii_hashers_append__hash_sha1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/hashing.py_binascii_hashers_append__hash_sha1", "embedding": null, "metadata": {"file_path": "dask/hashing.py", "file_name": "hashing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 69, "span_ids": ["_hash_sha1", "imports", "impl:19"], "tokens": 405}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import binascii\nimport hashlib\n\n\nhashers = [] # In decreasing performance order\n\n\n# Timings on a largish array:\n# - CityHash is 2x faster than MurmurHash\n# - xxHash is slightly slower than CityHash\n# - MurmurHash is 8x faster than SHA1\n# - SHA1 is significantly faster than all other hashlib algorithms\n\ntry:\n import cityhash # `python -m pip install 
cityhash`\nexcept ImportError:\n pass\nelse:\n # CityHash disabled unless the reference leak in\n # https://github.com/escherba/python-cityhash/pull/16\n # is fixed.\n if cityhash.__version__ >= \"0.2.2\":\n\n def _hash_cityhash(buf):\n \"\"\"\n Produce a 16-byte hash of *buf* using CityHash.\n \"\"\"\n h = cityhash.CityHash128(buf)\n return h.to_bytes(16, \"little\")\n\n hashers.append(_hash_cityhash)\n\ntry:\n import xxhash # `python -m pip install xxhash`\nexcept ImportError:\n pass\nelse:\n\n def _hash_xxhash(buf):\n \"\"\"\n Produce an 8-byte hash of *buf* using xxHash.\n \"\"\"\n return xxhash.xxh64(buf).digest()\n\n hashers.append(_hash_xxhash)\n\ntry:\n import mmh3 # `python -m pip install mmh3`\nexcept ImportError:\n pass\nelse:\n\n def _hash_murmurhash(buf):\n \"\"\"\n Produce a 16-byte hash of *buf* using MurmurHash.\n \"\"\"\n return mmh3.hash_bytes(buf)\n\n hashers.append(_hash_murmurhash)\n\n\ndef _hash_sha1(buf):\n \"\"\"\n Produce a 20-byte hash of *buf* using SHA1.\n \"\"\"\n return hashlib.sha1(buf).digest()\n\n\nhashers.append(_hash_sha1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/hashing.py_hash_buffer_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/hashing.py_hash_buffer_", "embedding": null, "metadata": {"file_path": "dask/hashing.py", "file_name": "hashing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 72, "end_line": 100, "span_ids": ["hash_buffer", "hash_buffer_hex"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def hash_buffer(buf, hasher=None):\n \"\"\"\n Hash a bytes-like (buffer-compatible) object. This function returns\n a good-quality hash but is not cryptographically secure. The fastest\n available algorithm is selected. 
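To make the fallback chain above concrete, a short hedged sketch of ``hash_buffer``/``hash_buffer_hex`` (the digest length of 8, 16, or 20 bytes depends on which optional hasher is installed):

from dask.hashing import hash_buffer, hash_buffer_hex

# Any buffer-compatible object works: bytes, bytearray, memoryview, ...
digest = hash_buffer(b"hello")    # raw bytes from the fastest available hasher
print(hash_buffer_hex(b"hello"))  # the same digest, hex-encoded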
A fixed-length bytes object is returned.\n \"\"\"\n if hasher is not None:\n try:\n return hasher(buf)\n except (TypeError, OverflowError):\n # Some hash libraries may have overly-strict type checking,\n # not accepting all buffers\n pass\n for hasher in hashers:\n try:\n return hasher(buf)\n except (TypeError, OverflowError):\n pass\n raise TypeError(\"unsupported type for hashing: %s\" % (type(buf),))\n\n\ndef hash_buffer_hex(buf, hasher=None):\n \"\"\"\n Same as hash_buffer, but returns its result in hex-encoded form.\n \"\"\"\n h = hash_buffer(buf, hasher)\n s = binascii.b2a_hex(h)\n return s.decode()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph._from_collection_HighLevelGraph._from_collection.return.cls_layers_deps_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph._from_collection_HighLevelGraph._from_collection.return.cls_layers_deps_", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 104, "end_line": 125, "span_ids": ["HighLevelGraph._from_collection"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class HighLevelGraph(Mapping):\n\n @classmethod\n def _from_collection(cls, name, layer, collection):\n \"\"\" `from_collections` optimized for a single collection \"\"\"\n if is_dask_collection(collection):\n graph = collection.__dask_graph__()\n if isinstance(graph, HighLevelGraph):\n layers = graph.layers.copy()\n layers.update({name: layer})\n deps = graph.dependencies.copy()\n with ignoring(AttributeError):\n deps.update({name: set(collection.__dask_layers__())})\n else:\n try:\n [key] = collection.__dask_layers__()\n except AttributeError:\n key = id(graph)\n layers = {name: layer, key: graph}\n deps = {name: {key}, key: set()}\n else:\n raise TypeError(type(collection))\n\n return cls(layers, deps)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.from_collections_HighLevelGraph.from_collections.return.cls_layers_deps_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.from_collections_HighLevelGraph.from_collections.return.cls_layers_deps_", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 387, "end_line": 445, "span_ids": ["HighLevelGraph.from_collections"], "tokens": 489}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class HighLevelGraph(Mapping):\n\n @classmethod\n def from_collections(cls, name, layer, dependencies=()):\n \"\"\"Construct a HighLevelGraph from a new layer and a set of collections\n\n This constructs a HighLevelGraph in the common case where we have a single\n new layer and a set of old collections on which we want to depend.\n\n This pulls out the ``__dask_layers__()`` method of the collections if\n they exist, and adds them to the dependencies for this new layer. It\n also merges all of the layers from all of the dependent collections\n together into the new layers for this graph.\n\n Parameters\n ----------\n name : str\n The name of the new layer\n layer : Mapping\n The graph layer itself\n dependencies : List of Dask collections\n A lit of other dask collections (like arrays or dataframes) that\n have graphs themselves\n\n Examples\n --------\n\n In typical usage we make a new task layer, and then pass that layer\n along with all dependent collections to this method.\n\n >>> def add(self, other):\n ... name = 'add-' + tokenize(self, other)\n ... layer = {(name, i): (add, input_key, other)\n ... for i, input_key in enumerate(self.__dask_keys__())}\n ... graph = HighLevelGraph.from_collections(name, layer, dependencies=[self])\n ... return new_collection(name, graph)\n \"\"\"\n if len(dependencies) == 1:\n return cls._from_collection(name, layer, dependencies[0])\n layers = {name: layer}\n deps = {name: set()}\n for collection in toolz.unique(dependencies, key=id):\n if is_dask_collection(collection):\n graph = collection.__dask_graph__()\n if isinstance(graph, HighLevelGraph):\n layers.update(graph.layers)\n deps.update(graph.dependencies)\n with ignoring(AttributeError):\n deps[name] |= set(collection.__dask_layers__())\n else:\n try:\n [key] = collection.__dask_layers__()\n except AttributeError:\n key = id(graph)\n layers[key] = graph\n deps[name].add(key)\n deps[key] = set()\n else:\n raise TypeError(type(collection))\n\n return cls(layers, deps)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.validate_HighLevelGraph.validate.for_k_in_dep_key1_.if_self_dependencies_k_.raise_ValueError_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.validate_HighLevelGraph.validate.for_k_in_dep_key1_.if_self_dependencies_k_.raise_ValueError_", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 237, "end_line": 266, "span_ids": ["HighLevelGraph.validate"], "tokens": 225}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class HighLevelGraph(Mapping):\n\n def validate(self):\n # Check dependencies\n for 
layer_name, deps in self.dependencies.items():\n if layer_name not in self.layers:\n raise ValueError(\n f\"dependencies[{repr(layer_name)}] not found in layers\"\n )\n for dep in deps:\n if dep not in self.dependencies:\n raise ValueError(f\"{repr(dep)} not found in dependencies\")\n\n # Re-calculate all layer dependencies\n dependencies = compute_layer_dependencies(self.layers)\n\n # Check keys\n dep_key1 = set(self.dependencies.keys())\n dep_key2 = set(dependencies.keys())\n if dep_key1 != dep_key2:\n raise ValueError(\n f\"incorrect dependencies keys {repr(dep_key1)} \"\n f\"expected {repr(dep_key2)}\"\n )\n\n # Check values\n for k in dep_key1:\n if self.dependencies[k] != dependencies[k]:\n raise ValueError(\n f\"incorrect dependencies[{repr(k)}]: {repr(self.dependencies[k])} \"\n f\"expected {repr(dependencies[k])}\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_to_graphviz_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_to_graphviz_", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 269, "end_line": 308, "span_ids": ["to_graphviz"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_graphviz(\n hg,\n data_attributes=None,\n function_attributes=None,\n rankdir=\"BT\",\n graph_attr={},\n node_attr=None,\n edge_attr=None,\n **kwargs,\n):\n from .dot import graphviz, name, label\n\n if data_attributes is None:\n data_attributes = {}\n if function_attributes is None:\n function_attributes = {}\n\n graph_attr = graph_attr or {}\n graph_attr[\"rankdir\"] = rankdir\n graph_attr.update(kwargs)\n g = graphviz.Digraph(\n graph_attr=graph_attr, node_attr=node_attr, edge_attr=edge_attr\n )\n\n cache = {}\n\n for k in hg.dependencies:\n k_name = name(k)\n attrs = data_attributes.get(k, {})\n attrs.setdefault(\"label\", label(k, cache=cache))\n attrs.setdefault(\"shape\", \"box\")\n g.node(k_name, **attrs)\n\n for k, deps in hg.dependencies.items():\n k_name = name(k)\n for dep in deps:\n dep_name = name(dep)\n g.edge(dep_name, k_name)\n return g", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py___os": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py___os", "embedding": null, "metadata": {"file_path": "dask/local.py", "file_name": "local.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 107, "span_ids": ["imports", "docstring"], "tokens": 884}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"\nAsynchronous Shared-Memory Scheduler for Dask Graphs.\n\nThis scheduler coordinates several workers to execute tasks in a dask graph in\nparallel. It depends on an apply_async function as would be found in thread or\nprocess Pools and a corresponding Queue for worker-to-scheduler communication.\n\nIt tries to execute tasks in an order which maintains a small memory footprint\nthroughout execution. It does this by running tasks that allow us to release\ndata resources.\n\n\nTask Selection Policy\n=====================\n\nWhen we complete a task we add more data in to our set of available data; this\nnew data makes new tasks available. We preferentially choose tasks that were\njust made available in a last-in-first-out fashion. We implement this as a\nsimple stack. This results in more depth-first rather than breadth first\nbehavior which encourages us to process batches of data to completion before\nstarting in on new data when possible.\n\nWhen the addition of new data readies multiple tasks simultaneously we add\ntasks to the stack in sorted order so that tasks with greater keynames are run\nfirst. This can be handy to break ties in a predictable fashion.\n\n\nState\n=====\n\nMany functions pass around a ``state`` variable that holds the current state of\nthe computation. This variable consists of several other dictionaries and\nsets, explained below.\n\nConstant state\n--------------\n\n1. dependencies: {x: [a, b ,c]} a,b,c, must be run before x\n2. dependents: {a: [x, y]} a must run before x or y\n\nChanging state\n--------------\n\n### Data\n\n1. cache: available concrete data. {key: actual-data}\n2. released: data that we've seen, used, and released because it is no longer\n needed\n\n### Jobs\n\n1. ready: A fifo stack of ready-to-run tasks\n2. running: A set of tasks currently in execution\n3. finished: A set of finished tasks\n4. waiting: which tasks are still waiting on others :: {key: {keys}}\n Real-time equivalent of dependencies\n5. waiting_data: available data to yet-to-be-run-tasks :: {key: {keys}}\n Real-time equivalent of dependents\n\n\nExamples\n--------\n\n>>> import pprint # doctest: +SKIP\n>>> dsk = {'x': 1, 'y': 2, 'z': (inc, 'x'), 'w': (add, 'z', 'y')} # doctest: +SKIP\n>>> pprint.pprint(start_state_from_dask(dsk)) # doctest: +SKIP\n{'cache': {'x': 1, 'y': 2},\n 'dependencies': {'w': {'z', 'y'}, 'x': set(), 'y': set(), 'z': {'x'}},\n 'dependents': {'w': set(), 'x': {'z'}, 'y': {'w'}, 'z': {'w'}},\n 'finished': set(),\n 'ready': ['z'],\n 'released': set(),\n 'running': set(),\n 'waiting': {'w': {'z'}},\n 'waiting_data': {'x': {'z'}, 'y': {'w'}, 'z': {'w'}}}\n\nOptimizations\n=============\n\nWe build this scheduler with out-of-core array operations in mind. To this end\nwe have encoded some particular optimizations.\n\nCompute to release data\n-----------------------\n\nWhen we choose a new task to execute we often have many options. Policies at\nthis stage are cheap and can significantly impact performance. 
One could\nimagine policies that expose parallelism, drive towards a particular output,\netc.\n\nOur current policy is to run tasks that were most recently made available.\n\n\nInlining computations\n---------------------\n\nWe hold on to intermediate computations either in memory or on disk.\n\nFor very cheap computations that may emit new copies of the data, like\n``np.transpose`` or possibly even ``x + 1``, we choose not to store these as\nseparate pieces of data / tasks. Instead we combine them with the computations\nthat require them. This may result in repeated computation but saves\nsignificantly on space and computational complexity.\n\nSee the function ``inline_functions`` for more information.\n\"\"\"\nimport os", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_from_queue_import_Queue__DEBUG.False": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_from_queue_import_Queue__DEBUG.False", "embedding": null, "metadata": {"file_path": "dask/local.py", "file_name": "local.py", "file_type": "text/x-python", "category": "implementation", "start_line": 108, "end_line": 136, "span_ids": ["imports"], "tokens": 172}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from queue import Queue, Empty\n\nfrom . import config\nfrom .core import flatten, reverse_dict, get_dependencies, has_tasks, _execute_task\nfrom .order import order\nfrom .callbacks import unpack_callbacks, local_callbacks\nfrom .utils_test import add, inc # noqa: F401\n\n\nif os.name == \"nt\":\n # Python 3 Windows Queue.get doesn't handle interrupts properly. 
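Since the docstring above defers to ``inline_functions``, here is a small hedged example of the inlining it performs; ``inc`` and ``add`` are the helpers from ``dask.utils_test``, and the commented result is approximate:

from dask.optimization import inline_functions
from dask.utils_test import add, inc

dsk = {"out": (add, "i", "x"), "i": (inc, "x"), "x": 1}
# Treating `inc` as cheap folds the 'i' task into its consumer instead of
# storing 'i' as a separate intermediate result.
inlined = inline_functions(dsk, output=["out"], fast_functions=[inc])
# roughly: {"out": (add, (inc, "x"), "x"), "x": 1}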
To\n # work around this we poll at a sufficiently large interval that it\n # shouldn't affect performance, but small enough that users trying to kill\n # an application shouldn't care.\n def queue_get(q):\n while True:\n try:\n return q.get(block=True, timeout=0.1)\n except Empty:\n pass\n\n\nelse:\n\n def queue_get(q):\n return q.get()\n\n\nDEBUG = False", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_start_state_from_dask_start_state_from_dask.return.state": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_start_state_from_dask_start_state_from_dask.return.state", "embedding": null, "metadata": {"file_path": "dask/local.py", "file_name": "local.py", "file_type": "text/x-python", "category": "implementation", "start_line": 139, "end_line": 198, "span_ids": ["start_state_from_dask"], "tokens": 568}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def start_state_from_dask(dsk, cache=None, sortkey=None):\n \"\"\"Start state from a dask\n\n Examples\n --------\n\n >>> dsk = {'x': 1, 'y': 2, 'z': (inc, 'x'), 'w': (add, 'z', 'y')} # doctest: +SKIP\n >>> from pprint import pprint # doctest: +SKIP\n >>> pprint(start_state_from_dask(dsk)) # doctest: +SKIP\n {'cache': {'x': 1, 'y': 2},\n 'dependencies': {'w': {'z', 'y'}, 'x': set(), 'y': set(), 'z': {'x'}},\n 'dependents': {'w': set(), 'x': {'z'}, 'y': {'w'}, 'z': {'w'}},\n 'finished': set(),\n 'ready': ['z'],\n 'released': set(),\n 'running': set(),\n 'waiting': {'w': {'z'}},\n 'waiting_data': {'x': {'z'}, 'y': {'w'}, 'z': {'w'}}}\n \"\"\"\n if sortkey is None:\n sortkey = order(dsk).get\n if cache is None:\n cache = config.get(\"cache\", None)\n if cache is None:\n cache = dict()\n data_keys = set()\n for k, v in dsk.items():\n if not has_tasks(dsk, v):\n cache[k] = v\n data_keys.add(k)\n\n dsk2 = dsk.copy()\n dsk2.update(cache)\n\n dependencies = {k: get_dependencies(dsk2, k) for k in dsk}\n waiting = {k: v.copy() for k, v in dependencies.items() if k not in data_keys}\n\n dependents = reverse_dict(dependencies)\n for a in cache:\n for b in dependents.get(a, ()):\n waiting[b].remove(a)\n waiting_data = dict((k, v.copy()) for k, v in dependents.items() if v)\n\n ready_set = set([k for k, v in waiting.items() if not v])\n ready = sorted(ready_set, key=sortkey, reverse=True)\n waiting = dict((k, v) for k, v in waiting.items() if v)\n\n state = {\n \"dependencies\": dependencies,\n \"dependents\": dependents,\n \"waiting\": waiting,\n \"waiting_data\": waiting_data,\n \"cache\": cache,\n \"ready\": ready,\n \"running\": set(),\n \"finished\": set(),\n \"released\": set(),\n }\n\n return state", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_None_2_release_data.if_delete_.del_state_cache_key_": {"__data__": 
{"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_None_2_release_data.if_delete_.del_state_cache_key_", "embedding": null, "metadata": {"file_path": "dask/local.py", "file_name": "local.py", "file_type": "text/x-python", "category": "implementation", "start_line": 201, "end_line": 245, "span_ids": ["execute_task", "release_data", "start_state_from_dask"], "tokens": 235}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"\nRunning tasks\n-------------\n\nWhen we execute tasks we both\n\n1. Perform the actual work of collecting the appropriate data and calling the function\n2. Manage administrative state to coordinate with the scheduler\n\"\"\"\n\n\ndef execute_task(key, task_info, dumps, loads, get_id, pack_exception):\n \"\"\"\n Compute task and handle all administration\n\n See Also\n --------\n _execute_task : actually execute task\n \"\"\"\n try:\n task, data = loads(task_info)\n result = _execute_task(task, data)\n id = get_id()\n result = dumps((result, id))\n failed = False\n except BaseException as e:\n result = pack_exception(e, dumps)\n failed = True\n return key, result, failed\n\n\ndef release_data(key, state, delete=True):\n \"\"\"Remove data from temporary storage\n\n See Also\n finish_task\n \"\"\"\n if key in state[\"waiting_data\"]:\n assert not state[\"waiting_data\"][key]\n del state[\"waiting_data\"][key]\n\n state[\"released\"].add(key)\n\n if delete:\n del state[\"cache\"][key]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_finish_task_finish_task.return.state": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_finish_task_finish_task.return.state", "embedding": null, "metadata": {"file_path": "dask/local.py", "file_name": "local.py", "file_type": "text/x-python", "category": "implementation", "start_line": 248, "end_line": 282, "span_ids": ["finish_task"], "tokens": 265}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def finish_task(\n dsk, key, state, results, sortkey, delete=True, release_data=release_data\n):\n \"\"\"\n Update execution state after a task finishes\n\n Mutates. 
This should run atomically (with a lock).\n \"\"\"\n for dep in sorted(state[\"dependents\"][key], key=sortkey, reverse=True):\n s = state[\"waiting\"][dep]\n s.remove(key)\n if not s:\n del state[\"waiting\"][dep]\n state[\"ready\"].append(dep)\n\n for dep in state[\"dependencies\"][key]:\n if dep in state[\"waiting_data\"]:\n s = state[\"waiting_data\"][dep]\n s.remove(key)\n if not s and dep not in results:\n if DEBUG:\n from chest.core import nbytes\n\n print(\n \"Key: %s\\tDep: %s\\t NBytes: %.2f\\t Release\"\n % (key, dep, sum(map(nbytes, state[\"cache\"].values())) / 1e6)\n )\n release_data(dep, state, delete=delete)\n elif delete and dep not in results:\n release_data(dep, state, delete=delete)\n\n state[\"finished\"].add(key)\n state[\"running\"].remove(key)\n\n return state", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_nested_get_identity.return.x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_nested_get_identity.return.x", "embedding": null, "metadata": {"file_path": "dask/local.py", "file_name": "local.py", "file_type": "text/x-python", "category": "implementation", "start_line": 285, "end_line": 325, "span_ids": ["nested_get", "default_get_id", "identity", "reraise", "default_pack_exception"], "tokens": 204}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def nested_get(ind, coll):\n \"\"\"Get nested index from collection\n\n Examples\n --------\n\n >>> nested_get(1, 'abc')\n 'b'\n >>> nested_get([1, 0], 'abc')\n ('b', 'a')\n >>> nested_get([[1, 0], [0, 1]], 'abc')\n (('b', 'a'), ('a', 'b'))\n \"\"\"\n if isinstance(ind, list):\n return tuple([nested_get(i, coll) for i in ind])\n else:\n return coll[ind]\n\n\ndef default_get_id():\n \"\"\"Default get_id\"\"\"\n return None\n\n\ndef default_pack_exception(e, dumps):\n raise\n\n\ndef reraise(exc, tb=None):\n if exc.__traceback__ is not tb:\n raise exc.with_traceback(tb)\n raise exc\n\n\ndef identity(x):\n \"\"\"Identity function. 
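The scheduler's final step assembles results with ``nested_get`` in the same shape as the requested keys; a quick sketch against a dict cache with hypothetical keys:

from dask.local import nested_get

cache = {"x": 1, "y": 2, "z": 3}
nested_get("y", cache)                # 2
nested_get(["x", ["y", "z"]], cache)  # (1, (2, 3)): nesting mirrors the request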
Returns x.\n\n >>> identity(3)\n 3\n \"\"\"\n return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_None_3_get_async.dsk.dict_dsk_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_None_3_get_async.dsk.dict_dsk_", "embedding": null, "metadata": {"file_path": "dask/local.py", "file_name": "local.py", "file_type": "text/x-python", "category": "implementation", "start_line": 328, "end_line": 415, "span_ids": ["identity", "get_async"], "tokens": 617}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"\nTask Selection\n--------------\n\nWe often have a choice among many tasks to run next. This choice is\ncheap and can significantly impact performance.\n\nWe currently select tasks that have recently been made ready. We hope that\nthis last-in, first-out policy reduces memory footprint.\n\"\"\"\n\n\"\"\"\n`get`\n-----\n\nThe main function of the scheduler. Get is the main entry point.\n\"\"\"\n\n\ndef get_async(\n apply_async,\n num_workers,\n dsk,\n result,\n cache=None,\n get_id=default_get_id,\n rerun_exceptions_locally=None,\n pack_exception=default_pack_exception,\n raise_exception=reraise,\n callbacks=None,\n dumps=identity,\n loads=identity,\n **kwargs\n):\n \"\"\"Asynchronous get function\n\n This is a general version of various asynchronous schedulers for dask. It\n takes an apply_async function as found on Pool objects to form a more\n specific ``get`` method that walks through the dask graph with parallel\n workers, avoiding repeat computation and minimizing memory use.\n\n Parameters\n ----------\n apply_async : function\n Asynchronous apply function as found on Pool or ThreadPool\n num_workers : int\n The number of active tasks we should have at any one time\n dsk : dict\n A dask dictionary specifying a workflow\n result : key or list of keys\n Keys corresponding to desired data\n cache : dict-like, optional\n Temporary storage of results\n get_id : callable, optional\n Function to return the worker id, takes no arguments. Examples are\n `threading.current_thread` and `multiprocessing.current_process`.\n rerun_exceptions_locally : bool, optional\n Whether to rerun failing tasks in local process to enable debugging\n (False by default)\n pack_exception : callable, optional\n Function to take an exception and ``dumps`` method, and return a\n serialized tuple of ``(exception, traceback)`` to send back to the\n scheduler. Default is to just raise the exception.\n raise_exception : callable, optional\n Function that takes an exception and a traceback, and raises an error.\n dumps: callable, optional\n Function to serialize task data and results to communicate between\n worker and parent. Defaults to identity.\n loads: callable, optional\n Inverse function of `dumps`. Defaults to identity.\n callbacks : tuple or list of tuples, optional\n Callbacks are passed in as tuples of length 5. Multiple sets of\n callbacks may be passed in as a list of tuples. 
For more information,\n see the dask.diagnostics documentation.\n\n See Also\n --------\n threaded.get\n \"\"\"\n queue = Queue()\n\n if isinstance(result, list):\n result_flat = set(flatten(result))\n else:\n result_flat = set([result])\n results = set(result_flat)\n\n dsk = dict(dsk)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_get_async.with_local_callbacks_call_get_async.return.nested_get_result_state_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_get_async.with_local_callbacks_call_get_async.return.nested_get_result_state_", "embedding": null, "metadata": {"file_path": "dask/local.py", "file_name": "local.py", "file_type": "text/x-python", "category": "implementation", "start_line": 416, "end_line": 503, "span_ids": ["get_async"], "tokens": 704}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_async(\n apply_async,\n num_workers,\n dsk,\n result,\n cache=None,\n get_id=default_get_id,\n rerun_exceptions_locally=None,\n pack_exception=default_pack_exception,\n raise_exception=reraise,\n callbacks=None,\n dumps=identity,\n loads=identity,\n **kwargs\n):\n # ... other code\n with local_callbacks(callbacks) as callbacks:\n _, _, pretask_cbs, posttask_cbs, _ = unpack_callbacks(callbacks)\n started_cbs = []\n succeeded = False\n # if start_state_from_dask fails, we will have something\n # to pass to the final block.\n state = {}\n try:\n for cb in callbacks:\n if cb[0]:\n cb[0](dsk)\n started_cbs.append(cb)\n\n keyorder = order(dsk)\n\n state = start_state_from_dask(dsk, cache=cache, sortkey=keyorder.get)\n\n for _, start_state, _, _, _ in callbacks:\n if start_state:\n start_state(dsk, state)\n\n if rerun_exceptions_locally is None:\n rerun_exceptions_locally = config.get(\"rerun_exceptions_locally\", False)\n\n if state[\"waiting\"] and not state[\"ready\"]:\n raise ValueError(\"Found no accessible jobs in dask\")\n\n def fire_task():\n \"\"\" Fire off a task to the thread pool \"\"\"\n # Choose a good task to compute\n key = state[\"ready\"].pop()\n state[\"running\"].add(key)\n for f in pretask_cbs:\n f(key, dsk, state)\n\n # Prep data to send\n data = dict(\n (dep, state[\"cache\"][dep]) for dep in get_dependencies(dsk, key)\n )\n # Submit\n apply_async(\n execute_task,\n args=(\n key,\n dumps((dsk[key], data)),\n dumps,\n loads,\n get_id,\n pack_exception,\n ),\n callback=queue.put,\n )\n\n # Seed initial tasks into the thread pool\n while state[\"ready\"] and len(state[\"running\"]) < num_workers:\n fire_task()\n\n # Main loop, wait on tasks to finish, insert new ones\n while state[\"waiting\"] or state[\"ready\"] or state[\"running\"]:\n key, res_info, failed = queue_get(queue)\n if failed:\n exc, tb = loads(res_info)\n if rerun_exceptions_locally:\n data = dict(\n (dep, state[\"cache\"][dep])\n for dep in get_dependencies(dsk, key)\n )\n task = dsk[key]\n _execute_task(task, data) # Re-execute locally\n else:\n raise_exception(exc, tb)\n res, worker_id 
= loads(res_info)\n state[\"cache\"][key] = res\n finish_task(dsk, key, state, results, keyorder.get)\n for f in posttask_cbs:\n f(key, res, dsk, state, worker_id)\n\n while state[\"ready\"] and len(state[\"running\"]) < num_workers:\n fire_task()\n\n succeeded = True\n\n finally:\n for _, _, _, _, finish in started_cbs:\n if finish:\n finish(dsk, state, not succeeded)\n\n return nested_get(result, state[\"cache\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py__Synchronous_concrete__get_sync.return.get_async_apply_sync_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py__Synchronous_concrete__get_sync.return.get_async_apply_sync_1_", "embedding": null, "metadata": {"file_path": "dask/local.py", "file_name": "local.py", "file_type": "text/x-python", "category": "implementation", "start_line": 506, "end_line": 527, "span_ids": ["get_sync", "apply_sync", "get_async"], "tokens": 159}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\" Synchronous concrete version of get_async\n\nUsually we supply a multi-core apply_async function. Here we provide a\nsequential one. This is useful for debugging and for code dominated by the\nGIL.\n\"\"\"\n\n\ndef apply_sync(func, args=(), kwds={}, callback=None):\n \"\"\" A naive synchronous version of apply_async \"\"\"\n res = func(*args, **kwds)\n if callback is not None:\n callback(res)\n\n\ndef get_sync(dsk, keys, **kwargs):\n \"\"\"A naive synchronous version of get_async\n\n Can be useful for debugging.\n \"\"\"\n kwargs.pop(\"num_workers\", None) # if num_workers present, remove it\n return get_async(apply_sync, 1, dsk, keys, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_sortkey_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_sortkey_", "embedding": null, "metadata": {"file_path": "dask/local.py", "file_name": "local.py", "file_type": "text/x-python", "category": "implementation", "start_line": 530, "end_line": 546, "span_ids": ["sortkey"], "tokens": 112}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def sortkey(item):\n \"\"\"Sorting key function that is robust to different types\n\n Both strings and tuples are common key types in dask graphs.\n However, in Python 3 one cannot compare strings with tuples directly.\n This function maps many types to a form where they can be compared.\n\n Examples\n --------\n >>> sortkey('Hello')\n ('str', 
'Hello')\n\n >>> sortkey(('x', 1))\n ('tuple', ('x', 1))\n \"\"\"\n return (type(item).__name__, item)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/multiprocessing.py_copyreg__process_get_id.return.multiprocessing_current_p": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/multiprocessing.py_copyreg__process_get_id.return.multiprocessing_current_p", "embedding": null, "metadata": {"file_path": "dask/multiprocessing.py", "file_name": "multiprocessing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 43, "span_ids": ["imports", "_process_get_id", "_reduce_method_descriptor", "impl"], "tokens": 242}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import copyreg\nimport multiprocessing\nimport os\nimport pickle\nimport sys\nimport traceback\nfrom functools import partial\nfrom warnings import warn\n\nfrom . import config\nfrom .system import CPU_COUNT\nfrom .local import reraise, get_async # TODO: get better get\nfrom .optimization import fuse, cull\n\n\ndef _reduce_method_descriptor(m):\n return getattr, (m.__objclass__, m.__name__)\n\n\n# type(set.union) is used as a proxy to <class 'method_descriptor'>\ncopyreg.pickle(type(set.union), _reduce_method_descriptor)\n\n\ntry:\n import cloudpickle\n\n _dumps = partial(cloudpickle.dumps, protocol=pickle.HIGHEST_PROTOCOL)\n _loads = cloudpickle.loads\nexcept ImportError:\n\n def _dumps(obj, **kwargs):\n try:\n return pickle.dumps(obj, protocol=pickle.HIGHEST_PROTOCOL, **kwargs)\n except (pickle.PicklingError, AttributeError) as exc:\n raise ModuleNotFoundError(\n \"Please install cloudpickle to use the multiprocessing scheduler\"\n ) from exc\n\n _loads = pickle.loads\n\n\ndef _process_get_id():\n return multiprocessing.current_process().ident", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/multiprocessing.py__Remote_Exception_Han_RemoteException.__getattr__.try_.except_AttributeError_.return.getattr_self_exception_k": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/multiprocessing.py__Remote_Exception_Han_RemoteException.__getattr__.try_.except_AttributeError_.return.getattr_self_exception_k", "embedding": null, "metadata": {"file_path": "dask/multiprocessing.py", "file_name": "multiprocessing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 45, "end_line": 78, "span_ids": ["_process_get_id", "RemoteException.__init__", "RemoteException", "RemoteException.__getattr__", "RemoteException.__dir__", "RemoteException.__str__"], "tokens": 286}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
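A quick demonstration of why the type-name prefix used by ``sortkey`` makes mixed key types orderable (plain Python; the keys are hypothetical):

def sortkey(item):
    # same shape as dask.local.sortkey: ('str', 'a') or ('tuple', ('x', 1))
    return (type(item).__name__, item)

keys = [("x", 1), "a", ("x", 0), "b"]
print(sorted(keys, key=sortkey))
# ['a', 'b', ('x', 0), ('x', 1)]: 'str' sorts before 'tuple', so strings come first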
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# -- Remote Exception Handling --\n# By default, tracebacks can't be serialized using pickle. However, the\n# `tblib` library can enable support for this. Since we don't mandate\n# that tblib is installed, we do the following:\n#\n# - If tblib is installed, use it to serialize the traceback and reraise\n# in the scheduler process\n# - Otherwise, use a ``RemoteException`` class to contain a serialized\n# version of the formatted traceback, which will then print in the\n# scheduler process.\n#\n# To enable testing of the ``RemoteException`` class even when tblib is\n# installed, we don't wrap the class in the try block below\nclass RemoteException(Exception):\n \"\"\"Remote Exception\n\n Contains the exception and traceback from a remotely run task\n \"\"\"\n\n def __init__(self, exception, traceback):\n self.exception = exception\n self.traceback = traceback\n\n def __str__(self):\n return str(self.exception) + \"\\n\\nTraceback\\n---------\\n\" + self.traceback\n\n def __dir__(self):\n return sorted(set(dir(type(self)) + list(self.__dict__) + dir(self.exception)))\n\n def __getattr__(self, key):\n try:\n return object.__getattribute__(self, key)\n except AttributeError:\n return getattr(self.exception, key)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/multiprocessing.py_exceptions_get_context.if_sys_platform_win32.else_.return.multiprocessing_get_conte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/multiprocessing.py_exceptions_get_context.if_sys_platform_win32.else_.return.multiprocessing_get_conte", "embedding": null, "metadata": {"file_path": "dask/multiprocessing.py", "file_name": "multiprocessing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 81, "end_line": 150, "span_ids": ["impl:11", "impl:17", "remote_exception", "get_context", "pack_exception", "impl:13"], "tokens": 409}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "exceptions = dict()\n\n\ndef remote_exception(exc, tb):\n \"\"\" Metaclass that wraps exception type in RemoteException \"\"\"\n if type(exc) in exceptions:\n typ = exceptions[type(exc)]\n return typ(exc, tb)\n else:\n try:\n typ = type(\n exc.__class__.__name__,\n (RemoteException, type(exc)),\n {\"exception_type\": type(exc)},\n )\n exceptions[type(exc)] = typ\n return typ(exc, tb)\n except TypeError:\n return exc\n\n\ntry:\n import tblib.pickling_support\n\n tblib.pickling_support.install()\n\n def _pack_traceback(tb):\n return tb\n\n\nexcept ImportError:\n\n def _pack_traceback(tb):\n return \"\".join(traceback.format_tb(tb))\n\n def reraise(exc, tb):\n exc = remote_exception(exc, tb)\n raise exc\n\n\ndef pack_exception(e, dumps):\n exc_type, exc_value, exc_traceback = sys.exc_info()\n tb = _pack_traceback(exc_traceback)\n try:\n result = dumps((e, tb))\n except BaseException as e:\n 
exc_type, exc_value, exc_traceback = sys.exc_info()\n tb = _pack_traceback(exc_traceback)\n result = dumps((e, tb))\n return result\n\n\n_CONTEXT_UNSUPPORTED = \"\"\"\\\nThe 'multiprocessing.context' configuration option will be ignored on Python 2\nand on Windows, because they each only support a single context.\n\"\"\"\n\n\ndef get_context():\n \"\"\" Return the current multiprocessing context.\"\"\"\n # fork context does fork()-without-exec(), which can lead to deadlocks,\n # so default to \"spawn\".\n context_name = config.get(\"multiprocessing.context\", \"spawn\")\n if sys.platform == \"win32\":\n if context_name != \"spawn\":\n # Only spawn is supported on Win32, can't change it:\n warn(_CONTEXT_UNSUPPORTED, UserWarning)\n return multiprocessing\n else:\n return multiprocessing.get_context(context_name)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/multiprocessing.py_get_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/multiprocessing.py_get_", "embedding": null, "metadata": {"file_path": "dask/multiprocessing.py", "file_name": "multiprocessing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 154, "end_line": 245, "span_ids": ["initialize_worker_process", "get"], "tokens": 689}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get(\n dsk,\n keys,\n num_workers=None,\n func_loads=None,\n func_dumps=None,\n optimize_graph=True,\n pool=None,\n **kwargs\n):\n \"\"\"Multiprocessed get function appropriate for Bags\n\n Parameters\n ----------\n dsk : dict\n dask graph\n keys : object or list\n Desired results from graph\n num_workers : int\n Number of worker processes (defaults to number of cores)\n func_dumps : function\n Function to use for function serialization\n (defaults to cloudpickle.dumps if available, otherwise pickle.dumps)\n func_loads : function\n Function to use for function deserialization\n (defaults to cloudpickle.loads if available, otherwise pickle.loads)\n optimize_graph : bool\n If True [default], `fuse` is applied to the graph before computation.\n \"\"\"\n pool = pool or config.get(\"pool\", None)\n num_workers = num_workers or config.get(\"num_workers\", None) or CPU_COUNT\n if pool is None:\n # In order to get consistent hashing in subprocesses, we need to set a\n # consistent seed for the Python hash algorithm. 
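A hedged sketch of driving this multiprocessing ``get`` with an explicit context through dask's config; it assumes cloudpickle is installed so the lambda can be serialized:

import dask
from dask.multiprocessing import get

dsk = {"x": 1, "y": (lambda v: v + 1, "x")}
with dask.config.set({"multiprocessing.context": "spawn"}):
    result = get(dsk, "y")  # tasks run in a spawned process pool
assert result == 2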
Unfortunately, there\n # is no way to specify environment variables only for the Pool\n # processes, so we have to rely on environment variables being\n # inherited.\n if os.environ.get(\"PYTHONHASHSEED\") in (None, \"0\"):\n # This number is arbitrary; it was chosen to commemorate\n # https://github.com/dask/dask/issues/6640.\n os.environ[\"PYTHONHASHSEED\"] = \"6640\"\n context = get_context()\n pool = context.Pool(num_workers, initializer=initialize_worker_process)\n cleanup = True\n else:\n cleanup = False\n\n # Optimize Dask\n dsk2, dependencies = cull(dsk, keys)\n if optimize_graph:\n dsk3, dependencies = fuse(dsk2, keys, dependencies)\n else:\n dsk3 = dsk2\n\n # We specify marshalling functions in order to catch serialization\n # errors and report them to the user.\n loads = func_loads or config.get(\"func_loads\", None) or _loads\n dumps = func_dumps or config.get(\"func_dumps\", None) or _dumps\n\n # Note former versions used a multiprocessing Manager to share\n # a Queue between parent and workers, but this is fragile on Windows\n # (issue #1652).\n try:\n # Run\n result = get_async(\n pool.apply_async,\n len(pool._pool),\n dsk3,\n keys,\n get_id=_process_get_id,\n dumps=dumps,\n loads=loads,\n pack_exception=pack_exception,\n raise_exception=reraise,\n **kwargs\n )\n finally:\n if cleanup:\n pool.close()\n return result\n\n\ndef initialize_worker_process():\n \"\"\"\n Initialize a worker process before running any tasks in it.\n \"\"\"\n # If Numpy is already imported, presumably its random state was\n # inherited from the parent => re-seed it.\n np = sys.modules.get(\"numpy\")\n if np is not None:\n np.random.seed()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_math_cull.return.out_dependencies": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_math_cull.return.out_dependencies", "embedding": null, "metadata": {"file_path": "dask/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 61, "span_ids": ["imports", "cull"], "tokens": 413}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import math\nimport numbers\nfrom enum import Enum\n\nfrom . 
import config, core, utils\nfrom .core import (\n istask,\n get_dependencies,\n subs,\n toposort,\n flatten,\n reverse_dict,\n ishashable,\n)\nfrom .utils_test import add, inc # noqa: F401\n\n\ndef cull(dsk, keys):\n \"\"\"Return new dask with only the tasks required to calculate keys.\n\n In other words, remove unnecessary tasks from dask.\n ``keys`` may be a single key or list of keys.\n\n Examples\n --------\n\n >>> d = {'x': 1, 'y': (inc, 'x'), 'out': (add, 'x', 10)} # doctest: +SKIP\n >>> dsk, dependencies = cull(d, 'out') # doctest: +SKIP\n >>> dsk # doctest: +SKIP\n {'x': 1, 'out': (add, 'x', 10)}\n >>> dependencies # doctest: +SKIP\n {'x': set(), 'out': set(['x'])}\n\n Returns\n -------\n dsk: culled dask graph\n dependencies: Dict mapping {key: [deps]}. Useful side effect to accelerate\n other optimizations, notably fuse.\n \"\"\"\n if not isinstance(keys, (list, set)):\n keys = [keys]\n\n seen = set()\n dependencies = dict()\n out = {}\n work = list(set(flatten(keys)))\n\n while work:\n new_work = []\n for k in work:\n dependencies_k = get_dependencies(dsk, k, as_list=True) # fuse needs lists\n out[k] = dsk[k]\n dependencies[k] = dependencies_k\n for d in dependencies_k:\n if d not in seen:\n seen.add(d)\n new_work.append(d)\n\n work = new_work\n\n return out, dependencies", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_default_fused_linear_keys_renamer_default_fused_linear_keys_renamer.if_typ_is_str_.else_.return.None": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_default_fused_linear_keys_renamer_default_fused_linear_keys_renamer.if_typ_is_str_.else_.return.None", "embedding": null, "metadata": {"file_path": "dask/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 63, "end_line": 75, "span_ids": ["default_fused_linear_keys_renamer"], "tokens": 138}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def default_fused_linear_keys_renamer(keys):\n \"\"\"Create new keys for fused tasks\"\"\"\n typ = type(keys[0])\n if typ is str:\n names = [utils.key_split(x) for x in keys[:0:-1]]\n names.append(keys[0])\n return \"-\".join(names)\n elif typ is tuple and len(keys[0]) > 0 and isinstance(keys[0][0], str):\n names = [utils.key_split(x) for x in keys[:0:-1]]\n names.append(keys[0][0])\n return (\"-\".join(names),) + keys[0][1:]\n else:\n return None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_fuse_linear_fuse_linear.dependencies._k_set_v_for_k_v_in_de": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_fuse_linear_fuse_linear.dependencies._k_set_v_for_k_v_in_de", 
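The skipped doctest above can be written out concretely; a small sketch using the ``inc``/``add`` helpers that ``dask.optimization`` itself imports from ``dask.utils_test`` (commented results are approximate):

from dask.optimization import cull
from dask.utils_test import add, inc

d = {"x": 1, "y": (inc, "x"), "out": (add, "x", 10)}
dsk, dependencies = cull(d, "out")
# dsk == {"out": (add, "x", 10), "x": 1}
# dependencies == {"out": ["x"], "x": []}  (lists, because fuse needs counts)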
"embedding": null, "metadata": {"file_path": "dask/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 78, "end_line": 162, "span_ids": ["fuse_linear"], "tokens": 772}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def fuse_linear(dsk, keys=None, dependencies=None, rename_keys=True):\n \"\"\"Return new dask graph with linear sequence of tasks fused together.\n\n If specified, the keys in ``keys`` keyword argument are *not* fused.\n Supply ``dependencies`` from output of ``cull`` if available to avoid\n recomputing dependencies.\n\n **This function is mostly superseded by ``fuse``**\n\n Parameters\n ----------\n dsk: dict\n keys: list\n dependencies: dict, optional\n {key: [list-of-keys]}. Must be a list to provide count of each key\n This optional input often comes from ``cull``\n rename_keys: bool or func, optional\n Whether to rename fused keys with ``default_fused_linear_keys_renamer``\n or not. Renaming fused keys can keep the graph more understandable\n and comprehensive, but it comes at the cost of additional processing.\n If False, then the top-most key will be used. For advanced usage, a\n func is also accepted, ``new_key = rename_keys(fused_key_list)``.\n\n Examples\n --------\n >>> d = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b')}\n >>> dsk, dependencies = fuse(d)\n >>> dsk # doctest: +SKIP\n {'a-b-c': (inc, (inc, 1)), 'c': 'a-b-c'}\n >>> dsk, dependencies = fuse(d, rename_keys=False)\n >>> dsk # doctest: +SKIP\n {'c': (inc, (inc, 1))}\n >>> dsk, dependencies = fuse(d, keys=['b'], rename_keys=False)\n >>> dsk # doctest: +SKIP\n {'b': (inc, 1), 'c': (inc, 'b')}\n\n Returns\n -------\n dsk: output graph with keys fused\n dependencies: dict mapping dependencies after fusion. Useful side effect\n to accelerate other downstream optimizations.\n \"\"\"\n if keys is not None and not isinstance(keys, set):\n if not isinstance(keys, list):\n keys = [keys]\n keys = set(flatten(keys))\n\n if dependencies is None:\n dependencies = {k: get_dependencies(dsk, k, as_list=True) for k in dsk}\n\n # locate all members of linear chains\n child2parent = {}\n unfusible = set()\n for parent in dsk:\n deps = dependencies[parent]\n has_many_children = len(deps) > 1\n for child in deps:\n if keys is not None and child in keys:\n unfusible.add(child)\n elif child in child2parent:\n del child2parent[child]\n unfusible.add(child)\n elif has_many_children:\n unfusible.add(child)\n elif child not in unfusible:\n child2parent[child] = parent\n\n # construct the chains from ancestor to descendant\n chains = []\n parent2child = dict(map(reversed, child2parent.items()))\n while child2parent:\n child, parent = child2parent.popitem()\n chain = [child, parent]\n while parent in child2parent:\n parent = child2parent.pop(parent)\n del parent2child[parent]\n chain.append(parent)\n chain.reverse()\n while child in parent2child:\n child = parent2child.pop(child)\n del child2parent[child]\n chain.append(child)\n chains.append(chain)\n\n dependencies = {k: set(v) for k, v in dependencies.items()}\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_fuse_linear.if_rename_keys_is_True___flat_set.return.set_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_fuse_linear.if_rename_keys_is_True___flat_set.return.set_x_", "embedding": null, "metadata": {"file_path": "dask/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 164, "end_line": 224, "span_ids": ["fuse_linear", "_flat_set"], "tokens": 441}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def fuse_linear(dsk, keys=None, dependencies=None, rename_keys=True):\n # ... other code\n\n if rename_keys is True:\n key_renamer = default_fused_linear_keys_renamer\n elif rename_keys is False:\n key_renamer = None\n else:\n key_renamer = rename_keys\n\n # create a new dask with fused chains\n rv = {}\n fused = set()\n aliases = set()\n is_renamed = False\n for chain in chains:\n if key_renamer is not None:\n new_key = key_renamer(chain)\n is_renamed = (\n new_key is not None and new_key not in dsk and new_key not in rv\n )\n child = chain.pop()\n val = dsk[child]\n while chain:\n parent = chain.pop()\n dependencies[parent].update(dependencies.pop(child))\n dependencies[parent].remove(child)\n val = subs(dsk[parent], child, val)\n fused.add(child)\n child = parent\n fused.add(child)\n if is_renamed:\n rv[new_key] = val\n rv[child] = new_key\n dependencies[new_key] = dependencies[child]\n dependencies[child] = {new_key}\n aliases.add(child)\n else:\n rv[child] = val\n for key, val in dsk.items():\n if key not in fused:\n rv[key] = val\n if aliases:\n for key, deps in dependencies.items():\n for old_key in deps & aliases:\n new_key = rv[old_key]\n deps.remove(old_key)\n deps.add(new_key)\n rv[key] = subs(rv[key], old_key, new_key)\n if keys is not None:\n for key in aliases - keys:\n del rv[key]\n del dependencies[key]\n return rv, dependencies\n\n\ndef _flat_set(x):\n if x is None:\n return set()\n elif isinstance(x, set):\n return x\n elif not isinstance(x, (list, set)):\n x = [x]\n return set(x)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_inline_inline.return.dsk2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_inline_inline.return.dsk2", "embedding": null, "metadata": {"file_path": "dask/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 228, "end_line": 286, "span_ids": ["inline"], "tokens": 586}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", 
"end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def inline(dsk, keys=None, inline_constants=True, dependencies=None):\n \"\"\"Return new dask with the given keys inlined with their values.\n\n Inlines all constants if ``inline_constants`` keyword is True. Note that\n the constant keys will remain in the graph, to remove them follow\n ``inline`` with ``cull``.\n\n Examples\n --------\n\n >>> d = {'x': 1, 'y': (inc, 'x'), 'z': (add, 'x', 'y')} # doctest: +SKIP\n >>> inline(d) # doctest: +SKIP\n {'x': 1, 'y': (inc, 1), 'z': (add, 1, 'y')}\n\n >>> inline(d, keys='y') # doctest: +SKIP\n {'x': 1, 'y': (inc, 1), 'z': (add, 1, (inc, 1))}\n\n >>> inline(d, keys='y', inline_constants=False) # doctest: +SKIP\n {'x': 1, 'y': (inc, 1), 'z': (add, 'x', (inc, 'x'))}\n \"\"\"\n if dependencies and isinstance(next(iter(dependencies.values())), list):\n dependencies = {k: set(v) for k, v in dependencies.items()}\n\n keys = _flat_set(keys)\n\n if dependencies is None:\n dependencies = {k: get_dependencies(dsk, k) for k in dsk}\n\n if inline_constants:\n keys.update(\n k\n for k, v in dsk.items()\n if (ishashable(v) and v in dsk) or (not dependencies[k] and not istask(v))\n )\n\n # Keys may depend on other keys, so determine replace order with toposort.\n # The values stored in `keysubs` do not include other keys.\n replaceorder = toposort(\n dict((k, dsk[k]) for k in keys if k in dsk), dependencies=dependencies\n )\n keysubs = {}\n for key in replaceorder:\n val = dsk[key]\n for dep in keys & dependencies[key]:\n if dep in keysubs:\n replace = keysubs[dep]\n else:\n replace = dsk[dep]\n val = subs(val, dep, replace)\n keysubs[key] = val\n\n # Make new dask with substitutions\n dsk2 = keysubs.copy()\n for key, val in dsk.items():\n if key not in dsk2:\n for item in keys & dependencies[key]:\n val = subs(val, item, keysubs[item])\n dsk2[key] = val\n return dsk2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_inline_functions_inline_functions.return.dsk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_inline_functions_inline_functions.return.dsk", "embedding": null, "metadata": {"file_path": "dask/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 287, "end_line": 340, "span_ids": ["inline_functions"], "tokens": 429}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def inline_functions(\n dsk, output, fast_functions=None, inline_constants=False, dependencies=None\n):\n \"\"\"Inline cheap functions into larger operations\n\n Examples\n --------\n >>> dsk = {'out': (add, 'i', 'd'), # doctest: +SKIP\n ... 'i': (inc, 'x'),\n ... 'd': (double, 'y'),\n ... 
'x': 1, 'y': 1}\n >>> inline_functions(dsk, [], [inc]) # doctest: +SKIP\n {'out': (add, (inc, 'x'), 'd'),\n 'd': (double, 'y'),\n 'x': 1, 'y': 1}\n\n Protect output keys. In the example below ``i`` is not inlined because it\n is marked as an output key.\n\n >>> inline_functions(dsk, ['i', 'out'], [inc, double]) # doctest: +SKIP\n {'out': (add, 'i', (double, 'y')),\n 'i': (inc, 'x'),\n 'x': 1, 'y': 1}\n \"\"\"\n if not fast_functions:\n return dsk\n\n output = set(output)\n\n fast_functions = set(fast_functions)\n\n if dependencies is None:\n dependencies = {k: get_dependencies(dsk, k) for k in dsk}\n dependents = reverse_dict(dependencies)\n\n def inlinable(v):\n try:\n return functions_of(v).issubset(fast_functions)\n except TypeError:\n return False\n\n keys = [\n k\n for k, v in dsk.items()\n if istask(v) and dependents[k] and k not in output and inlinable(v)\n ]\n\n if keys:\n dsk = inline(\n dsk, keys, inline_constants=inline_constants, dependencies=dependencies\n )\n for k in keys:\n del dsk[k]\n return dsk", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_unwrap_partial_functions_of.return.funcs": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_unwrap_partial_functions_of.return.funcs", "embedding": null, "metadata": {"file_path": "dask/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 343, "end_line": 374, "span_ids": ["unwrap_partial", "functions_of"], "tokens": 184}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def unwrap_partial(func):\n while hasattr(func, \"func\"):\n func = func.func\n return func\n\n\ndef functions_of(task):\n \"\"\"Set of functions contained within nested task\n\n Examples\n --------\n >>> task = (add, (mul, 1, 2), (inc, 3)) # doctest: +SKIP\n >>> functions_of(task) # doctest: +SKIP\n set([add, mul, inc])\n \"\"\"\n funcs = set()\n\n work = [task]\n sequence_types = {list, tuple}\n\n while work:\n new_work = []\n for task in work:\n if type(task) in sequence_types:\n if istask(task):\n funcs.add(unwrap_partial(task[0]))\n new_work.extend(task[1:])\n else:\n new_work.extend(task)\n work = new_work\n\n return funcs", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_default_fused_keys_renamer__default.Default_token": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_default_fused_keys_renamer__default.Default_token", "embedding": null, "metadata": {"file_path": "dask/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 377, "end_line": 423, "span_ids": ["Default", 
"Default.__repr__", "default_fused_keys_renamer", "impl"], "tokens": 420}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def default_fused_keys_renamer(keys, max_fused_key_length=120):\n \"\"\"Create new keys for ``fuse`` tasks.\n\n The optional parameter `max_fused_key_length` is used to limit the maximum string length for each renamed key.\n If this parameter is set to `None`, there is no limit.\n \"\"\"\n it = reversed(keys)\n first_key = next(it)\n typ = type(first_key)\n\n if max_fused_key_length: # Take into account size of hash suffix\n max_fused_key_length -= 5\n\n def _enforce_max_key_limit(key_name):\n if max_fused_key_length and len(key_name) > max_fused_key_length:\n name_hash = f\"{hash(key_name):x}\"[:4]\n key_name = f\"{key_name[:max_fused_key_length]}-{name_hash}\"\n return key_name\n\n if typ is str:\n first_name = utils.key_split(first_key)\n names = {utils.key_split(k) for k in it}\n names.discard(first_name)\n names = sorted(names)\n names.append(first_key)\n concatenated_name = \"-\".join(names)\n return _enforce_max_key_limit(concatenated_name)\n elif typ is tuple and len(first_key) > 0 and isinstance(first_key[0], str):\n first_name = utils.key_split(first_key)\n names = {utils.key_split(k) for k in it}\n names.discard(first_name)\n names = sorted(names)\n names.append(first_key[0])\n concatenated_name = \"-\".join(names)\n return (_enforce_max_key_limit(concatenated_name),) + first_key[1:]\n\n\n# PEP-484 compliant singleton constant\n# https://www.python.org/dev/peps/pep-0484/#support-for-singleton-types-in-unions\nclass Default(Enum):\n token = 0\n\n def __repr__(self) -> str:\n return \"\"\n\n\n_default = Default.token", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_fuse_fuse.if_not_config_get_optimi.return.dsk_dependencies": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_fuse_fuse.if_not_config_get_optimi.return.dsk_dependencies", "embedding": null, "metadata": {"file_path": "dask/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 426, "end_line": 496, "span_ids": ["fuse"], "tokens": 732}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def fuse(\n dsk,\n keys=None,\n dependencies=None,\n ave_width=_default,\n max_width=_default,\n max_height=_default,\n max_depth_new_edges=_default,\n rename_keys=_default,\n fuse_subgraphs=_default,\n):\n \"\"\"Fuse tasks that form reductions; more advanced than ``fuse_linear``\n\n This trades parallelism opportunities for faster scheduling by making tasks\n less granular. 
It can replace ``fuse_linear`` in optimization passes.\n\n This optimization applies to all reductions--tasks that have at most one\n dependent--so it may be viewed as fusing \"multiple input, single output\"\n groups of tasks into a single task. There are many parameters to fine\n tune the behavior, which are described below. ``ave_width`` is the\n natural parameter with which to compare parallelism to granularity, so\n it should always be specified. Reasonable values for other parameters\n will be determined using ``ave_width`` if necessary.\n\n Parameters\n ----------\n dsk: dict\n dask graph\n keys: list or set, optional\n Keys that must remain in the returned dask graph\n dependencies: dict, optional\n {key: [list-of-keys]}. Must be a list to provide count of each key\n This optional input often comes from ``cull``\n ave_width: float (default 1)\n Upper limit for ``width = num_nodes / height``, a good measure of\n parallelizability.\n dask.config key: ``optimization.fuse.ave-width``\n max_width: int (default infinite)\n Don't fuse if total width is greater than this.\n dask.config key: ``optimization.fuse.max-width``\n max_height: int or None (default None)\n Don't fuse more than this many levels. Set to None to dynamically\n adjust to ``1.5 + ave_width * log(ave_width + 1)``.\n dask.config key: ``optimization.fuse.max-height``\n max_depth_new_edges: int or None (default None)\n Don't fuse if new dependencies are added after this many levels.\n Set to None to dynamically adjust to ave_width * 1.5.\n dask.config key: ``optimization.fuse.max-depth-new-edges``\n rename_keys: bool or func, optional (default True)\n Whether to rename the fused keys with ``default_fused_keys_renamer``\n or not. Renaming fused keys can keep the graph more understandable\n and comprehensive, but it comes at the cost of additional processing.\n If False, then the top-most key will be used. For advanced usage, a\n function to create the new name is also accepted.\n dask.config key: ``optimization.fuse.rename-keys``\n fuse_subgraphs : bool or None, optional (default None)\n Whether to fuse multiple tasks into ``SubgraphCallable`` objects.\n Set to None to let the default optimizer of individual dask collections decide.\n If no collection-specific default exists, None defaults to False.\n dask.config key: ``optimization.fuse.subgraphs``\n\n Returns\n -------\n dsk\n output graph with keys fused\n dependencies\n dict mapping dependencies after fusion. Useful side effect to accelerate other\n downstream optimizations.\n \"\"\"\n if not config.get(\"optimization.fuse.active\"):\n return dsk, dependencies\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_fuse.if_keys_is_not_None_and_n_fuse.children_stack_pop.children_stack_pop": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_fuse.if_keys_is_not_None_and_n_fuse.children_stack_pop.children_stack_pop", "embedding": null, "metadata": {"file_path": "dask/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 498, "end_line": 586, "span_ids": ["fuse"], "tokens": 859}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def fuse(\n dsk,\n keys=None,\n dependencies=None,\n ave_width=_default,\n max_width=_default,\n max_height=_default,\n max_depth_new_edges=_default,\n rename_keys=_default,\n fuse_subgraphs=_default,\n):\n # ... other code\n\n if keys is not None and not isinstance(keys, set):\n if not isinstance(keys, list):\n keys = [keys]\n keys = set(flatten(keys))\n\n # Read defaults from dask.yaml and/or user-defined config file\n if ave_width is _default:\n ave_width = config.get(\"optimization.fuse.ave-width\")\n assert ave_width is not _default\n if max_height is _default:\n max_height = config.get(\"optimization.fuse.max-height\")\n assert max_height is not _default\n if max_depth_new_edges is _default:\n max_depth_new_edges = config.get(\"optimization.fuse.max-depth-new-edges\")\n assert max_depth_new_edges is not _default\n if max_depth_new_edges is None:\n max_depth_new_edges = ave_width * 1.5\n if max_width is _default:\n max_width = config.get(\"optimization.fuse.max-width\")\n assert max_width is not _default\n if max_width is None:\n max_width = 1.5 + ave_width * math.log(ave_width + 1)\n if fuse_subgraphs is _default:\n fuse_subgraphs = config.get(\"optimization.fuse.subgraphs\")\n assert fuse_subgraphs is not _default\n if fuse_subgraphs is None:\n fuse_subgraphs = False\n\n if not ave_width or not max_height:\n return dsk, dependencies\n\n if rename_keys is _default:\n rename_keys = config.get(\"optimization.fuse.rename-keys\")\n assert rename_keys is not _default\n if rename_keys is True:\n key_renamer = default_fused_keys_renamer\n elif rename_keys is False:\n key_renamer = None\n elif not callable(rename_keys):\n raise TypeError(\"rename_keys must be a boolean or callable\")\n else:\n key_renamer = rename_keys\n rename_keys = key_renamer is not None\n\n if dependencies is None:\n deps = {k: get_dependencies(dsk, k, as_list=True) for k in dsk}\n else:\n deps = dict(dependencies)\n\n rdeps = {}\n for k, vals in deps.items():\n for v in vals:\n if v not in rdeps:\n rdeps[v] = [k]\n else:\n rdeps[v].append(k)\n deps[k] = set(vals)\n\n reducible = {k for k, vals in rdeps.items() if len(vals) == 1}\n if keys:\n reducible -= keys\n\n for k, v in dsk.items():\n if type(v) is not tuple and not isinstance(v, (numbers.Number, str)):\n reducible.discard(k)\n\n if not reducible and (\n not fuse_subgraphs or all(len(set(v)) != 1 for v in rdeps.values())\n 
):\n        # Quick return if there's nothing to do. Only progress if there are tasks\n        # fusible by the main `fuse`, or by `fuse_subgraphs` if enabled.\n        return dsk, deps\n\n    rv = dsk.copy()\n    fused_trees = {}\n    # These are the stacks we use to store data as we traverse the graph\n    info_stack = []\n    children_stack = []\n    # For speed\n    deps_pop = deps.pop\n    reducible_add = reducible.add\n    reducible_pop = reducible.pop\n    reducible_remove = reducible.remove\n    fused_trees_pop = fused_trees.pop\n    info_stack_append = info_stack.append\n    info_stack_pop = info_stack.pop\n    children_stack_append = children_stack.append\n    children_stack_extend = children_stack.extend\n    children_stack_pop = children_stack.pop\n    # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_fuse.while_reducible__fuse.return.rv_deps": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_fuse.while_reducible__fuse.return.rv_deps", "embedding": null, "metadata": {"file_path": "dask/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 587, "end_line": 847, "span_ids": ["fuse"], "tokens": 1721}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def fuse(\n    dsk,\n    keys=None,\n    dependencies=None,\n    ave_width=_default,\n    max_width=_default,\n    max_height=_default,\n    max_depth_new_edges=_default,\n    rename_keys=_default,\n    fuse_subgraphs=_default,\n):\n    # ... 
other code\n while reducible:\n parent = reducible_pop()\n reducible_add(parent)\n while parent in reducible:\n # Go to the top\n parent = rdeps[parent][0]\n children_stack_append(parent)\n children_stack_extend(reducible & deps[parent])\n while True:\n child = children_stack[-1]\n if child != parent:\n children = reducible & deps[child]\n while children:\n # Depth-first search\n children_stack_extend(children)\n parent = child\n child = children_stack[-1]\n children = reducible & deps[child]\n children_stack_pop()\n # This is a leaf node in the reduction region\n # key, task, fused_keys, height, width, number of nodes, fudge, set of edges\n info_stack_append(\n (\n child,\n rv[child],\n [child] if rename_keys else None,\n 1,\n 1,\n 1,\n 0,\n deps[child] - reducible,\n )\n )\n else:\n children_stack_pop()\n # Calculate metrics and fuse as appropriate\n deps_parent = deps[parent]\n edges = deps_parent - reducible\n children = deps_parent - edges\n num_children = len(children)\n\n if num_children == 1:\n (\n child_key,\n child_task,\n child_keys,\n height,\n width,\n num_nodes,\n fudge,\n children_edges,\n ) = info_stack_pop()\n num_children_edges = len(children_edges)\n\n if fudge > num_children_edges - 1 >= 0:\n fudge = num_children_edges - 1\n edges |= children_edges\n no_new_edges = len(edges) == num_children_edges\n if not no_new_edges:\n fudge += 1\n if (\n (num_nodes + fudge) / height <= ave_width\n and\n # Sanity check; don't go too deep if new levels introduce new edge dependencies\n (no_new_edges or height < max_depth_new_edges)\n ):\n # Perform substitutions as we go\n val = subs(dsk[parent], child_key, child_task)\n deps_parent.remove(child_key)\n deps_parent |= deps_pop(child_key)\n del rv[child_key]\n reducible_remove(child_key)\n if rename_keys:\n child_keys.append(parent)\n fused_trees[parent] = child_keys\n fused_trees_pop(child_key, None)\n\n if children_stack:\n if no_new_edges:\n # Linear fuse\n info_stack_append(\n (\n parent,\n val,\n child_keys,\n height,\n width,\n num_nodes,\n fudge,\n edges,\n )\n )\n else:\n info_stack_append(\n (\n parent,\n val,\n child_keys,\n height + 1,\n width,\n num_nodes + 1,\n fudge,\n edges,\n )\n )\n else:\n rv[parent] = val\n break\n else:\n rv[child_key] = child_task\n reducible_remove(child_key)\n if children_stack:\n # Allow the parent to be fused, but only under strict circumstances.\n # Ensure that linear chains may still be fused.\n if fudge > int(ave_width - 1):\n fudge = int(ave_width - 1)\n # This task *implicitly* depends on `edges`\n info_stack_append(\n (\n parent,\n rv[parent],\n [parent] if rename_keys else None,\n 1,\n width,\n 1,\n fudge,\n edges,\n )\n )\n else:\n break\n else:\n child_keys = []\n height = 1\n width = 0\n num_single_nodes = 0\n num_nodes = 0\n fudge = 0\n children_edges = set()\n max_num_edges = 0\n children_info = info_stack[-num_children:]\n del info_stack[-num_children:]\n for (\n cur_key,\n cur_task,\n cur_keys,\n cur_height,\n cur_width,\n cur_num_nodes,\n cur_fudge,\n cur_edges,\n ) in children_info:\n if cur_height == 1:\n num_single_nodes += 1\n elif cur_height > height:\n height = cur_height\n width += cur_width\n num_nodes += cur_num_nodes\n fudge += cur_fudge\n if len(cur_edges) > max_num_edges:\n max_num_edges = len(cur_edges)\n children_edges |= cur_edges\n # Fudge factor to account for possible parallelism with the boundaries\n num_children_edges = len(children_edges)\n fudge += min(\n num_children - 1, max(0, num_children_edges - max_num_edges)\n )\n\n if fudge > num_children_edges - 1 >= 
0:\n                        fudge = num_children_edges - 1\n                    edges |= children_edges\n                    no_new_edges = len(edges) == num_children_edges\n                    if not no_new_edges:\n                        fudge += 1\n                    if (\n                        (num_nodes + fudge) / height <= ave_width\n                        and num_single_nodes <= ave_width\n                        and width <= max_width\n                        and height <= max_height\n                        and\n                        # Sanity check; don't go too deep if new levels introduce new edge dependencies\n                        (no_new_edges or height < max_depth_new_edges)\n                    ):\n                        # Perform substitutions as we go\n                        val = dsk[parent]\n                        children_deps = set()\n                        for child_info in children_info:\n                            cur_child = child_info[0]\n                            val = subs(val, cur_child, child_info[1])\n                            del rv[cur_child]\n                            children_deps |= deps_pop(cur_child)\n                            reducible_remove(cur_child)\n                            if rename_keys:\n                                fused_trees_pop(cur_child, None)\n                                child_keys.extend(child_info[2])\n                        deps_parent -= children\n                        deps_parent |= children_deps\n\n                        if rename_keys:\n                            child_keys.append(parent)\n                            fused_trees[parent] = child_keys\n\n                        if children_stack:\n                            info_stack_append(\n                                (\n                                    parent,\n                                    val,\n                                    child_keys,\n                                    height + 1,\n                                    width,\n                                    num_nodes + 1,\n                                    fudge,\n                                    edges,\n                                )\n                            )\n                        else:\n                            rv[parent] = val\n                            break\n                    else:\n                        for child_info in children_info:\n                            rv[child_info[0]] = child_info[1]\n                            reducible_remove(child_info[0])\n                        if children_stack:\n                            # Allow the parent to be fused, but only under strict circumstances.\n                            # Ensure that linear chains may still be fused.\n                            if width > max_width:\n                                width = max_width\n                            if fudge > int(ave_width - 1):\n                                fudge = int(ave_width - 1)\n                            # key, task, fused_keys, height, width, number of nodes, fudge, set of edges\n                            # This task *implicitly* depends on `edges`\n                            info_stack_append(\n                                (\n                                    parent,\n                                    rv[parent],\n                                    [parent] if rename_keys else None,\n                                    1,\n                                    width,\n                                    1,\n                                    fudge,\n                                    edges,\n                                )\n                            )\n                        else:\n                            break\n            # Traverse upwards\n            parent = rdeps[parent][0]\n\n    if fuse_subgraphs:\n        _inplace_fuse_subgraphs(rv, keys, deps, fused_trees, rename_keys)\n\n    if key_renamer:\n        for root_key, fused_keys in fused_trees.items():\n            alias = key_renamer(fused_keys)\n            if alias is not None and alias not in rv:\n                rv[alias] = rv[root_key]\n                rv[root_key] = alias\n                deps[alias] = deps[root_key]\n                deps[root_key] = {alias}\n\n    return rv, deps", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py__inplace_fuse_subgraphs__inplace_fuse_subgraphs.for_chain_in_chains_.if_rename_keys_.fused_trees_outkey_cha": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py__inplace_fuse_subgraphs__inplace_fuse_subgraphs.for_chain_in_chains_.if_rename_keys_.fused_trees_outkey_cha", "embedding": null, "metadata": {"file_path": "dask/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 850, "end_line": 918, "span_ids": ["_inplace_fuse_subgraphs"], "tokens": 541}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _inplace_fuse_subgraphs(dsk, keys, dependencies, fused_trees, rename_keys):\n    \"\"\"Subroutine of fuse.\n\n    Mutates dsk, dependencies, and fused_trees 
inplace\"\"\"\n # locate all members of linear chains\n child2parent = {}\n unfusible = set()\n for parent in dsk:\n deps = dependencies[parent]\n has_many_children = len(deps) > 1\n for child in deps:\n if keys is not None and child in keys:\n unfusible.add(child)\n elif child in child2parent:\n del child2parent[child]\n unfusible.add(child)\n elif has_many_children:\n unfusible.add(child)\n elif child not in unfusible:\n child2parent[child] = parent\n\n # construct the chains from ancestor to descendant\n chains = []\n parent2child = {v: k for k, v in child2parent.items()}\n while child2parent:\n child, parent = child2parent.popitem()\n chain = [child, parent]\n while parent in child2parent:\n parent = child2parent.pop(parent)\n del parent2child[parent]\n chain.append(parent)\n chain.reverse()\n while child in parent2child:\n child = parent2child.pop(child)\n del child2parent[child]\n chain.append(child)\n # Skip chains with < 2 executable tasks\n ntasks = 0\n for key in chain:\n ntasks += istask(dsk[key])\n if ntasks > 1:\n chains.append(chain)\n break\n\n # Mutate dsk fusing chains into subgraphs\n for chain in chains:\n subgraph = {k: dsk[k] for k in chain}\n outkey = chain[0]\n\n # Update dependencies and graph\n inkeys_set = dependencies[outkey] = dependencies[chain[-1]]\n for k in chain[1:]:\n del dependencies[k]\n del dsk[k]\n\n # Create new task\n inkeys = tuple(inkeys_set)\n dsk[outkey] = (SubgraphCallable(subgraph, outkey, inkeys),) + inkeys\n\n # Mutate `fused_trees` if key renaming is needed (renaming done in fuse)\n if rename_keys:\n chain2 = []\n for k in chain:\n subchain = fused_trees.pop(k, False)\n if subchain:\n chain2.extend(subchain)\n else:\n chain2.append(k)\n fused_trees[outkey] = chain2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_SubgraphCallable_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_SubgraphCallable_", "embedding": null, "metadata": {"file_path": "dask/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 923, "end_line": 970, "span_ids": ["SubgraphCallable.__hash__", "SubgraphCallable.__repr__", "SubgraphCallable.__eq__", "SubgraphCallable.__reduce__", "SubgraphCallable.__ne__", "SubgraphCallable.__call__", "SubgraphCallable", "SubgraphCallable.__init__"], "tokens": 361}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class SubgraphCallable(object):\n \"\"\"Create a callable object from a dask graph.\n\n Parameters\n ----------\n dsk : dict\n A dask graph\n outkey : hashable\n The output key from the graph\n inkeys : list\n A list of keys to be used as arguments to the callable.\n name : str, optional\n The name to use for the function.\n \"\"\"\n\n __slots__ = (\"dsk\", \"outkey\", \"inkeys\", \"name\")\n\n def __init__(self, dsk, outkey, inkeys, name=\"subgraph_callable\"):\n self.dsk = dsk\n self.outkey = outkey\n self.inkeys = inkeys\n self.name = name\n\n def 
__repr__(self):\n        return self.name\n\n    def __eq__(self, other):\n        return (\n            type(self) is type(other)\n            and self.name == other.name\n            and self.outkey == other.outkey\n            and set(self.inkeys) == set(other.inkeys)\n        )\n\n    def __ne__(self, other):\n        return not (self == other)\n\n    def __call__(self, *args):\n        if not len(args) == len(self.inkeys):\n            raise ValueError(\"Expected %d args, got %d\" % (len(self.inkeys), len(args)))\n        return core.get(self.dsk, self.outkey, dict(zip(self.inkeys, args)))\n\n    def __reduce__(self):\n        return (SubgraphCallable, (self.dsk, self.outkey, self.inkeys, self.name))\n\n    def __hash__(self):\n        return hash(tuple((self.outkey, tuple(self.inkeys), self.name)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_r_Static_order_of_node_add": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_r_Static_order_of_node_add", "embedding": null, "metadata": {"file_path": "dask/order.py", "file_name": "order.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 81, "span_ids": ["imports", "docstring"], "tokens": 698}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "r\"\"\" Static order of nodes in dask graph\n\nDask makes decisions on what tasks to prioritize both\n\n* Dynamically at runtime\n* Statically before runtime\n\nDynamically we prefer to run tasks that were just made available. However when\nseveral tasks become available at the same time we have an opportunity to break\nties in an intelligent way\n\n    d\n    |\n  b c\n   \\ /\n    a\n\nFor example after we finish ``a`` we can choose to run either ``b`` or ``c``\nnext. Making small decisions like this can greatly affect our performance,\nespecially because the order in which we run tasks affects the order in which\nwe can release memory, which operationally we find to have a large effect on\nmany computations. We want to run tasks in such a way that we keep only a small\namount of data in memory at any given time.\n\n\nStatic Ordering\n---------------\n\nAnd so we create a total ordering over all nodes to serve as a tie breaker. We\nrepresent this ordering with a dictionary mapping keys to integer values.\nLower scores have higher priority. These scores correspond to the order in\nwhich a sequential scheduler would visit each node.\n\n   {'a': 0,\n    'c': 1,\n    'd': 2,\n    'b': 3}\n\nThere are several ways in which we might order our keys. This is a nuanced\nprocess that has to take into account many different kinds of workflows, and\noperate efficiently in linear time. We strongly recommend that readers look at\nthe docstrings of tests in dask/tests/test_order.py. These tests usually have\ngraph types laid out very carefully to show the kinds of situations that often\narise, and the order we would like to be determined.\n\n\nPolicy\n------\n\nWork towards *small goals* with *big steps*.\n\n1. 
**Small goals**: prefer tasks that have few total dependents and whose final\n dependents have few total dependencies.\n\n We prefer to prioritize those tasks that help branches of computation that\n can terminate quickly.\n\n With more detail, we compute the total number of dependencies that each\n task depends on (both its own dependencies, and the dependencies of its\n dependencies, and so on), and then we choose those tasks that drive towards\n results with a low number of total dependencies. We choose to prioritize\n tasks that work towards finishing shorter computations first.\n\n2. **Big steps**: prefer tasks with many dependents\n\n However, many tasks work towards the same final dependents. Among those,\n we choose those tasks with the most work left to do. We want to finish\n the larger portions of a sub-computation before we start on the smaller\n ones.\n\n3. **Name comparison**: break ties with key name\n\n Often graphs are made with regular keynames. When no other structural\n difference exists between two keys, use the key name to break ties.\n This relies on the regularity of graph constructors like dask.array to be a\n good proxy for ordering. This is usually a good idea and a sane default.\n\"\"\"\nfrom collections import defaultdict\nfrom math import log\nfrom .core import get_dependencies, reverse_dict, get_deps, getcycle # noqa: F401\nfrom .utils_test import add, inc", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_None_2_order.initial_stack_key.init_stack___getitem__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_None_2_order.initial_stack_key.init_stack___getitem__", "embedding": null, "metadata": {"file_path": "dask/order.py", "file_name": "order.py", "file_type": "text/x-python", "category": "implementation", "start_line": 81, "end_line": 154, "span_ids": ["imports", "order"], "tokens": 683}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": " # noqa: F401\n\n\ndef order(dsk, dependencies=None):\n \"\"\"Order nodes in dask graph\n\n This produces an ordering over our tasks that we use to break ties when\n executing. We do this ahead of time to reduce a bit of stress on the\n scheduler and also to assist in static analysis.\n\n This currently traverses the graph as a single-threaded scheduler would\n traverse it. It breaks ties in the following ways:\n\n 1. Begin at a leaf node that is a dependency of a root node that has the\n largest subgraph (start hard things first)\n 2. Prefer tall branches with few dependents (start hard things first and\n try to avoid memory usage)\n 3. 
Prefer dependents that are dependencies of root nodes that have\n the smallest subgraph (do small goals that can terminate quickly)\n\n Examples\n --------\n >>> dsk = {'a': 1, 'b': 2, 'c': (inc, 'a'), 'd': (add, 'b', 'c')}\n >>> order(dsk)\n {'a': 0, 'c': 1, 'b': 2, 'd': 3}\n \"\"\"\n if not dsk:\n return {}\n\n if dependencies is None:\n dependencies = {k: get_dependencies(dsk, k) for k in dsk}\n\n dependents = reverse_dict(dependencies)\n num_needed, total_dependencies = ndependencies(dependencies, dependents)\n metrics = graph_metrics(dependencies, dependents, total_dependencies)\n if len(metrics) != len(dsk):\n cycle = getcycle(dsk, None)\n raise RuntimeError(\n \"Cycle detected between the following keys:\\n -> %s\"\n % \"\\n -> \".join(str(x) for x in cycle)\n )\n\n # Leaf nodes. We choose one--the initial node--for each weakly connected subgraph.\n # Let's calculate the `initial_stack_key` as we determine `init_stack` set.\n init_stack = {\n # First prioritize large, tall groups, then prioritize the same as ``dependents_key``.\n key: (\n # at a high-level, work towards a large goal (and prefer tall and narrow)\n -max_dependencies,\n num_dependents - max_heights,\n # tactically, finish small connected jobs first\n min_dependencies,\n num_dependents - min_heights, # prefer tall and narrow\n -total_dependents, # take a big step\n # try to be memory efficient\n num_dependents,\n # tie-breaker\n StrComparable(key),\n )\n for key, num_dependents, (\n total_dependents,\n min_dependencies,\n max_dependencies,\n min_heights,\n max_heights,\n ) in (\n (key, len(dependents[key]), metrics[key])\n for key, val in dependencies.items()\n if not val\n )\n }\n # `initial_stack_key` chooses which task to run at the very beginning.\n # This value is static, so we pre-compute as the value of this dict.\n initial_stack_key = init_stack.__getitem__\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_order.dependents_key_order.dependencies_key.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_order.dependents_key_order.dependencies_key.return._", "embedding": null, "metadata": {"file_path": "dask/order.py", "file_name": "order.py", "file_type": "text/x-python", "category": "implementation", "start_line": 156, "end_line": 199, "span_ids": ["order"], "tokens": 359}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def order(dsk, dependencies=None):\n # ... 
other code\n\n    def dependents_key(x):\n        \"\"\"Choose a path from our starting task to our tactical goal\n\n        This path is connected to a large goal, but focuses on completing\n        a small goal and being memory efficient.\n        \"\"\"\n        return (\n            # Focus on being memory-efficient\n            len(dependents[x]) - len(dependencies[x]) + num_needed[x],\n            -metrics[x][3],  # min_heights\n            # tie-breaker\n            StrComparable(x),\n        )\n\n    def dependencies_key(x):\n        \"\"\"Choose which dependency to run as part of a reverse DFS\n\n        This is very similar to ``initial_stack_key``.\n        \"\"\"\n        num_dependents = len(dependents[x])\n        (\n            total_dependents,\n            min_dependencies,\n            max_dependencies,\n            min_heights,\n            max_heights,\n        ) = metrics[x]\n        # Prefer short and narrow instead of tall and narrow, because we're going in\n        # reverse along dependencies.\n        return (\n            # at a high-level, work towards a large goal (and prefer short and narrow)\n            -max_dependencies,\n            num_dependents + max_heights,\n            # tactically, finish small connected jobs first\n            min_dependencies,\n            num_dependents + min_heights,  # prefer short and narrow\n            -total_dependencies[x],  # go where the work is\n            # try to be memory efficient\n            num_dependents - len(dependencies[x]) + num_needed[x],\n            num_dependents,\n            total_dependents,  # already found work, so don't add more\n            # tie-breaker\n            StrComparable(x),\n        )\n    # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_order.finish_now_key_order.is_init_sorted.False": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_order.finish_now_key_order.is_init_sorted.False", "embedding": null, "metadata": {"file_path": "dask/order.py", "file_name": "order.py", "file_type": "text/x-python", "category": "implementation", "start_line": 201, "end_line": 272, "span_ids": ["order"], "tokens": 790}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def order(dsk, dependencies=None):\n    # ... other code\n\n    def finish_now_key(x):\n        \"\"\" Determine the order of dependents that are ready to run and be released\"\"\"\n        return (-len(dependencies[x]), StrComparable(x))\n\n    # Computing this for all keys can sometimes be relatively expensive :(\n    partition_keys = {\n        key: (\n            (min_dependencies - total_dependencies[key] + 1)\n            * (total_dependents - min_heights)\n        )\n        for key, (\n            total_dependents,\n            min_dependencies,\n            _,\n            min_heights,\n            _,\n        ) in metrics.items()\n    }\n\n    result = {}\n    i = 0\n\n    # `inner_stack` is used to perform a DFS along dependencies. 
Once emptied\n    # (when traversing dependencies), this continues down a path along dependents\n    # until a root node is reached.\n    #\n    # Sometimes, a better path along a dependent is discovered (i.e., something\n    # that is easier to compute and doesn't require holding too much in memory).\n    # In this case, the current `inner_stack` is appended to `inner_stacks` and\n    # we begin a new DFS from the better node.\n    #\n    # A \"better path\" is determined by comparing `partition_keys`.\n    inner_stacks = [[min(init_stack, key=initial_stack_key)]]\n    inner_stacks_append = inner_stacks.append\n    inner_stacks_extend = inner_stacks.extend\n    inner_stacks_pop = inner_stacks.pop\n\n    # Okay, now we get to the data structures used for fancy behavior.\n    #\n    # As we traverse nodes in the DFS along dependencies, we partition the dependents\n    # via `partition_key`. A dependent goes to:\n    #   1) `inner_stack` if it's better than our current target,\n    #   2) `next_nodes` if the partition key is lower than its parent,\n    #   3) `later_nodes` otherwise.\n    # When the inner stacks are depleted, we process `next_nodes`. If `next_nodes` is\n    # empty (and `outer_stack` is empty), then we process `later_nodes` the same way.\n    # These dicts use `partition_keys` as keys. We process them by placing the values\n    # in `outer_stack` so that the smallest keys will be processed first.\n    next_nodes = defaultdict(list)\n    later_nodes = defaultdict(list)\n\n    # `outer_stack` is used to populate `inner_stacks`. From the time we partition the\n    # dependents of a node, we group them: one list per partition key per parent node.\n    # This likely results in many small lists. We do this to avoid sorting many larger\n    # lists (i.e., to avoid n*log(n) behavior). So, we have many small lists that we\n    # partitioned, and we keep them in the order that we saw them (we will process them\n    # in a FIFO manner). By delaying sorting for as long as we can, we can first filter\n    # out nodes that have already been computed. All this complexity is worth it!\n    outer_stack = []\n    outer_stack_extend = outer_stack.extend\n    outer_stack_pop = outer_stack.pop\n\n    # Keep track of nodes that are in `inner_stack` or `inner_stacks` so we don't\n    # process them again.\n    seen = set()  # seen in an inner_stack (and has dependencies)\n    seen_update = seen.update\n    seen_add = seen.add\n\n    # alias for speed\n    set_difference = set.difference\n\n    is_init_sorted = False\n    # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_order.while_True__order.while_True_.if_inner_stacks_.continue": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_order.while_True__order.while_True_.if_inner_stacks_.continue", "embedding": null, "metadata": {"file_path": "dask/order.py", "file_name": "order.py", "file_type": "text/x-python", "category": "implementation", "start_line": 273, "end_line": 455, "span_ids": ["order"], "tokens": 1650}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def order(dsk, dependencies=None):\n # ... other code\n while True:\n while inner_stacks:\n inner_stack = inner_stacks_pop()\n inner_stack_pop = inner_stack.pop\n while inner_stack:\n # Perform a DFS along dependencies until we complete our tactical goal\n item = inner_stack_pop()\n if item in result:\n continue\n if num_needed[item]:\n inner_stack.append(item)\n deps = set_difference(dependencies[item], result)\n if 1 < len(deps) < 1000:\n inner_stack.extend(\n sorted(deps, key=dependencies_key, reverse=True)\n )\n else:\n inner_stack.extend(deps)\n seen_update(deps)\n continue\n\n result[item] = i\n i += 1\n deps = dependents[item]\n\n # If inner_stack is empty, then we typically add the best dependent to it.\n # However, we don't add to it if we complete a node early via \"finish_now\" below\n # or if a dependent is already on an inner_stack. In this case, we add the\n # dependents (not in an inner_stack) to next_nodes or later_nodes to handle later.\n # This serves three purposes:\n # 1. shrink `deps` so that it can be processed faster,\n # 2. make sure we don't process the same dependency repeatedly, and\n # 3. make sure we don't accidentally continue down an expensive-to-compute path.\n add_to_inner_stack = True\n if metrics[item][3] == 1: # min_height\n # Don't leave any dangling single nodes! Finish all dependents that are\n # ready and are also root nodes.\n finish_now = {\n dep\n for dep in deps\n if not dependents[dep] and num_needed[dep] == 1\n }\n if finish_now:\n deps -= finish_now # Safe to mutate\n if len(finish_now) > 1:\n finish_now = sorted(finish_now, key=finish_now_key)\n for dep in finish_now:\n result[dep] = i\n i += 1\n add_to_inner_stack = False\n\n if deps:\n for dep in deps:\n num_needed[dep] -= 1\n\n already_seen = deps & seen\n if already_seen:\n if len(deps) == len(already_seen):\n continue\n add_to_inner_stack = False\n deps -= already_seen\n\n if len(deps) == 1:\n # Fast path! 
We trim down `deps` above hoping to reach here.\n                        (dep,) = deps\n                        if not inner_stack:\n                            if add_to_inner_stack:\n                                inner_stack = [dep]\n                                inner_stack_pop = inner_stack.pop\n                                seen_add(dep)\n                                continue\n                            key = partition_keys[dep]\n                        else:\n                            key = partition_keys[dep]\n                            if key < partition_keys[inner_stack[0]]:\n                                # Run before `inner_stack` (change tactical goal!)\n                                inner_stacks_append(inner_stack)\n                                inner_stack = [dep]\n                                inner_stack_pop = inner_stack.pop\n                                seen_add(dep)\n                                continue\n                        if key < partition_keys[item]:\n                            next_nodes[key].append(deps)\n                        else:\n                            later_nodes[key].append(deps)\n                    else:\n                        # Slow path :(. This requires grouping by partition_key.\n                        dep_pools = defaultdict(list)\n                        for dep in deps:\n                            dep_pools[partition_keys[dep]].append(dep)\n                        item_key = partition_keys[item]\n                        if inner_stack:\n                            # If we have an inner_stack, we need to look for a \"better\" path\n                            prev_key = partition_keys[inner_stack[0]]\n                            now_keys = []  # < inner_stack[0]\n                            for key, vals in dep_pools.items():\n                                if key < prev_key:\n                                    now_keys.append(key)\n                                elif key < item_key:\n                                    next_nodes[key].append(vals)\n                                else:\n                                    later_nodes[key].append(vals)\n                            if now_keys:\n                                # Run before `inner_stack` (change tactical goal!)\n                                inner_stacks_append(inner_stack)\n                                if 1 < len(now_keys):\n                                    now_keys.sort(reverse=True)\n                                for key in now_keys:\n                                    pool = dep_pools[key]\n                                    if 1 < len(pool) < 100:\n                                        pool.sort(key=dependents_key, reverse=True)\n                                    inner_stacks_extend([dep] for dep in pool)\n                                    seen_update(pool)\n                                inner_stack = inner_stacks_pop()\n                                inner_stack_pop = inner_stack.pop\n                        else:\n                            # If we don't have an inner_stack, then we don't need to look\n                            # for a \"better\" path, but we do need to traverse along dependents.\n                            if add_to_inner_stack:\n                                min_key = min(dep_pools)\n                                min_pool = dep_pools.pop(min_key)\n                                if len(min_pool) == 1:\n                                    inner_stack = min_pool\n                                    seen_update(inner_stack)\n                                elif (\n                                    10 * item_key\n                                    > 11 * len(min_pool) * len(min_pool) * min_key\n                                ):\n                                    # Put all items in min_pool onto inner_stacks.\n                                    # I know this is a weird comparison. Hear me out.\n                                    # Although it is often beneficial to put all of the items in `min_pool`\n                                    # onto `inner_stacks` to process next, it is very easy to be overzealous.\n                                    # Sometimes it is actually better to defer until `next_nodes` is handled.\n                                    # We should only put items onto `inner_stacks` that we're reasonably\n                                    # confident about. The above formula is a best effort heuristic given\n                                    # what we have easily available. It is obviously very specific to our\n                                    # choice of partition_key. 
Dask tests take this route about 40% of the time.\n                                    if len(min_pool) < 100:\n                                        min_pool.sort(key=dependents_key, reverse=True)\n                                    inner_stacks_extend([dep] for dep in min_pool)\n                                    inner_stack = inner_stacks_pop()\n                                    seen_update(min_pool)\n                                else:\n                                    # Put one item in min_pool onto inner_stack and the rest into next_nodes.\n                                    if len(min_pool) < 100:\n                                        inner_stack = [\n                                            min(min_pool, key=dependents_key)\n                                        ]\n                                    else:\n                                        inner_stack = [min_pool.pop()]\n                                        next_nodes[min_key].append(min_pool)\n                                    seen_update(inner_stack)\n\n                                inner_stack_pop = inner_stack.pop\n                            for key, vals in dep_pools.items():\n                                if key < item_key:\n                                    next_nodes[key].append(vals)\n                                else:\n                                    later_nodes[key].append(vals)\n\n        if len(dependencies) == len(result):\n            break  # all done!\n\n        if next_nodes:\n            for key in sorted(next_nodes, reverse=True):\n                # `outer_stack` may not be empty here--it has data from previous `next_nodes`.\n                # Since we pop things off of it (onto `inner_stacks`), this means we handle\n                # multiple `next_nodes` in a LIFO manner.\n                outer_stack_extend(reversed(next_nodes[key]))\n            next_nodes = defaultdict(list)\n\n        while outer_stack:\n            # Try to add a few items to `inner_stacks`\n            deps = [x for x in outer_stack_pop() if x not in result]\n            if deps:\n                if 1 < len(deps) < 100:\n                    deps.sort(key=dependents_key, reverse=True)\n                inner_stacks_extend([dep] for dep in deps)\n                seen_update(deps)\n                break\n\n        if inner_stacks:\n            continue\n        # ... other code\n    # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_order.while_True_.if_later_nodes__order.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_order.while_True_.if_later_nodes__order.return.result", "embedding": null, "metadata": {"file_path": "dask/order.py", "file_name": "order.py", "file_type": "text/x-python", "category": "implementation", "start_line": 457, "end_line": 492, "span_ids": ["order"], "tokens": 356}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def order(dsk, dependencies=None):\n    while True:\n        # ... 
other code\n\n if later_nodes:\n # You know all those dependents with large keys we've been hanging onto to run \"later\"?\n # Well, \"later\" has finally come.\n next_nodes, later_nodes = later_nodes, next_nodes\n continue\n\n # We just finished computing a connected group.\n # Let's choose the first `item` in the next group to compute.\n # If we have few large groups left, then it's best to find `item` by taking a minimum.\n # If we have many small groups left, then it's best to sort.\n # If we have many tiny groups left, then it's best to simply iterate.\n if not is_init_sorted:\n prev_len = len(init_stack)\n if type(init_stack) is dict:\n init_stack = set(init_stack)\n init_stack = set_difference(init_stack, result)\n N = len(init_stack)\n m = prev_len - N\n # is `min` likely better than `sort`?\n if m >= N or N + (N - m) * log(N - m) < N * log(N):\n item = min(init_stack, key=initial_stack_key)\n continue\n\n if len(init_stack) < 10000:\n init_stack = sorted(init_stack, key=initial_stack_key, reverse=True)\n else:\n init_stack = list(init_stack)\n init_stack_pop = init_stack.pop\n is_init_sorted = True\n\n item = init_stack_pop()\n while item in result:\n item = init_stack_pop()\n inner_stacks_append([item])\n\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_graph_metrics_graph_metrics.for_key_deps_in_dependen.if_not_deps_.for_child_in_dependencies.if_not_num_needed_child_.current_append_child_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_graph_metrics_graph_metrics.for_key_deps_in_dependen.if_not_deps_.for_child_in_dependencies.if_not_num_needed_child_.current_append_child_", "embedding": null, "metadata": {"file_path": "dask/order.py", "file_name": "order.py", "file_type": "text/x-python", "category": "implementation", "start_line": 495, "end_line": 579, "span_ids": ["graph_metrics"], "tokens": 665}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def graph_metrics(dependencies, dependents, total_dependencies):\n r\"\"\"Useful measures of a graph used by ``dask.order.order``\n\n Example DAG (a1 has no dependencies; b2 and c1 are root nodes):\n\n c1\n |\n b1 b2\n \\ /\n a1\n\n For each key we return:\n 1. The number of keys that can only be run after this key is run. The\n root nodes have value 1 while deep child nodes will have larger values.\n\n 1\n |\n 2 1\n \\ /\n 4\n\n 2. The minimum value of the total number of dependencies of\n all final dependents (see module-level comment for more).\n In other words, the minimum of ``ndependencies`` of root\n nodes connected to the current node.\n\n 3\n |\n 3 2\n \\ /\n 2\n\n 3. The maximum value of the total number of dependencies of\n all final dependents (see module-level comment for more).\n In other words, the maximum of ``ndependencies`` of root\n nodes connected to the current node.\n\n 3\n |\n 3 2\n \\ /\n 3\n\n 4. The minimum height from a root node\n\n 0\n |\n 1 0\n \\ /\n 1\n\n 5. 
The maximum height from a root node\n\n 0\n |\n 1 0\n \\ /\n 2\n\n Examples\n --------\n >>> dsk = {'a1': 1, 'b1': (inc, 'a1'), 'b2': (inc, 'a1'), 'c1': (inc, 'b1')}\n >>> dependencies, dependents = get_deps(dsk)\n >>> _, total_dependencies = ndependencies(dependencies, dependents)\n >>> metrics = graph_metrics(dependencies, dependents, total_dependencies)\n >>> sorted(metrics.items())\n [('a1', (4, 2, 3, 1, 2)), ('b1', (2, 3, 3, 1, 1)), ('b2', (1, 2, 2, 0, 0)), ('c1', (1, 3, 3, 0, 0))]\n\n Returns\n -------\n metrics: Dict[key, Tuple[int, int, int, int, int]]\n \"\"\"\n result = {}\n num_needed = {k: len(v) for k, v in dependents.items() if v}\n current = []\n current_pop = current.pop\n current_append = current.append\n for key, deps in dependents.items():\n if not deps:\n val = total_dependencies[key]\n result[key] = (1, val, val, 0, 0)\n for child in dependencies[key]:\n num_needed[child] -= 1\n if not num_needed[child]:\n current_append(child)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_graph_metrics.while_current__graph_metrics.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_graph_metrics.while_current__graph_metrics.return.result", "embedding": null, "metadata": {"file_path": "dask/order.py", "file_name": "order.py", "file_type": "text/x-python", "category": "implementation", "start_line": 581, "end_line": 619, "span_ids": ["graph_metrics"], "tokens": 242}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def graph_metrics(dependencies, dependents, total_dependencies):\n # ... 
other code\n\n while current:\n key = current_pop()\n parents = dependents[key]\n if len(parents) == 1:\n (parent,) = parents\n (\n total_dependents,\n min_dependencies,\n max_dependencies,\n min_heights,\n max_heights,\n ) = result[parent]\n result[key] = (\n 1 + total_dependents,\n min_dependencies,\n max_dependencies,\n 1 + min_heights,\n 1 + max_heights,\n )\n else:\n (\n total_dependents,\n min_dependencies,\n max_dependencies,\n min_heights,\n max_heights,\n ) = zip(*(result[parent] for parent in dependents[key]))\n result[key] = (\n 1 + sum(total_dependents),\n min(min_dependencies),\n max(max_dependencies),\n 1 + min(min_heights),\n 1 + max(max_heights),\n )\n for child in dependencies[key]:\n num_needed[child] -= 1\n if not num_needed[child]:\n current_append(child)\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_ndependencies_ndependencies.return.num_dependencies_result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_ndependencies_ndependencies.return.num_dependencies_result", "embedding": null, "metadata": {"file_path": "dask/order.py", "file_name": "order.py", "file_type": "text/x-python", "category": "implementation", "start_line": 622, "end_line": 665, "span_ids": ["ndependencies"], "tokens": 324}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def ndependencies(dependencies, dependents):\n \"\"\"Number of total data elements on which this key depends\n\n For each key we return the number of tasks that must be run for us to run\n this task.\n\n Examples\n --------\n >>> dsk = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b')}\n >>> dependencies, dependents = get_deps(dsk)\n >>> num_dependencies, total_dependencies = ndependencies(dependencies, dependents)\n >>> sorted(total_dependencies.items())\n [('a', 1), ('b', 2), ('c', 3)]\n\n Returns\n -------\n num_dependencies: Dict[key, int]\n total_dependencies: Dict[key, int]\n \"\"\"\n num_needed = {}\n result = {}\n for k, v in dependencies.items():\n num_needed[k] = len(v)\n if not v:\n result[k] = 1\n\n num_dependencies = num_needed.copy()\n current = []\n current_pop = current.pop\n current_append = current.append\n\n for key in result:\n for parent in dependents[key]:\n num_needed[parent] -= 1\n if not num_needed[parent]:\n current_append(parent)\n while current:\n key = current_pop()\n result[key] = 1 + sum(result[child] for child in dependencies[key])\n for parent in dependents[key]:\n num_needed[parent] -= 1\n if not num_needed[parent]:\n current_append(parent)\n return num_dependencies, result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_StrComparable_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_StrComparable_", "embedding": null, "metadata": {"file_path": "dask/order.py", "file_name": "order.py", "file_type": "text/x-python", "category": "implementation", "start_line": 668, "end_line": 695, "span_ids": ["StrComparable", "StrComparable.__lt__", "StrComparable.__init__"], "tokens": 171}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class StrComparable(object):\n \"\"\"Wrap object so that it defaults to string comparison\n\n When comparing two objects of different types Python fails\n\n >>> 'a' < 1 # doctest: +SKIP\n Traceback (most recent call last):\n ...\n TypeError: '<' not supported between instances of 'str' and 'int'\n\n This class wraps the object so that, when this would occur it instead\n compares the string representation\n\n >>> StrComparable('a') < StrComparable(1)\n False\n \"\"\"\n\n __slots__ = (\"obj\",)\n\n def __init__(self, obj):\n self.obj = obj\n\n def __lt__(self, other):\n try:\n return self.obj < other.obj\n except Exception:\n return str(self.obj) < str(other.obj)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_from_collections_import_d_Traverser.skip.self.term.self__stack_pop_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_from_collections_import_d_Traverser.skip.self.term.self__stack_pop_", "embedding": null, "metadata": {"file_path": "dask/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 85, "span_ids": ["Traverser.__init__", "Traverser.current", "imports", "Traverser.next", "Traverser.copy", "Traverser", "head", "Traverser.__iter__", "args", "Traverser.skip"], "tokens": 434}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from collections import deque\n\nfrom dask.core import istask, subs\n\n\ndef head(task):\n \"\"\"Return the top level node of a task\"\"\"\n\n if istask(task):\n return task[0]\n elif isinstance(task, list):\n return list\n else:\n return task\n\n\ndef args(task):\n \"\"\"Get the arguments for the current task\"\"\"\n\n if istask(task):\n return task[1:]\n elif isinstance(task, list):\n return task\n else:\n return ()\n\n\nclass Traverser(object):\n \"\"\"Traverser interface for tasks.\n\n Class for storing the state while performing a preorder-traversal of a\n task.\n\n Parameters\n ----------\n term : task\n The task to be traversed\n\n Attributes\n ----------\n term\n The current element in the traversal\n current\n The head of the current element in the traversal. 
This is simply `head`\n applied to the attribute `term`.\n \"\"\"\n\n def __init__(self, term, stack=None):\n self.term = term\n if not stack:\n self._stack = deque([END])\n else:\n self._stack = stack\n\n def __iter__(self):\n while self.current is not END:\n yield self.current\n self.next()\n\n def copy(self):\n \"\"\"Copy the traverser in its current state.\n\n This allows the traversal to be pushed onto a stack, for easy\n backtracking.\"\"\"\n\n return Traverser(self.term, deque(self._stack))\n\n def next(self):\n \"\"\"Proceed to the next term in the preorder traversal.\"\"\"\n\n subterms = args(self.term)\n if not subterms:\n # No subterms, pop off stack\n self.term = self._stack.pop()\n else:\n self.term = subterms[0]\n self._stack.extend(reversed(subterms[1:]))\n\n @property\n def current(self):\n return head(self.term)\n\n def skip(self):\n \"\"\"Skip over all subterms of the current level in the traversal\"\"\"\n self.term = self._stack.pop()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_Token_END.Token_end_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_Token_END.Token_end_", "embedding": null, "metadata": {"file_path": "dask/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 88, "end_line": 104, "span_ids": ["Token", "Token.__init__", "Token.__repr__", "impl"], "tokens": 113}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Token(object):\n \"\"\"A token object.\n\n Used to express certain objects in the traversal of a task or pattern.\"\"\"\n\n def __init__(self, name):\n self.name = name\n\n def __repr__(self):\n return self.name\n\n\n# A variable to represent *all* variables in a discrimination net\nVAR = Token(\"?\")\n# Represents the end of the traversal of an expression. We can't use `None`,\n# 'False', etc... 
here, as anything may be an argument to a function.\nEND = Token(\"end\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_Node_Node.patterns.return.self_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_Node_Node.patterns.return.self_1_", "embedding": null, "metadata": {"file_path": "dask/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 107, "end_line": 125, "span_ids": ["Node.patterns", "Node.__new__", "Node", "Node.edges"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Node(tuple):\n \"\"\"A Discrimination Net node.\"\"\"\n\n __slots__ = ()\n\n def __new__(cls, edges=None, patterns=None):\n edges = edges if edges else {}\n patterns = patterns if patterns else []\n return tuple.__new__(cls, (edges, patterns))\n\n @property\n def edges(self):\n \"\"\"A dictionary, where the keys are edges, and the values are nodes\"\"\"\n return self[0]\n\n @property\n def patterns(self):\n \"\"\"A list of all patterns that currently match at this node\"\"\"\n return self[1]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_RewriteRule_RewriteRule.__repr__.return.str_self_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_RewriteRule_RewriteRule.__repr__.return.str_self_", "embedding": null, "metadata": {"file_path": "dask/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 128, "end_line": 197, "span_ids": ["RewriteRule.__repr__", "RewriteRule.__str__", "RewriteRule._apply", "RewriteRule", "RewriteRule.__init__"], "tokens": 618}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class RewriteRule(object):\n \"\"\"A rewrite rule.\n\n Expresses `lhs` -> `rhs`, for variables `vars`.\n\n Parameters\n ----------\n lhs : task\n The left-hand-side of the rewrite rule.\n rhs : task or function\n The right-hand-side of the rewrite rule. If it's a task, variables in\n `rhs` will be replaced by terms in the subject that match the variables\n in `lhs`. If it's a function, the function will be called with a dict\n of such matches.\n vars: tuple, optional\n Tuple of variables found in the lhs. Variables can be represented as\n any hashable object; a good convention is to use strings. 
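An aside on the `Traverser` class indexed above: iterating a `Traverser` yields the head of each subterm in preorder, which is what both `RuleSet.add` and `_match` rely on. A minimal sketch follows (requires the dask package; the example task and the printed order are illustrative, not taken from the repository):

```python
# Preorder traversal of a dask task with Traverser (illustrative sketch).
from operator import add, mul

from dask.rewrite import Traverser

# A task is a nested tuple: callable head first, then its arguments.
task = (add, (mul, "x", 2), "y")

# Iterating visits the head of each subterm in preorder:
# add, mul, 'x', 2, 'y'
print(list(Traverser(task)))
```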
If there are\n no variables, this can be omitted.\n\n Examples\n --------\n Here's a `RewriteRule` to replace all nested calls to `list`, so that\n `(list, (list, 'x'))` is replaced with `(list, 'x')`, where `'x'` is a\n variable.\n\n >>> import dask.rewrite as dr\n >>> lhs = (list, (list, 'x'))\n >>> rhs = (list, 'x')\n >>> variables = ('x',)\n >>> rule = dr.RewriteRule(lhs, rhs, variables)\n\n Here's a more complicated rule that uses a callable right-hand-side. A\n callable `rhs` takes in a dictionary mapping variables to their matching\n values. This rule replaces all occurrences of `(list, 'x')` with `'x'` if\n `'x'` is a list itself.\n\n >>> lhs = (list, 'x')\n >>> def repl_list(sd):\n ... x = sd['x']\n ... if isinstance(x, list):\n ... return x\n ... else:\n ... return (list, x)\n >>> rule = dr.RewriteRule(lhs, repl_list, variables)\n \"\"\"\n\n def __init__(self, lhs, rhs, vars=()):\n if not isinstance(vars, tuple):\n raise TypeError(\"vars must be a tuple of variables\")\n self.lhs = lhs\n if callable(rhs):\n self.subs = rhs\n else:\n self.subs = self._apply\n self.rhs = rhs\n self._varlist = [t for t in Traverser(lhs) if t in vars]\n # Reduce vars down to just variables found in lhs\n self.vars = tuple(sorted(set(self._varlist)))\n\n def _apply(self, sub_dict):\n term = self.rhs\n for key, val in sub_dict.items():\n term = subs(term, key, val)\n return term\n\n def __str__(self):\n return \"RewriteRule({0}, {1}, {2})\".format(self.lhs, self.rhs, self.vars)\n\n def __repr__(self):\n return str(self)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_RuleSet_RuleSet.__init__.for_p_in_rules_.self_add_p_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_RuleSet_RuleSet.__init__.for_p_in_rules_.self_add_p_", "embedding": null, "metadata": {"file_path": "dask/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 200, "end_line": 253, "span_ids": ["RuleSet.__init__", "RuleSet"], "tokens": 435}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class RuleSet(object):\n \"\"\"A set of rewrite rules.\n\n Forms a structure for fast rewriting over a set of rewrite rules. This\n allows for syntactic matching of terms to patterns for many patterns at\n the same time.\n\n Examples\n --------\n\n >>> import dask.rewrite as dr\n >>> def f(*args): pass\n >>> def g(*args): pass\n >>> def h(*args): pass\n >>> from operator import add\n\n >>> rs = dr.RuleSet( # doctest: +SKIP\n ... dr.RewriteRule((add, 'x', 0), 'x', ('x',)),\n ... dr.RewriteRule((f, (g, 'x'), 'y'),\n ... (h, 'x', 'y'),\n ... ('x', 'y')))\n\n >>> rs.rewrite((add, 2, 0)) # doctest: +SKIP\n 2\n\n >>> rs.rewrite((f, (g, 'a', 3))) # doctest: +SKIP\n (h, 'a', 3)\n\n >>> dsk = {'a': (add, 2, 0), # doctest: +SKIP\n ... 
'b': (f, (g, 'a', 3))}\n\n >>> from toolz import valmap # doctest: +SKIP\n >>> valmap(rs.rewrite, dsk) # doctest: +SKIP\n {'a': 2,\n 'b': (h, 'a', 3)}\n\n Attributes\n ----------\n rules : list\n A list of `RewriteRule`s included in the `RuleSet`.\n \"\"\"\n\n def __init__(self, *rules):\n \"\"\"Create a `RuleSet` for a number of rules\n\n Parameters\n ----------\n rules\n One or more instances of RewriteRule\n \"\"\"\n self._net = Node()\n self.rules = []\n for p in rules:\n self.add(p)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_RuleSet.add_RuleSet.add.self_rules_append_rule_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_RuleSet.add_RuleSet.add.self_rules_append_rule_", "embedding": null, "metadata": {"file_path": "dask/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 253, "end_line": 278, "span_ids": ["RuleSet.add"], "tokens": 194}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class RuleSet(object):\n\n def add(self, rule):\n \"\"\"Add a rule to the RuleSet.\n\n Parameters\n ----------\n rule : RewriteRule\n \"\"\"\n\n if not isinstance(rule, RewriteRule):\n raise TypeError(\"rule must be instance of RewriteRule\")\n vars = rule.vars\n curr_node = self._net\n ind = len(self.rules)\n # List of variables, in order they appear in the POT of the term\n for t in Traverser(rule.lhs):\n prev_node = curr_node\n if t in vars:\n t = VAR\n if t in curr_node.edges:\n curr_node = curr_node.edges[t]\n else:\n curr_node.edges[t] = Node()\n curr_node = curr_node.edges[t]\n # We've reached a leaf node. 
Add the term index to this leaf.\n prev_node.edges[t].patterns.append(ind)\n self.rules.append(rule)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_RuleSet.iter_matches_RuleSet._rewrite.return.term": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_RuleSet.iter_matches_RuleSet._rewrite.return.term", "embedding": null, "metadata": {"file_path": "dask/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 280, "end_line": 310, "span_ids": ["RuleSet._rewrite", "RuleSet.iter_matches"], "tokens": 244}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class RuleSet(object):\n\n def iter_matches(self, term):\n \"\"\"A generator that lazily finds matchings for term from the RuleSet.\n\n Parameters\n ----------\n term : task\n\n Yields\n ------\n Tuples of `(rule, subs)`, where `rule` is the rewrite rule being\n matched, and `subs` is a dictionary mapping the variables in the lhs\n of the rule to their matching values in the term.\"\"\"\n\n S = Traverser(term)\n for m, syms in _match(S, self._net):\n for i in m:\n rule = self.rules[i]\n subs = _process_match(rule, syms)\n if subs is not None:\n yield rule, subs\n\n def _rewrite(self, term):\n \"\"\"Apply the rewrite rules in RuleSet to top level of term\"\"\"\n\n for rule, sd in self.iter_matches(term):\n # We use for (...) because it's fast in all cases for getting the\n # first element from the match iterator. 
As we only want that\n # element, we break here\n term = rule.subs(sd)\n break\n return term", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_RuleSet.rewrite_RuleSet.rewrite.return.strategies_strategy_self": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_RuleSet.rewrite_RuleSet.rewrite.return.strategies_strategy_self", "embedding": null, "metadata": {"file_path": "dask/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 312, "end_line": 353, "span_ids": ["RuleSet.rewrite"], "tokens": 385}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class RuleSet(object):\n\n def rewrite(self, task, strategy=\"bottom_up\"):\n \"\"\"Apply the `RuleSet` to `task`.\n\n This applies the most specific matching rule in the RuleSet to the\n task, using the provided strategy.\n\n Parameters\n ----------\n term: a task\n The task to be rewritten\n strategy: str, optional\n The rewriting strategy to use. Options are \"bottom_up\" (default),\n or \"top_level\".\n\n Examples\n --------\n Suppose there was a function `add` that returned the sum of 2 numbers,\n and another function `double` that returned twice its input:\n\n >>> add = lambda x, y: x + y\n >>> double = lambda x: 2*x\n\n Now suppose `double` was *significantly* faster than `add`, so\n you'd like to replace all expressions `(add, x, x)` with `(double,\n x)`, where `x` is a variable. 
This can be expressed as a rewrite rule:\n\n >>> rule = RewriteRule((add, 'x', 'x'), (double, 'x'), ('x',))\n >>> rs = RuleSet(rule)\n\n This can then be applied to terms to perform the rewriting:\n\n >>> term = (add, (add, 2, 2), (add, 2, 2))\n >>> rs.rewrite(term) # doctest: +SKIP\n (double, (double, 2))\n\n If we only wanted to apply this to the top level of the term, the\n `strategy` kwarg can be set to \"top_level\".\n\n >>> rs.rewrite(term, strategy=\"top_level\") # doctest: +SKIP\n (double, (add, 2, 2))\n \"\"\"\n return strategies[strategy](self, task)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py__top_level__match.while_True_.None_1.except_Exception_.return": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py__top_level__match.while_True_.None_1.except_Exception_.return", "embedding": null, "metadata": {"file_path": "dask/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 356, "end_line": 406, "span_ids": ["_match", "_top_level", "_bottom_up", "impl:5"], "tokens": 353}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _top_level(net, term):\n return net._rewrite(term)\n\n\ndef _bottom_up(net, term):\n if istask(term):\n term = (head(term),) + tuple(_bottom_up(net, t) for t in args(term))\n elif isinstance(term, list):\n term = [_bottom_up(net, t) for t in args(term)]\n return net._rewrite(term)\n\n\nstrategies = {\"top_level\": _top_level, \"bottom_up\": _bottom_up}\n\n\ndef _match(S, N):\n \"\"\"Structural matching of term S to discrimination net node N.\"\"\"\n\n stack = deque()\n restore_state_flag = False\n # matches are stored in a tuple, because all mutations result in a copy,\n # preventing operations from changing matches stored on the stack.\n matches = ()\n while True:\n if S.current is END:\n yield N.patterns, matches\n try:\n # This try-except block is to catch hashing errors from un-hashable\n # types. 
This allows for variables to be matched with un-hashable\n # objects.\n n = N.edges.get(S.current, None)\n if n and not restore_state_flag:\n stack.append((S.copy(), N, matches))\n N = n\n S.next()\n continue\n except TypeError:\n pass\n n = N.edges.get(VAR, None)\n if n:\n restore_state_flag = False\n matches = matches + (S.term,)\n S.skip()\n N = n\n continue\n try:\n # Backtrack here\n (S, N, matches) = stack.pop()\n restore_state_flag = True\n except Exception:\n return", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py__process_match_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py__process_match_", "embedding": null, "metadata": {"file_path": "dask/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 409, "end_line": 435, "span_ids": ["_process_match"], "tokens": 190}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _process_match(rule, syms):\n \"\"\"Process a match to determine if it is correct, and to find the correct\n substitution that will convert the term into the pattern.\n\n Parameters\n ----------\n rule : RewriteRule\n syms : iterable\n Iterable of subterms that match a corresponding variable.\n\n Returns\n -------\n A dictionary of {vars : subterms} describing the substitution to make the\n pattern equivalent with the term. 
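The core of `_process_match` is a consistency check over repeated variables. A standalone re-implementation of just that check, written here for illustration only:

```python
# Sketch of the repeated-variable consistency check performed by
# _process_match (standalone illustration, not the module's code).
def check_bindings(varlist, syms):
    subs = {}
    for v, s in zip(varlist, syms):
        # A variable appearing twice in the lhs must match the same
        # subterm both times; otherwise the match is invalid.
        if v in subs and subs[v] != s:
            return None
        subs[v] = s
    return subs

# For lhs (add, 'x', 'x'), _varlist is ['x', 'x']: one entry per occurrence.
assert check_bindings(["x", "x"], [2, 2]) == {"x": 2}  # consistent match
assert check_bindings(["x", "x"], [2, 3]) is None      # rejected
```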
Returns `None` if the match is\n invalid.\"\"\"\n\n subs = {}\n varlist = rule._varlist\n if not len(varlist) == len(syms):\n raise RuntimeError(\"length of varlist doesn't match length of syms.\")\n for v, s in zip(varlist, syms):\n if v in subs and subs[v] != s:\n return None\n else:\n subs[v] = s\n return subs", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/sizeof.py_random_register_numpy.sizeof_numpy_ndarray.return.int_x_nbytes_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/sizeof.py_random_register_numpy.sizeof_numpy_ndarray.return.int_x_nbytes_", "embedding": null, "metadata": {"file_path": "dask/sizeof.py", "file_name": "sizeof.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 106, "span_ids": ["sizeof_python_collection", "sizeof_bytes", "imports", "sizeof_default", "sizeof_memoryview", "register_numba", "register_rmm", "sizeof_array", "register_numpy", "sizeof_python_dict", "register_cupy"], "tokens": 538}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import random\nimport sys\nfrom array import array\nfrom distutils.version import LooseVersion\n\nfrom .utils import Dispatch\n\ntry: # PyPy does not support sys.getsizeof\n sys.getsizeof(1)\n getsizeof = sys.getsizeof\nexcept (AttributeError, TypeError): # Monkey patch\n\n def getsizeof(x):\n return 100\n\n\nsizeof = Dispatch(name=\"sizeof\")\n\n\n@sizeof.register(object)\ndef sizeof_default(o):\n return getsizeof(o)\n\n\n@sizeof.register(bytes)\n@sizeof.register(bytearray)\ndef sizeof_bytes(o):\n return len(o)\n\n\n@sizeof.register(memoryview)\ndef sizeof_memoryview(o):\n return o.nbytes\n\n\n@sizeof.register(array)\ndef sizeof_array(o):\n return o.itemsize * len(o)\n\n\n@sizeof.register(list)\n@sizeof.register(tuple)\n@sizeof.register(set)\n@sizeof.register(frozenset)\ndef sizeof_python_collection(seq):\n num_items = len(seq)\n samples = 10\n if num_items > samples:\n s = getsizeof(seq) + num_items / samples * sum(\n map(sizeof, random.sample(seq, samples))\n )\n return int(s)\n else:\n return getsizeof(seq) + sum(map(sizeof, seq))\n\n\n@sizeof.register(dict)\ndef sizeof_python_dict(d):\n return (\n getsizeof(d)\n + sizeof(list(d.keys()))\n + sizeof(list(d.values()))\n - 2 * sizeof(list())\n )\n\n\n@sizeof.register_lazy(\"cupy\")\ndef register_cupy():\n import cupy\n\n @sizeof.register(cupy.ndarray)\n def sizeof_cupy_ndarray(x):\n return int(x.nbytes)\n\n\n@sizeof.register_lazy(\"numba\")\ndef register_numba():\n import numba.cuda\n\n @sizeof.register(numba.cuda.cudadrv.devicearray.DeviceNDArray)\n def sizeof_numba_devicendarray(x):\n return int(x.nbytes)\n\n\n@sizeof.register_lazy(\"rmm\")\ndef register_rmm():\n import rmm\n\n # Only included in 0.11.0+\n if hasattr(rmm, \"DeviceBuffer\"):\n\n @sizeof.register(rmm.DeviceBuffer)\n def sizeof_rmm_devicebuffer(x):\n return int(x.nbytes)\n\n\n@sizeof.register_lazy(\"numpy\")\ndef register_numpy():\n import numpy as np\n\n @sizeof.register(np.ndarray)\n def 
sizeof_numpy_ndarray(x):\n if 0 in x.strides:\n xs = x[tuple(slice(None) if s != 0 else slice(1) for s in x.strides)]\n return xs.nbytes\n return int(x.nbytes)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/sizeof.py_register_pandas_register_pandas.sizeof_pandas_multiindex.return.int_p_1000": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/sizeof.py_register_pandas_register_pandas.sizeof_pandas_multiindex.return.int_p_1000", "embedding": null, "metadata": {"file_path": "dask/sizeof.py", "file_name": "sizeof.py", "file_type": "text/x-python", "category": "implementation", "start_line": 109, "end_line": 151, "span_ids": ["register_pandas"], "tokens": 323}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@sizeof.register_lazy(\"pandas\")\ndef register_pandas():\n import pandas as pd\n import numpy as np\n\n def object_size(x):\n if not len(x):\n return 0\n sample = np.random.choice(x, size=20, replace=True)\n sample = list(map(sizeof, sample))\n return sum(sample) / 20 * len(x)\n\n @sizeof.register(pd.DataFrame)\n def sizeof_pandas_dataframe(df):\n p = sizeof(df.index)\n for name, col in df.iteritems():\n p += col.memory_usage(index=False)\n if col.dtype == object:\n p += object_size(col._values)\n return int(p) + 1000\n\n @sizeof.register(pd.Series)\n def sizeof_pandas_series(s):\n p = int(s.memory_usage(index=True))\n if s.dtype == object:\n p += object_size(s._values)\n if s.index.dtype == object:\n p += object_size(s.index)\n return int(p) + 1000\n\n @sizeof.register(pd.Index)\n def sizeof_pandas_index(i):\n p = int(i.memory_usage())\n if i.dtype == object:\n p += object_size(i)\n return int(p) + 1000\n\n @sizeof.register(pd.MultiIndex)\n def sizeof_pandas_multiindex(i):\n p = int(sum(object_size(l) for l in i.levels))\n for c in i.codes if hasattr(i, \"codes\") else i.labels:\n p += c.nbytes\n return int(p) + 1000", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/sizeof.py_register_spmatrix_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/sizeof.py_register_spmatrix_", "embedding": null, "metadata": {"file_path": "dask/sizeof.py", "file_name": "sizeof.py", "file_type": "text/x-python", "category": "implementation", "start_line": 154, "end_line": 198, "span_ids": ["register_spmatrix", "register_pyarrow"], "tokens": 314}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@sizeof.register_lazy(\"scipy\")\ndef register_spmatrix():\n from 
scipy import sparse\n\n @sizeof.register(sparse.dok_matrix)\n def sizeof_spmatrix_dok(s):\n return s.__sizeof__()\n\n @sizeof.register(sparse.spmatrix)\n def sizeof_spmatrix(s):\n return sum(sizeof(v) for v in s.__dict__.values())\n\n\n@sizeof.register_lazy(\"pyarrow\")\ndef register_pyarrow():\n import pyarrow as pa\n\n def _get_col_size(data):\n p = 0\n if not isinstance(data, pa.ChunkedArray):\n data = data.data # pyarrow <0.15.0\n for chunk in data.iterchunks():\n for buffer in chunk.buffers():\n if buffer:\n p += buffer.size\n return p\n\n @sizeof.register(pa.Table)\n def sizeof_pyarrow_table(table):\n p = sizeof(table.schema.metadata)\n for col in table.itercolumns():\n p += _get_col_size(col)\n return int(p) + 1000\n\n @sizeof.register(pa.ChunkedArray)\n def sizeof_pyarrow_chunked_array(data):\n return int(_get_col_size(data)) + 1000\n\n # Handle pa.Column for pyarrow < 0.15\n if pa.__version__ < LooseVersion(\"0.15.0\"):\n\n @sizeof.register(pa.Column)\n def sizeof_pyarrow_column(col):\n return int(_get_col_size(col)) + 1000", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/system.py_math_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/system.py_math_", "embedding": null, "metadata": {"file_path": "dask/system.py", "file_name": "system.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 54, "span_ids": ["imports", "cpu_count", "impl:7"], "tokens": 325}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import math\nimport os\nimport sys\n\ntry:\n import psutil\nexcept ImportError:\n psutil = None\n\n__all__ = (\"cpu_count\", \"CPU_COUNT\")\n\n\ndef cpu_count():\n \"\"\"Get the available CPU count for this system.\n\n Takes the minimum value from the following locations:\n\n - Total system cpus available on the host.\n - CPU Affinity (if set)\n - Cgroups limit (if set)\n \"\"\"\n count = os.cpu_count()\n\n # Check CPU affinity if available\n if psutil is not None:\n try:\n affinity_count = len(psutil.Process().cpu_affinity())\n if affinity_count > 0:\n count = min(count, affinity_count)\n except Exception:\n pass\n\n # Check cgroups if available\n if sys.platform == \"linux\":\n # The directory name isn't standardized across linux distros, check both\n for dirname in [\"cpuacct,cpu\", \"cpu,cpuacct\"]:\n try:\n with open(\"/sys/fs/cgroup/%s/cpu.cfs_quota_us\" % dirname) as f:\n quota = int(f.read())\n with open(\"/sys/fs/cgroup/%s/cpu.cfs_period_us\" % dirname) as f:\n period = int(f.read())\n # We round up on fractional CPUs\n cgroups_count = math.ceil(quota / period)\n if cgroups_count > 0:\n count = min(count, cgroups_count)\n break\n except Exception:\n pass\n\n return count\n\n\nCPU_COUNT = cpu_count()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
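To make the cgroups arithmetic in `cpu_count` above concrete, here is the rounding behaviour with made-up quota/period values (the numbers are hypothetical, chosen only to show the `ceil` and the `> 0` guard):

```python
import math

# Hypothetical cgroup v1 values for a container limited to 1.5 CPUs:
quota = 150000   # contents of cpu.cfs_quota_us (made-up value)
period = 100000  # contents of cpu.cfs_period_us (made-up value)

# cpu_count rounds fractional CPUs up, so a 1.5-CPU limit counts as 2.
assert math.ceil(quota / period) == 2

# An unlimited cgroup reports quota == -1; the result is not positive,
# so the `if cgroups_count > 0` guard in cpu_count ignores it.
assert math.ceil(-1 / period) <= 0
```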
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_normalize_function_test_normalize_function.None_10": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_normalize_function_test_normalize_function.None_10", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 62, "end_line": 95, "span_ids": ["test_normalize_function"], "tokens": 273}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_normalize_function():\n\n assert normalize_function(f2)\n\n assert normalize_function(lambda a: a)\n\n assert normalize_function(tz.partial(f2, b=2)) == normalize_function(\n tz.partial(f2, b=2)\n )\n\n assert normalize_function(tz.partial(f2, b=2)) != normalize_function(\n tz.partial(f2, b=3)\n )\n\n assert normalize_function(tz.partial(f1, b=2)) != normalize_function(\n tz.partial(f2, b=2)\n )\n\n assert normalize_function(tz.compose(f2, f3)) == normalize_function(\n tz.compose(f2, f3)\n )\n\n assert normalize_function(tz.compose(f2, f3)) != normalize_function(\n tz.compose(f2, f1)\n )\n\n assert normalize_function(tz.curry(f2)) == normalize_function(tz.curry(f2))\n assert normalize_function(tz.curry(f2)) != normalize_function(tz.curry(f1))\n assert normalize_function(tz.curry(f2, b=1)) == normalize_function(\n tz.curry(f2, b=1)\n )\n assert normalize_function(tz.curry(f2, b=1)) != normalize_function(\n tz.curry(f2, b=2)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_test_tokenize_numpy_datetime.tokenize_np_array_2000_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_test_tokenize_numpy_datetime.tokenize_np_array_2000_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 98, "end_line": 122, "span_ids": ["test_tokenize_numpy_datetime", "test_tokenize", "test_tokenize_numpy_array_consistent_on_values", "test_tokenize_numpy_array_supports_uneven_sizes", "test_tokenize_discontiguous_numpy_array"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tokenize():\n a = (1, 2, 3)\n assert isinstance(tokenize(a), (str, bytes))\n\n\n@pytest.mark.skipif(\"not np\")\ndef test_tokenize_numpy_array_consistent_on_values():\n assert tokenize(np.random.RandomState(1234).random_sample(1000)) == tokenize(\n np.random.RandomState(1234).random_sample(1000)\n )\n\n\n@pytest.mark.skipif(\"not np\")\ndef 
test_tokenize_numpy_array_supports_uneven_sizes():\n tokenize(np.random.random(7).astype(dtype=\"i2\"))\n\n\n@pytest.mark.skipif(\"not np\")\ndef test_tokenize_discontiguous_numpy_array():\n tokenize(np.random.random(8)[::2])\n\n\n@pytest.mark.skipif(\"not np\")\ndef test_tokenize_numpy_datetime():\n tokenize(np.array([\"2000-01-01T12:00:00\"], dtype=\"M8[ns]\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_numpy_scalar_test_tokenize_numpy_scalar_string_rep.try_.finally_.np_set_string_function_No": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_numpy_scalar_test_tokenize_numpy_scalar_string_rep.try_.finally_.np_set_string_function_No", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 125, "end_line": 141, "span_ids": ["test_tokenize_numpy_scalar_string_rep", "test_tokenize_numpy_scalar"], "tokens": 184}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not np\")\ndef test_tokenize_numpy_scalar():\n assert tokenize(np.array(1.0, dtype=\"f8\")) == tokenize(np.array(1.0, dtype=\"f8\"))\n assert tokenize(\n np.array([(1, 2)], dtype=[(\"a\", \"i4\"), (\"b\", \"i8\")])[0]\n ) == tokenize(np.array([(1, 2)], dtype=[(\"a\", \"i4\"), (\"b\", \"i8\")])[0])\n\n\n@pytest.mark.skipif(\"not np\")\ndef test_tokenize_numpy_scalar_string_rep():\n # Test tokenizing numpy scalars doesn't depend on their string representation\n try:\n np.set_string_function(lambda x: \"foo\")\n assert tokenize(np.array(1)) != tokenize(np.array(2))\n finally:\n # Reset back to default\n np.set_string_function(None)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_numpy_array_on_object_dtype_test_tokenize_numpy_array_on_object_dtype.assert_tokenize_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_numpy_array_on_object_dtype_test_tokenize_numpy_array_on_object_dtype.assert_tokenize_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 144, "end_line": 154, "span_ids": ["test_tokenize_numpy_array_on_object_dtype"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": 
"@pytest.mark.skipif(\"not np\")\ndef test_tokenize_numpy_array_on_object_dtype():\n assert tokenize(np.array([\"a\", \"aa\", \"aaa\"], dtype=object)) == tokenize(\n np.array([\"a\", \"aa\", \"aaa\"], dtype=object)\n )\n assert tokenize(np.array([\"a\", None, \"aaa\"], dtype=object)) == tokenize(\n np.array([\"a\", None, \"aaa\"], dtype=object)\n )\n assert tokenize(\n np.array([(1, \"a\"), (1, None), (1, \"aaa\")], dtype=object)\n ) == tokenize(np.array([(1, \"a\"), (1, None), (1, \"aaa\")], dtype=object))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_numpy_memmap_offset_test_tokenize_numpy_memmap_offset.with_open_fn_rb_as_f_.assert_tokenize_sub1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_numpy_memmap_offset_test_tokenize_numpy_memmap_offset.with_open_fn_rb_as_f_.assert_tokenize_sub1_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 157, "end_line": 173, "span_ids": ["test_tokenize_numpy_memmap_offset"], "tokens": 187}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not np\")\ndef test_tokenize_numpy_memmap_offset(tmpdir):\n # Test two different memmaps into the same numpy file\n fn = str(tmpdir.join(\"demo_data\"))\n\n with open(fn, \"wb\") as f:\n f.write(b\"ashekwicht\")\n\n with open(fn, \"rb\") as f:\n mmap1 = np.memmap(f, dtype=np.uint8, mode=\"r\", offset=0, shape=5)\n mmap2 = np.memmap(f, dtype=np.uint8, mode=\"r\", offset=5, shape=5)\n\n assert tokenize(mmap1) != tokenize(mmap2)\n # also make sure that they tokenize correctly when taking sub-arrays\n sub1 = mmap1[1:-1]\n sub2 = mmap2[1:-1]\n assert tokenize(sub1) != tokenize(sub2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_numpy_memmap_test_tokenize_numpy_memmap.None_2.assert_tokenize_mm_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_numpy_memmap_test_tokenize_numpy_memmap.None_2.assert_tokenize_mm_1_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 176, "end_line": 201, "span_ids": ["test_tokenize_numpy_memmap"], "tokens": 241}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, 
"text": "@pytest.mark.skipif(\"not np\")\ndef test_tokenize_numpy_memmap():\n with tmpfile(\".npy\") as fn:\n x = np.arange(5)\n np.save(fn, x)\n y = tokenize(np.load(fn, mmap_mode=\"r\"))\n\n with tmpfile(\".npy\") as fn:\n x = np.arange(5)\n np.save(fn, x)\n z = tokenize(np.load(fn, mmap_mode=\"r\"))\n\n assert y != z\n\n with tmpfile(\".npy\") as fn:\n x = np.random.normal(size=(10, 10))\n np.save(fn, x)\n mm = np.load(fn, mmap_mode=\"r\")\n mm2 = np.load(fn, mmap_mode=\"r\")\n a = tokenize(mm[0, :])\n b = tokenize(mm[1, :])\n c = tokenize(mm[0:3, :])\n d = tokenize(mm[:, 0])\n assert len(set([a, b, c, d])) == 4\n assert tokenize(mm) == tokenize(mm2)\n assert tokenize(mm[1, :]) == tokenize(mm2[1, :])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_numpy_memmap_no_filename_test_tokenize_numpy_ufunc_consistent.assert_tokenize_inc_t": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_numpy_memmap_no_filename_test_tokenize_numpy_ufunc_consistent.assert_tokenize_inc_t", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 204, "end_line": 225, "span_ids": ["test_tokenize_numpy_ufunc_consistent", "test_tokenize_numpy_memmap_no_filename"], "tokens": 228}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not np\")\ndef test_tokenize_numpy_memmap_no_filename():\n # GH 1562:\n with tmpfile(\".npy\") as fn1, tmpfile(\".npy\") as fn2:\n x = np.arange(5)\n np.save(fn1, x)\n np.save(fn2, x)\n\n a = np.load(fn1, mmap_mode=\"r\")\n b = a + a\n assert tokenize(b) == tokenize(b)\n\n\n@pytest.mark.skipif(\"not np\")\ndef test_tokenize_numpy_ufunc_consistent():\n assert tokenize(np.sin) == \"02106e2c67daf452fb480d264e0dac21\"\n assert tokenize(np.cos) == \"c99e52e912e4379882a9a4b387957a0b\"\n\n # Make a ufunc that isn't in the numpy namespace. 
Similar to\n # any found in other packages.\n inc = np.frompyfunc(lambda x: x + 1, 1, 1)\n assert tokenize(inc) == tokenize(inc)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_partial_func_args_kwargs_consistent_test_normalize_base.for_i_in_1_1_1_1_sl.assert_normalize_token_i_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_partial_func_args_kwargs_consistent_test_normalize_base.for_i_in_1_1_1_1_sl.assert_normalize_token_i_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 228, "end_line": 241, "span_ids": ["test_tokenize_partial_func_args_kwargs_consistent", "test_normalize_base"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tokenize_partial_func_args_kwargs_consistent():\n f = tz.partial(f3, f2, c=f1)\n res = normalize_token(f)\n sol = (\n b\"cdask.tests.test_base\\nf3\\np0\\n.\",\n (b\"cdask.tests.test_base\\nf2\\np0\\n.\",),\n ((\"c\", b\"cdask.tests.test_base\\nf1\\np0\\n.\"),),\n )\n assert res == sol\n\n\ndef test_normalize_base():\n for i in [1, 1.1, \"1\", slice(1, 2, 3)]:\n assert normalize_token(i) is i", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_pandas_test_tokenize_pandas.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_pandas_test_tokenize_pandas.None_5", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 244, "end_line": 258, "span_ids": ["test_tokenize_pandas"], "tokens": 218}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not pd\")\ndef test_tokenize_pandas():\n a = pd.DataFrame({\"x\": [1, 2, 3], \"y\": [\"4\", \"asd\", None]}, index=[1, 2, 3])\n b = pd.DataFrame({\"x\": [1, 2, 3], \"y\": [\"4\", \"asd\", None]}, index=[1, 2, 3])\n\n assert tokenize(a) == tokenize(b)\n b.index.name = \"foo\"\n assert tokenize(a) != tokenize(b)\n\n a = pd.DataFrame({\"x\": [1, 2, 3], \"y\": [\"a\", \"b\", \"a\"]})\n b = pd.DataFrame({\"x\": [1, 2, 3], \"y\": [\"a\", \"b\", \"a\"]})\n a[\"z\"] = a.y.astype(\"category\")\n assert tokenize(a) != tokenize(b)\n b[\"z\"] = a.y.astype(\"category\")\n assert tokenize(a) 
== tokenize(b)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_pandas_invalid_unicode_test_tokenize_pandas_no_pickle.tokenize_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_pandas_invalid_unicode_test_tokenize_pandas_no_pickle.tokenize_df_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 271, "end_line": 296, "span_ids": ["test_tokenize_pandas_no_pickle", "test_tokenize_pandas_mixed_unicode_bytes", "test_tokenize_pandas_invalid_unicode"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not pd\")\ndef test_tokenize_pandas_invalid_unicode():\n # see https://github.com/dask/dask/issues/2713\n df = pd.DataFrame(\n {\"x\\ud83d\": [1, 2, 3], \"y\\ud83d\": [\"4\", \"asd\\ud83d\", None]}, index=[1, 2, 3]\n )\n tokenize(df)\n\n\n@pytest.mark.skipif(\"not pd\")\ndef test_tokenize_pandas_mixed_unicode_bytes():\n df = pd.DataFrame(\n {\"\u00f6\".encode(\"utf8\"): [1, 2, 3], \"\u00f6\": [\"\u00f6\", \"\u00f6\".encode(\"utf8\"), None]},\n index=[1, 2, 3],\n )\n tokenize(df)\n\n\n@pytest.mark.skipif(\"not pd\")\ndef test_tokenize_pandas_no_pickle():\n class NoPickle(object):\n # pickling not supported because it is a local class\n pass\n\n df = pd.DataFrame({\"x\": [\"foo\", None, NoPickle()]})\n tokenize(df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_pandas_extension_array_test_tokenize_pandas_extension_array.for_arr_in_arrays_.assert_tokenize_arr_t": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_pandas_extension_array_test_tokenize_pandas_extension_array.for_arr_in_arrays_.assert_tokenize_arr_t", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 289, "end_line": 317, "span_ids": ["test_tokenize_pandas_extension_array"], "tokens": 259}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not pd\")\ndef test_tokenize_pandas_extension_array():\n from dask.dataframe._compat import PANDAS_GT_100, PANDAS_GT_0240\n\n if not PANDAS_GT_0240:\n pytest.skip(\"requires pandas>=1.0.0\")\n\n arrays = [\n pd.array([1, 0, None], 
dtype=\"Int64\"),\n pd.array([\"2000\"], dtype=\"Period[D]\"),\n pd.array([1, 0, 0], dtype=\"Sparse[int]\"),\n pd.array([pd.Timestamp(\"2000\")], dtype=\"datetime64[ns]\"),\n pd.array([pd.Timestamp(\"2000\", tz=\"CET\")], dtype=\"datetime64[ns, CET]\"),\n pd.array(\n [\"a\", \"b\"],\n dtype=pd.api.types.CategoricalDtype([\"a\", \"b\", \"c\"], ordered=False),\n ),\n ]\n\n if PANDAS_GT_100:\n arrays.extend(\n [\n pd.array([\"a\", \"b\", None], dtype=\"string\"),\n pd.array([True, False, None], dtype=\"boolean\"),\n ]\n )\n\n for arr in arrays:\n assert tokenize(arr) == tokenize(arr)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_pandas_index_test_tokenize_ordered_dict.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_pandas_index_test_tokenize_ordered_dict.None_1", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 320, "end_line": 394, "span_ids": ["test_tokenize_dict", "test_tokenize_same_repr", "test_tokenize_method", "test_tokenize_set", "test_tokenize_ordered_dict", "test_tokenize_pandas_index", "test_tokenize_sequences", "test_tokenize_kwargs"], "tokens": 567}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not pd\")\ndef test_tokenize_pandas_index():\n idx = pd.Index([\"a\", \"b\"])\n assert tokenize(idx) == tokenize(idx)\n\n idx = pd.MultiIndex.from_product([[\"a\", \"b\"], [0, 1]])\n assert tokenize(idx) == tokenize(idx)\n\n\ndef test_tokenize_kwargs():\n assert tokenize(5, x=1) == tokenize(5, x=1)\n assert tokenize(5) != tokenize(5, x=1)\n assert tokenize(5, x=1) != tokenize(5, x=2)\n assert tokenize(5, x=1) != tokenize(5, y=1)\n\n\ndef test_tokenize_same_repr():\n class Foo(object):\n def __init__(self, x):\n self.x = x\n\n def __repr__(self):\n return \"a foo\"\n\n assert tokenize(Foo(1)) != tokenize(Foo(2))\n\n\ndef test_tokenize_method():\n class Foo(object):\n def __init__(self, x):\n self.x = x\n\n def __dask_tokenize__(self):\n return self.x\n\n a, b = Foo(1), Foo(2)\n assert tokenize(a) == tokenize(a)\n assert tokenize(a) != tokenize(b)\n\n # dispatch takes precedence\n before = tokenize(a)\n normalize_token.register(Foo, lambda self: self.x + 1)\n after = tokenize(a)\n assert before != after\n\n\n@pytest.mark.skipif(\"not np\")\ndef test_tokenize_sequences():\n assert tokenize([1]) != tokenize([2])\n assert tokenize([1]) != tokenize((1,))\n assert tokenize([1]) == tokenize([1])\n\n x = np.arange(2000) # long enough to drop information in repr\n y = np.arange(2000)\n y[1000] = 0 # middle isn't printed in repr\n assert tokenize([x]) != tokenize([y])\n\n\ndef test_tokenize_dict():\n assert tokenize({\"x\": 1, 1: \"x\"}) == tokenize({\"x\": 1, 1: \"x\"})\n\n\ndef test_tokenize_set():\n assert tokenize({1, 2, \"x\", (1, \"x\")}) == tokenize({1, 2, \"x\", (1, \"x\")})\n\n\ndef 
test_tokenize_ordered_dict():\n from collections import OrderedDict\n\n a = OrderedDict([(\"a\", 1), (\"b\", 2)])\n b = OrderedDict([(\"a\", 1), (\"b\", 2)])\n c = OrderedDict([(\"b\", 2), (\"a\", 1)])\n\n assert tokenize(a) == tokenize(b)\n assert tokenize(a) != tokenize(c)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_range_test_tokenize_range._Different_step": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_range_test_tokenize_range._Different_step", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 397, "end_line": 401, "span_ids": ["test_tokenize_range"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tokenize_range():\n assert tokenize(range(5, 10, 2)) == tokenize(range(5, 10, 2)) # Identical ranges\n assert tokenize(range(5, 10, 2)) != tokenize(range(1, 10, 2)) # Different start\n assert tokenize(range(5, 10, 2)) != tokenize(range(5, 15, 2)) # Different stop\n assert tokenize(range(5, 10, 2)) != tokenize(range(5, 10, 1)) # Different step", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_object_array_with_nans_test_tokenize_numpy_matrix.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_object_array_with_nans_test_tokenize_numpy_matrix.None_2", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 414, "end_line": 440, "span_ids": ["test_tokenize_numpy_matrix", "test_tokenize_base_types", "test_tokenize_object_array_with_nans", "test_tokenize_literal"], "tokens": 220}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not np\")\ndef test_tokenize_object_array_with_nans():\n a = np.array([\"foo\", \"Jos\\xe9\", np.nan], dtype=\"O\")\n assert tokenize(a) == tokenize(a)\n\n\n@pytest.mark.parametrize(\n \"x\", [1, True, \"a\", b\"a\", 1.0, 1j, 1.0j, [], (), {}, None, str, int]\n)\ndef test_tokenize_base_types(x):\n assert tokenize(x) == tokenize(x), x\n\n\ndef test_tokenize_literal():\n assert tokenize(literal([\"x\", 1])) == tokenize(literal([\"x\", 1]))\n\n\n@pytest.mark.skipif(\"not 
np\")\n@pytest.mark.filterwarnings(\"ignore:the matrix:PendingDeprecationWarning\")\ndef test_tokenize_numpy_matrix():\n rng = np.random.RandomState(1234)\n a = np.asmatrix(rng.rand(100))\n b = a.copy()\n assert tokenize(a) == tokenize(b)\n\n b[:10] = 1\n assert tokenize(a) != tokenize(b)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_dense_sparse_array_test_tokenize_dense_sparse_array.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_dense_sparse_array_test_tokenize_dense_sparse_array.None_2", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 433, "end_line": 460, "span_ids": ["test_tokenize_dense_sparse_array"], "tokens": 234}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not sp\")\n@pytest.mark.parametrize(\"cls_name\", (\"dia\", \"bsr\", \"coo\", \"csc\", \"csr\", \"dok\", \"lil\"))\ndef test_tokenize_dense_sparse_array(cls_name):\n rng = np.random.RandomState(1234)\n\n with pytest.warns(None):\n # ignore scipy.sparse.SparseEfficiencyWarning\n a = sp.rand(10, 10000, random_state=rng).asformat(cls_name)\n b = a.copy()\n\n assert tokenize(a) == tokenize(b)\n\n # modifying the data values\n if hasattr(b, \"data\"):\n b.data[:10] = 1\n elif cls_name == \"dok\":\n b[3, 3] = 1\n else:\n raise ValueError\n\n assert tokenize(a) != tokenize(b)\n\n # modifying the data indices\n with pytest.warns(None):\n b = a.copy().asformat(\"coo\")\n b.row[:10] = np.arange(10)\n b = b.asformat(cls_name)\n assert tokenize(a) != tokenize(b)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_object_with_recursion_error_returns_uuid_try_.except_ImportError_.dataclasses.None": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_object_with_recursion_error_returns_uuid_try_.except_ImportError_.dataclasses.None", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 473, "end_line": 502, "span_ids": ["impl:24", "test_is_dask_collection", "test_tokenize_object_with_recursion_error_returns_uuid"], "tokens": 203}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def 
test_tokenize_object_with_recursion_error_returns_uuid():\n cycle = dict(a=None)\n cycle[\"a\"] = cycle\n\n assert len(tokenize(cycle)) == 32\n\n\ndef test_is_dask_collection():\n class DummyCollection(object):\n def __init__(self, dsk=None):\n self.dask = dsk\n\n def __dask_graph__(self):\n return self.dask\n\n x = delayed(1) + 2\n assert is_dask_collection(x)\n assert not is_dask_collection(2)\n assert is_dask_collection(DummyCollection({}))\n assert not is_dask_collection(DummyCollection())\n assert not is_dask_collection(DummyCollection)\n\n\ntry:\n import dataclasses\n\n # Avoid @dataclass decorator as Python < 3.7 fail to interpret the type hints\n ADataClass = dataclasses.make_dataclass(\"ADataClass\", [(\"a\", int)])\nexcept ImportError:\n dataclasses = None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_unpack_collections_test_unpack_collections.build.return.t": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_unpack_collections_test_unpack_collections.build.return.t", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 495, "end_line": 520, "span_ids": ["test_unpack_collections"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_unpack_collections():\n a = delayed(1) + 5\n b = a + 1\n c = a + 2\n\n def build(a, b, c, iterator):\n t = (\n a,\n b, # Top-level collections\n {\n \"a\": a, # dict\n a: b, # collections as keys\n \"b\": [1, 2, [b]], # list\n \"c\": 10, # other builtins pass through unchanged\n \"d\": (c, 2), # tuple\n \"e\": {a, 2, 3}, # set\n \"f\": OrderedDict([(\"a\", a)]),\n }, # OrderedDict\n iterator,\n ) # Iterator\n\n if dataclasses is not None:\n t[2][\"f\"] = ADataClass(a=a)\n t[2][\"g\"] = (ADataClass, a)\n\n return t\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_unpack_collections.args_test_unpack_collections._Smoketest_results_that_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_unpack_collections.args_test_unpack_collections._Smoketest_results_that_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 522, "end_line": 550, "span_ids": ["test_unpack_collections"], "tokens": 296}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_unpack_collections():\n # ... other code\n\n args = build(a, b, c, (i for i in [a, b, c]))\n\n collections, repack = unpack_collections(*args)\n assert len(collections) == 3\n\n # Replace collections with `'~a'` strings\n result = repack([\"~a\", \"~b\", \"~c\"])\n sol = build(\"~a\", \"~b\", \"~c\", [\"~a\", \"~b\", \"~c\"])\n assert result == sol\n\n # traverse=False\n collections, repack = unpack_collections(*args, traverse=False)\n assert len(collections) == 2 # just a and b\n assert repack(collections) == args\n\n # No collections\n collections, repack = unpack_collections(1, 2, {\"a\": 3})\n assert not collections\n assert repack(collections) == (1, 2, {\"a\": 3})\n\n # Result that looks like a task\n def fail(*args):\n raise ValueError(\"Shouldn't have been called\")\n\n collections, repack = unpack_collections(\n a, (fail, 1), [(fail, 2, 3)], traverse=False\n )\n repack(collections) # Smoketest task literals\n repack([(fail, 1)]) # Smoketest results that look like tasks", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_Tuple_Tuple.__dask_postpersist__.return.Tuple_self__keys_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_Tuple_Tuple.__dask_postpersist__.return.Tuple_self__keys_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 553, "end_line": 579, "span_ids": ["Tuple.__init__", "Tuple.__add__", "Tuple.__dask_keys__", "Tuple.__dask_tokenize__", "Tuple", "Tuple.__dask_postpersist__", "Tuple.__dask_postcompute__", "Tuple.__dask_graph__"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Tuple(DaskMethodsMixin):\n __slots__ = (\"_dask\", \"_keys\")\n 
__dask_scheduler__ = staticmethod(dask.threaded.get)\n\n def __init__(self, dsk, keys):\n self._dask = dsk\n self._keys = keys\n\n def __add__(self, other):\n if isinstance(other, Tuple):\n return Tuple(merge(self._dask, other._dask), self._keys + other._keys)\n return NotImplemented\n\n def __dask_graph__(self):\n return self._dask\n\n def __dask_keys__(self):\n return self._keys\n\n def __dask_tokenize__(self):\n return self._keys\n\n def __dask_postcompute__(self):\n return tuple, ()\n\n def __dask_postpersist__(self):\n return Tuple, (self._keys,)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_custom_collection_test_custom_collection.assert_t2__dask_t3__da": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_custom_collection_test_custom_collection.assert_t2__dask_t3__da", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 582, "end_line": 617, "span_ids": ["test_custom_collection"], "tokens": 386}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_custom_collection():\n dsk = {\"a\": 1, \"b\": 2}\n dsk2 = {\"c\": (add, \"a\", \"b\"), \"d\": (add, \"c\", 1)}\n dsk2.update(dsk)\n dsk3 = {\"e\": (add, \"a\", 4), \"f\": (inc, \"e\")}\n dsk3.update(dsk)\n\n x = Tuple(dsk, [\"a\", \"b\"])\n y = Tuple(dsk2, [\"c\", \"d\"])\n z = Tuple(dsk3, [\"e\", \"f\"])\n\n # __slots__ defined on base mixin class propagates\n with pytest.raises(AttributeError):\n x.foo = 1\n\n # is_dask_collection\n assert is_dask_collection(x)\n\n # tokenize\n assert tokenize(x) == tokenize(x)\n assert tokenize(x) != tokenize(y)\n\n # compute\n assert x.compute() == (1, 2)\n assert dask.compute(x, [y, z]) == ((1, 2), [(3, 4), (5, 6)])\n t = x + y + z\n assert t.compute() == (1, 2, 3, 4, 5, 6)\n\n # persist\n t2 = t.persist()\n assert isinstance(t2, Tuple)\n assert t2._dask == dict(zip(\"abcdef\", range(1, 7)))\n assert t2.compute() == (1, 2, 3, 4, 5, 6)\n x2, y2, z2 = dask.persist(x, y, z)\n t3 = x2 + y2 + z2\n assert t2._dask == t3._dask", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_no_opt_test_compute_no_opt._See_Renamed": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_no_opt_test_compute_no_opt._See_Renamed", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 620, "end_line": 645, "span_ids": ["test_compute_no_opt"], "tokens": 362}, "excluded_embed_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not db\")\ndef test_compute_no_opt():\n # Bag does `fuse` by default. Test that with `optimize_graph=False` that\n # doesn't get called. We check this by using a callback to track the keys\n # that are computed.\n from dask.callbacks import Callback\n\n b = db.from_sequence(range(100), npartitions=4)\n add1 = tz.partial(add, 1)\n mul2 = tz.partial(mul, 2)\n o = b.map(add1).map(mul2)\n # Check that with the kwarg, the optimization doesn't happen\n keys = []\n with Callback(pretask=lambda key, *args: keys.append(key)):\n o.compute(scheduler=\"single-threaded\", optimize_graph=False)\n assert len([k for k in keys if \"mul\" in k[0]]) == 4\n assert len([k for k in keys if \"add\" in k[0]]) == 4\n # Check that without the kwarg, the optimization does happen\n keys = []\n with Callback(pretask=lambda key, *args: keys.append(key)):\n o.compute(scheduler=\"single-threaded\")\n # Names of fused tasks have been merged, and the original key is an alias.\n # Otherwise, the lengths below would be 4 and 0.\n assert len([k for k in keys if \"mul\" in k[0]]) == 8\n assert len([k for k in keys if \"add\" in k[0]]) == 4\n assert len([k for k in keys if \"add-mul\" in k[0]]) == 4 # See? Renamed", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_array_test_persist_array.assert_len_y_dask_y_n": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_array_test_persist_array.assert_len_y_dask_y_n", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 648, "end_line": 670, "span_ids": ["test_persist_array", "test_compute_array"], "tokens": 217}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not da\")\ndef test_compute_array():\n arr = np.arange(100).reshape((10, 10))\n darr = da.from_array(arr, chunks=(5, 5))\n darr1 = darr + 1\n darr2 = darr + 2\n out1, out2 = compute(darr1, darr2)\n assert np.allclose(out1, arr + 1)\n assert np.allclose(out2, arr + 2)\n\n\n@pytest.mark.skipif(\"not da\")\ndef test_persist_array():\n from dask.array.utils import assert_eq\n\n arr = np.arange(100).reshape((10, 10))\n x = da.from_array(arr, chunks=(5, 5))\n x = (x + 1) - x.mean(axis=0)\n y = x.persist()\n\n assert_eq(x, y)\n assert set(y.dask).issubset(x.dask)\n assert len(y.dask) == y.npartitions", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_dataframe_test_compute_dataframe.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_dataframe_test_compute_dataframe.None_1", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 683, "end_line": 691, "span_ids": ["test_compute_dataframe"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not dd\")\ndef test_compute_dataframe():\n df = pd.DataFrame({\"a\": [1, 2, 3, 4], \"b\": [5, 5, 3, 3]})\n ddf = dd.from_pandas(df, npartitions=2)\n ddf1 = ddf.a + 1\n ddf2 = ddf.a + ddf.b\n out1, out2 = compute(ddf1, ddf2)\n tm.assert_series_equal(out1, df.a + 1)\n tm.assert_series_equal(out2, df.a + df.b)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_array_dataframe_test_compute_array_dataframe.dd__compat_tm_assert_seri": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_array_dataframe_test_compute_array_dataframe.dd__compat_tm_assert_seri", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 684, "end_line": 692, "span_ids": ["test_compute_array_dataframe"], "tokens": 152}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not dd or not da\")\ndef test_compute_array_dataframe():\n arr = np.arange(100).reshape((10, 10))\n darr = da.from_array(arr, chunks=(5, 5)) + 1\n df = pd.DataFrame({\"a\": [1, 2, 3, 4], \"b\": [5, 5, 3, 3]})\n ddf = dd.from_pandas(df, npartitions=2).a + 2\n arr_out, df_out = compute(darr, ddf)\n assert np.allclose(arr_out, arr + 1)\n dd._compat.tm.assert_series_equal(df_out, df.a + 2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_dataframe_valid_unicode_in_bytes_test_compute_with_literal.assert_compute_5_5_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_dataframe_valid_unicode_in_bytes_test_compute_with_literal.assert_compute_5_5_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", 
"file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 705, "end_line": 739, "span_ids": ["test_compute_dataframe_valid_unicode_in_bytes", "test_compute_with_literal", "test_compute_array_bag", "test_compute_dataframe_invalid_unicode"], "tokens": 286}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not dd\")\ndef test_compute_dataframe_valid_unicode_in_bytes():\n df = pd.DataFrame(data=np.random.random((3, 1)), columns=[\"\u00f6\".encode(\"utf8\")])\n dd.from_pandas(df, npartitions=4)\n\n\n@pytest.mark.skipif(\"not dd\")\ndef test_compute_dataframe_invalid_unicode():\n # see https://github.com/dask/dask/issues/2713\n df = pd.DataFrame(data=np.random.random((3, 1)), columns=[\"\\ud83d\"])\n dd.from_pandas(df, npartitions=4)\n\n\n@pytest.mark.skipif(\"not da or not db\")\ndef test_compute_array_bag():\n x = da.arange(5, chunks=2)\n b = db.from_sequence([1, 2, 3])\n\n pytest.raises(ValueError, lambda: compute(x, b))\n\n xx, bb = compute(x, b, scheduler=\"single-threaded\")\n assert np.allclose(xx, np.arange(5))\n assert bb == [1, 2, 3]\n\n\n@pytest.mark.skipif(\"not da\")\ndef test_compute_with_literal():\n x = da.arange(5, chunks=2)\n y = 10\n\n xx, yy = compute(x, y)\n assert (xx == x.compute()).all()\n assert yy == y\n\n assert compute(5) == (5,)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_nested_test_compute_nested.assert_res_1_8": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_nested_test_compute_nested.assert_res_1_8", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 732, "end_line": 744, "span_ids": ["test_compute_nested"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_compute_nested():\n a = delayed(1) + 5\n b = a + 1\n c = a + 2\n assert compute({\"a\": a, \"b\": [1, 2, b]}, (c, 2)) == (\n {\"a\": 6, \"b\": [1, 2, 7]},\n (8, 2),\n )\n\n res = compute([a, b], c, traverse=False)\n assert res[0][0] is a\n assert res[0][1] is b\n assert res[1] == 8", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_visualize_test_visualize.with_tmpdir_as_d_.None_4": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_visualize_test_visualize.with_tmpdir_as_d_.None_4", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 747, "end_line": 770, "span_ids": ["test_visualize"], "tokens": 282}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not da\")\n@pytest.mark.skipif(\n sys.flags.optimize, reason=\"graphviz exception with Python -OO flag\"\n)\ndef test_visualize():\n pytest.importorskip(\"graphviz\")\n with tmpdir() as d:\n x = da.arange(5, chunks=2)\n x.visualize(filename=os.path.join(d, \"mydask\"))\n assert os.path.exists(os.path.join(d, \"mydask.png\"))\n\n x.visualize(filename=os.path.join(d, \"mydask.pdf\"))\n assert os.path.exists(os.path.join(d, \"mydask.pdf\"))\n\n visualize(x, 1, 2, filename=os.path.join(d, \"mydask.png\"))\n assert os.path.exists(os.path.join(d, \"mydask.png\"))\n\n dsk = {\"a\": 1, \"b\": (add, \"a\", 2), \"c\": (mul, \"a\", 1)}\n visualize(x, dsk, filename=os.path.join(d, \"mydask.png\"))\n assert os.path.exists(os.path.join(d, \"mydask.png\"))\n\n x = Tuple(dsk, [\"a\", \"b\", \"c\"])\n visualize(x, filename=os.path.join(d, \"mydask.png\"))\n assert os.path.exists(os.path.join(d, \"mydask.png\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_visualize_lists_test_visualize_order.with_tmpfile_extension_d.assert_color_in_text": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_visualize_lists_test_visualize_order.with_tmpfile_extension_d.assert_color_in_text", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 773, "end_line": 797, "span_ids": ["test_visualize_order", "test_visualize_lists"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n sys.flags.optimize, reason=\"graphviz exception with Python -OO flag\"\n)\ndef test_visualize_lists(tmpdir):\n pytest.importorskip(\"graphviz\")\n fn = os.path.join(str(tmpdir), \"myfile.dot\")\n dask.visualize([{\"abc-xyz\": (add, 1, 2)}], filename=fn)\n with open(fn) as f:\n text = f.read()\n assert \"abc-xyz\" in text\n\n\n@pytest.mark.skipif(\"not da\")\n@pytest.mark.skipif(\n sys.flags.optimize, reason=\"graphviz exception with Python -OO flag\"\n)\ndef test_visualize_order():\n pytest.importorskip(\"graphviz\")\n pytest.importorskip(\"matplotlib.pyplot\")\n x = da.arange(5, chunks=2)\n with tmpfile(extension=\"dot\") as fn:\n 
x.visualize(color=\"order\", filename=fn, cmap=\"RdBu\")\n with open(fn) as f:\n text = f.read()\n assert 'color=\"#' in text", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_use_cloudpickle_to_tokenize_functions_in__main___test_optimizations_keyword.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_use_cloudpickle_to_tokenize_functions_in__main___test_optimizations_keyword.None_1", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 800, "end_line": 834, "span_ids": ["test_use_cloudpickle_to_tokenize_functions_in__main__", "inc_to_dec", "test_optimizations_keyword"], "tokens": 221}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_use_cloudpickle_to_tokenize_functions_in__main__():\n pytest.importorskip(\"cloudpickle\")\n from textwrap import dedent\n\n defn = dedent(\n \"\"\"\n def inc():\n return x\n \"\"\"\n )\n\n __main__ = sys.modules[\"__main__\"]\n exec(compile(defn, \"\", \"exec\"), __main__.__dict__)\n f = __main__.inc\n\n t = normalize_token(f)\n assert b\"cloudpickle\" in t\n\n\ndef inc_to_dec(dsk, keys):\n dsk = dict(dsk)\n for key in dsk:\n if dsk[key][0] == inc:\n dsk[key] = (dec,) + dsk[key][1:]\n return dsk\n\n\ndef test_optimizations_keyword():\n x = dask.delayed(inc)(1)\n assert x.compute() == 2\n\n with dask.config.set(optimizations=[inc_to_dec]):\n assert x.compute() == 0\n\n assert x.compute() == 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_optimize_test_optimize.for_a_b_in_zip_x3_y3_.assert_dict_a_dask_di": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_optimize_test_optimize.for_a_b_in_zip_x3_y3_.assert_dict_a_dask_di", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 837, "end_line": 863, "span_ids": ["test_optimize"], "tokens": 299}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_optimize():\n x = dask.delayed(inc)(1)\n y = dask.delayed(inc)(x)\n z = x + y\n\n x2, y2, z2, constant = optimize(x, y, z, 1)\n assert constant == 1\n\n # Same graphs for each\n dsk = dict(x2.dask)\n assert dict(y2.dask) == dsk\n assert dict(z2.dask) == dsk\n\n # Computationally 
equivalent\n assert dask.compute(x2, y2, z2) == dask.compute(x, y, z)\n\n # Applying optimizations before compute and during compute gives\n # same results. Shows optimizations are occurring.\n sols = dask.compute(x, y, z, optimizations=[inc_to_dec])\n x3, y3, z3 = optimize(x, y, z, optimizations=[inc_to_dec])\n assert dask.compute(x3, y3, z3) == sols\n\n # Optimize respects global optimizations as well\n with dask.config.set(optimizations=[inc_to_dec]):\n x4, y4, z4 = optimize(x, y, z)\n for a, b in zip([x3, y3, z3], [x4, y4, z4]):\n assert dict(a.dask) == dict(b.dask)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_optimize_nested_test_optimize_nested.assert_res_1_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_optimize_nested_test_optimize_nested.assert_res_1_compute_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 866, "end_line": 886, "span_ids": ["test_optimize_nested"], "tokens": 223}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_optimize_nested():\n a = dask.delayed(inc)(1)\n b = dask.delayed(inc)(a)\n c = a + b\n\n result = optimize({\"a\": a, \"b\": [1, 2, b]}, (c, 2))\n\n a2 = result[0][\"a\"]\n b2 = result[0][\"b\"][2]\n c2 = result[1][0]\n\n assert isinstance(a2, Delayed)\n assert isinstance(b2, Delayed)\n assert isinstance(c2, Delayed)\n assert dict(a2.dask) == dict(b2.dask) == dict(c2.dask)\n assert compute(*result) == ({\"a\": 2, \"b\": [1, 2, 3]}, (5, 2))\n\n res = optimize([a, b], c, traverse=False)\n assert res[0][0] is a\n assert res[0][1] is b\n assert res[1].compute() == 5", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_default_imports_test_persist_literals.assert_persist_1_2_3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_default_imports_test_persist_literals.assert_persist_1_2_3_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 889, "end_line": 917, "span_ids": ["test_default_imports", "test_persist_literals"], "tokens": 168}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_default_imports():\n \"\"\"\n Startup time: `import dask` should not import 
too many modules.\n \"\"\"\n code = \"\"\"if 1:\n import dask\n import sys\n\n print(sorted(sys.modules))\n \"\"\"\n\n out = subprocess.check_output([sys.executable, \"-c\", code])\n modules = set(eval(out.decode()))\n assert \"dask\" in modules\n blacklist = [\n \"dask.array\",\n \"dask.dataframe\",\n \"numpy\",\n \"pandas\",\n \"partd\",\n \"s3fs\",\n \"distributed\",\n ]\n for mod in blacklist:\n assert mod not in modules\n\n\ndef test_persist_literals():\n assert persist(1, 2, 3) == (1, 2, 3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_nested_test_persist_nested.assert_res_1_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_nested_test_persist_nested.assert_res_1_compute_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 920, "end_line": 933, "span_ids": ["test_persist_nested"], "tokens": 176}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_persist_nested():\n a = delayed(1) + 5\n b = a + 1\n c = a + 2\n result = persist({\"a\": a, \"b\": [1, 2, b]}, (c, 2))\n assert isinstance(result[0][\"a\"], Delayed)\n assert isinstance(result[0][\"b\"][2], Delayed)\n assert isinstance(result[1][0], Delayed)\n assert compute(*result) == ({\"a\": 6, \"b\": [1, 2, 7]}, (8, 2))\n\n res = persist([a, b], c, traverse=False)\n assert res[0][0] is a\n assert res[0][1] is b\n assert res[1].compute() == 8", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_delayed_test_persist_array_bag.assert_list_b_list_bb": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_delayed_test_persist_array_bag.assert_list_b_list_bb", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 936, "end_line": 967, "span_ids": ["test_persist_array_bag", "test_persist_delayed"], "tokens": 252}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_persist_delayed():\n x1 = delayed(1)\n x2 = delayed(inc)(x1)\n x3 = delayed(inc)(x2)\n (xx,) = persist(x3)\n assert isinstance(xx, Delayed)\n assert xx.key == x3.key\n assert len(xx.dask) == 1\n\n assert x3.compute() == xx.compute()\n\n\n@pytest.mark.skipif(\"not da or 
not db\")\ndef test_persist_array_bag():\n x = da.arange(5, chunks=2) + 1\n b = db.from_sequence([1, 2, 3]).map(inc)\n\n with pytest.raises(ValueError):\n persist(x, b)\n\n xx, bb = persist(x, b, scheduler=\"single-threaded\")\n\n assert isinstance(xx, da.Array)\n assert isinstance(bb, db.Bag)\n\n assert xx.name == x.name\n assert bb.name == b.name\n assert len(xx.dask) == xx.npartitions < len(x.dask)\n assert len(bb.dask) == bb.npartitions < len(b.dask)\n\n assert np.allclose(x, xx)\n assert list(b) == list(bb)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_normalize_function_limited_size_test_optimize_globals.None_1.assert_eq_xx_np_ones_10": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_normalize_function_limited_size_test_optimize_globals.None_1.assert_eq_xx_np_ones_10", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 970, "end_line": 999, "span_ids": ["test_optimize_globals", "test_normalize_function_limited_size"], "tokens": 269}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_normalize_function_limited_size():\n for i in range(1000):\n normalize_function(lambda x: x)\n\n assert 50 < len(function_cache) < 600\n\n\ndef test_optimize_globals():\n da = pytest.importorskip(\"dask.array\")\n db = pytest.importorskip(\"dask.bag\")\n\n x = da.ones(10, chunks=(5,))\n\n def optimize_double(dsk, keys):\n return {k: (mul, 2, v) for k, v in dsk.items()}\n\n from dask.array.utils import assert_eq\n\n assert_eq(x + 1, np.ones(10) + 1)\n\n with dask.config.set(array_optimize=optimize_double):\n assert_eq(x + 1, (np.ones(10) * 2 + 1) * 2)\n\n assert_eq(x + 1, np.ones(10) + 1)\n\n b = db.range(10, npartitions=2)\n\n with dask.config.set(array_optimize=optimize_double):\n xx, bb = dask.compute(x + 1, b.map(inc), scheduler=\"single-threaded\")\n assert_eq(xx, (np.ones(10) * 2 + 1) * 2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_optimize_None_test_optimize_None.with_dask_config_set_arra.y_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_optimize_None_test_optimize_None.with_dask_config_set_arra.y_compute_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 1002, "end_line": 1013, "span_ids": ["test_optimize_None"], "tokens": 111}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", 
"end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_optimize_None():\n da = pytest.importorskip(\"dask.array\")\n\n x = da.ones(10, chunks=(5,))\n y = x[:9][1:8][::2] + 1 # normally these slices would be fused\n\n def my_get(dsk, keys):\n assert dsk == dict(y.dask) # but they aren't\n return dask.get(dsk, keys)\n\n with dask.config.set(array_optimize=None, scheduler=my_get):\n y.compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_scheduler_keyword_test_scheduler_keyword.try_.finally_.del_named_schedulers_foo": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_scheduler_keyword_test_scheduler_keyword.try_.finally_.del_named_schedulers_foo", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 1016, "end_line": 1035, "span_ids": ["test_scheduler_keyword"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_scheduler_keyword():\n def schedule(dsk, keys, **kwargs):\n return [[123]]\n\n named_schedulers[\"foo\"] = schedule\n\n x = delayed(inc)(1)\n\n try:\n assert x.compute() == 2\n assert x.compute(scheduler=\"foo\") == 123\n\n with dask.config.set(scheduler=\"foo\"):\n assert x.compute() == 123\n assert x.compute() == 2\n\n with dask.config.set(scheduler=\"foo\"):\n assert x.compute(scheduler=\"threads\") == 2\n finally:\n del named_schedulers[\"foo\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_raise_get_keyword_test_callable_scheduler.assert_called_0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_raise_get_keyword_test_callable_scheduler.assert_called_0_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 1038, "end_line": 1064, "span_ids": ["test_get_scheduler", "test_raise_get_keyword", "test_callable_scheduler"], "tokens": 189}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_raise_get_keyword():\n x = delayed(inc)(1)\n\n with pytest.raises(TypeError) as info:\n x.compute(get=dask.get)\n\n assert \"scheduler=\" in 
str(info.value)\n\n\ndef test_get_scheduler():\n assert get_scheduler() is None\n assert get_scheduler(scheduler=\"threads\") is dask.threaded.get\n assert get_scheduler(scheduler=\"sync\") is dask.local.get_sync\n with dask.config.set(scheduler=\"threads\"):\n assert get_scheduler(scheduler=\"threads\") is dask.threaded.get\n assert get_scheduler() is None\n\n\ndef test_callable_scheduler():\n called = [False]\n\n def get(dsk, keys, *args, **kwargs):\n called[0] = True\n return dask.get(dsk, keys)\n\n assert delayed(lambda: 1)().compute(scheduler=get) == 1\n assert called[0]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_num_workers_config_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_num_workers_config_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 1067, "end_line": 1085, "span_ids": ["test_num_workers_config"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"scheduler\", [\"threads\", \"processes\"])\ndef test_num_workers_config(scheduler):\n pytest.importorskip(\"cloudpickle\")\n # Regression test for issue #4082\n\n @delayed\n def f(x):\n time.sleep(0.5)\n return x\n\n a = [f(i) for i in range(5)]\n num_workers = 3\n with dask.config.set(num_workers=num_workers), Profiler() as prof:\n a = compute(*a, scheduler=scheduler)\n\n workers = {i.worker_id for i in prof.results}\n\n assert len(workers) == num_workers", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_cache.py_from_dask_callbacks_impor_test_cache.assert_not_Callback_activ": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_cache.py_from_dask_callbacks_impor_test_cache.assert_not_Callback_activ", "embedding": null, "metadata": {"file_path": "dask/tests/test_cache.py", "file_name": "test_cache.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 41, "span_ids": ["test_cache", "imports", "inc"], "tokens": 227}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from dask.callbacks import Callback\nfrom dask.cache import Cache\nfrom dask.local import get_sync\nfrom dask.threaded import get\nfrom operator import add\nfrom time import sleep\nimport pytest\n\ncachey = pytest.importorskip(\"cachey\")\n\n\nflag = 
[]\n\n\ndef inc(x):\n flag.append(x)\n return x + 1\n\n\ndef test_cache():\n c = cachey.Cache(10000)\n cc = Cache(c)\n\n with cc:\n assert get({\"x\": (inc, 1)}, \"x\") == 2\n\n assert flag == [1]\n assert c.data[\"x\"] == 2\n\n assert not cc.starttimes\n assert not cc.durations\n\n while flag:\n flag.pop()\n dsk = {\"x\": (inc, 1), \"y\": (inc, 2), \"z\": (add, \"x\", \"y\")}\n with cc:\n assert get(dsk, \"z\") == 5\n\n assert flag == [2] # no x present\n\n assert not Callback.active", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_cache.py_test_cache_with_number_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_cache.py_test_cache_with_number_", "embedding": null, "metadata": {"file_path": "dask/tests/test_cache.py", "file_name": "test_cache.py", "file_type": "text/x-python", "category": "test", "start_line": 44, "end_line": 76, "span_ids": ["f", "test_prefer_cheap_dependent", "test_cache_correctness", "test_cache_with_number"], "tokens": 263}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_cache_with_number():\n c = Cache(10000, limit=1)\n assert isinstance(c.cache, cachey.Cache)\n assert c.cache.available_bytes == 10000\n assert c.cache.limit == 1\n\n\ndef test_cache_correctness():\n # https://github.com/dask/dask/issues/3631\n c = Cache(10000)\n da = pytest.importorskip(\"dask.array\")\n from numpy import ones, zeros\n\n z = da.from_array(zeros(1), chunks=10)\n o = da.from_array(ones(1), chunks=10)\n with c:\n assert (z.compute() == 0).all()\n assert (o.compute() == 1).all()\n\n\ndef f(duration, size, *args):\n sleep(duration)\n return [0] * size\n\n\ndef test_prefer_cheap_dependent():\n dsk = {\"x\": (f, 0.01, 10), \"y\": (f, 0.000001, 1, \"x\")}\n c = Cache(10000)\n with c:\n get_sync(dsk, \"y\")\n\n assert c.cache.scorer.cost[\"x\"] < c.cache.scorer.cost[\"y\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_callbacks.py_from_dask_local_import_ge_test_start_state_callback.assert_flag_0_is_True": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_callbacks.py_from_dask_local_import_ge_test_start_state_callback.assert_flag_0_is_True", "embedding": null, "metadata": {"file_path": "dask/tests/test_callbacks.py", "file_name": "test_callbacks.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 32, "span_ids": ["test_start_state_callback", "imports", "test_start_callback"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from dask.local import get_sync\nfrom dask.threaded import get as get_threaded\nfrom dask.callbacks import Callback\nfrom dask.utils_test import add\n\n\ndef test_start_callback():\n flag = [False]\n\n class MyCallback(Callback):\n def _start(self, dsk):\n flag[0] = True\n\n with MyCallback():\n get_sync({\"x\": 1}, \"x\")\n\n assert flag[0] is True\n\n\ndef test_start_state_callback():\n flag = [False]\n\n class MyCallback(Callback):\n def _start_state(self, dsk, state):\n flag[0] = True\n assert dsk[\"x\"] == 1\n assert len(state[\"cache\"]) == 1\n\n with MyCallback():\n get_sync({\"x\": 1}, \"x\")\n\n assert flag[0] is True", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_callbacks.py_test_finish_always_called_test_finish_always_called.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_callbacks.py_test_finish_always_called_test_finish_always_called.None_4", "embedding": null, "metadata": {"file_path": "dask/tests/test_callbacks.py", "file_name": "test_callbacks.py", "file_type": "text/x-python", "category": "test", "start_line": 35, "end_line": 73, "span_ids": ["test_finish_always_called"], "tokens": 235}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_finish_always_called():\n flag = [False]\n\n class MyCallback(Callback):\n def _finish(self, dsk, state, errored):\n flag[0] = True\n assert errored\n\n dsk = {\"x\": (lambda: 1 / 0,)}\n\n # `raise_on_exception=True`\n try:\n with MyCallback():\n get_sync(dsk, \"x\")\n except Exception as e:\n assert isinstance(e, ZeroDivisionError)\n assert flag[0]\n\n # `raise_on_exception=False`\n flag[0] = False\n try:\n with MyCallback():\n get_threaded(dsk, \"x\")\n except Exception as e:\n assert isinstance(e, ZeroDivisionError)\n assert flag[0]\n\n # KeyboardInterrupt\n def raise_keyboard():\n raise KeyboardInterrupt()\n\n dsk = {\"x\": (raise_keyboard,)}\n flag[0] = False\n try:\n with MyCallback():\n get_sync(dsk, \"x\")\n except BaseException as e:\n assert isinstance(e, KeyboardInterrupt)\n assert flag[0]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_callbacks.py_test_nested_schedulers_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_callbacks.py_test_nested_schedulers_", "embedding": null, "metadata": {"file_path": "dask/tests/test_callbacks.py", "file_name": "test_callbacks.py", "file_type": "text/x-python", "category": "test", "start_line": 76, "end_line": 111, "span_ids": ["test_add_remove_mutates_not_replaces", "test_nested_schedulers"], "tokens": 235}, "excluded_embed_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_nested_schedulers():\n class MyCallback(Callback):\n def _start(self, dsk):\n self.dsk = dsk\n\n def _pretask(self, key, dsk, state):\n assert key in self.dsk\n\n inner_callback = MyCallback()\n inner_dsk = {\"x\": (add, 1, 2), \"y\": (add, \"x\", 3)}\n\n def nested_call(x):\n assert not Callback.active\n with inner_callback:\n return get_threaded(inner_dsk, \"y\") + x\n\n outer_callback = MyCallback()\n outer_dsk = {\"a\": (nested_call, 1), \"b\": (add, \"a\", 2)}\n\n with outer_callback:\n get_threaded(outer_dsk, \"b\")\n\n assert not Callback.active\n assert outer_callback.dsk == outer_dsk\n assert inner_callback.dsk == inner_dsk\n assert not Callback.active\n\n\ndef test_add_remove_mutates_not_replaces():\n assert not Callback.active\n\n with Callback():\n assert Callback.active\n\n assert not Callback.active", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_os_test_canonical_name.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_os_test_canonical_name.None_5", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 39, "span_ids": ["imports", "test_canonical_name"], "tokens": 225}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import os\nimport stat\nimport sys\nfrom collections import OrderedDict\nfrom contextlib import contextmanager\n\nimport pytest\n\nimport dask.config\nfrom dask.config import (\n update,\n merge,\n collect,\n collect_yaml,\n collect_env,\n get,\n ensure_file,\n set,\n config,\n rename,\n update_defaults,\n refresh,\n expand_environment_variables,\n canonical_name,\n)\n\nfrom dask.utils import tmpfile\n\nyaml = pytest.importorskip(\"yaml\")\n\n\ndef test_canonical_name():\n c = {\"foo-bar\": 1, \"fizz_buzz\": 2}\n assert canonical_name(\"foo-bar\", c) == \"foo-bar\"\n assert canonical_name(\"foo_bar\", c) == \"foo-bar\"\n assert canonical_name(\"fizz-buzz\", c) == \"fizz_buzz\"\n assert canonical_name(\"fizz_buzz\", c) == \"fizz_buzz\"\n assert canonical_name(\"new-key\", c) == \"new-key\"\n assert canonical_name(\"new_key\", c) == \"new_key\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_update_test_update.assert_b_x_2_y_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_update_test_update.assert_b_x_2_y_", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 42, "end_line": 51, "span_ids": ["test_update"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_update():\n a = {\"x\": 1, \"y\": {\"a\": 1}}\n b = {\"x\": 2, \"z\": 3, \"y\": OrderedDict({\"b\": 2})}\n update(b, a)\n assert b == {\"x\": 1, \"y\": {\"a\": 1, \"b\": 2}, \"z\": 3}\n\n a = {\"x\": 1, \"y\": {\"a\": 1}}\n b = {\"x\": 2, \"z\": 3, \"y\": {\"a\": 3, \"b\": 2}}\n update(b, a, priority=\"old\")\n assert b == {\"x\": 2, \"y\": {\"a\": 3, \"b\": 2}, \"z\": 3}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_merge_test_collect_yaml_paths.with_tmpfile_extension_y.with_tmpfile_extension_y.assert_config_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_merge_test_collect_yaml_paths.with_tmpfile_extension_y.with_tmpfile_extension_y.assert_config_expected", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 54, "end_line": 78, "span_ids": ["test_collect_yaml_paths", "test_merge"], "tokens": 250}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_merge():\n a = {\"x\": 1, \"y\": {\"a\": 1}}\n b = {\"x\": 2, \"z\": 3, \"y\": {\"b\": 2}}\n\n expected = {\"x\": 2, \"y\": {\"a\": 1, \"b\": 2}, \"z\": 3}\n\n c = merge(a, b)\n assert c == expected\n\n\ndef test_collect_yaml_paths():\n a = {\"x\": 1, \"y\": {\"a\": 1}}\n b = {\"x\": 2, \"z\": 3, \"y\": {\"b\": 2}}\n\n expected = {\"x\": 2, \"y\": {\"a\": 1, \"b\": 2}, \"z\": 3}\n\n with tmpfile(extension=\"yaml\") as fn1:\n with tmpfile(extension=\"yaml\") as fn2:\n with open(fn1, \"w\") as f:\n yaml.dump(a, f)\n with open(fn2, \"w\") as f:\n yaml.dump(b, f)\n\n config = merge(*collect_yaml(paths=[fn1, fn2]))\n assert config == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_collect_yaml_dir_no_read_permissions.try_.finally_.os_chmod_path_perm_orig_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_collect_yaml_dir_no_read_permissions.try_.finally_.os_chmod_path_perm_orig_", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 81, "end_line": 106, "span_ids": ["test_collect_yaml_dir", "no_read_permissions"], "tokens": 225}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_collect_yaml_dir():\n a = {\"x\": 1, \"y\": {\"a\": 1}}\n b = {\"x\": 2, \"z\": 3, \"y\": {\"b\": 2}}\n\n expected = {\"x\": 2, \"y\": {\"a\": 1, \"b\": 2}, \"z\": 3}\n\n with tmpfile() as dirname:\n os.mkdir(dirname)\n with open(os.path.join(dirname, \"a.yaml\"), mode=\"w\") as f:\n yaml.dump(a, f)\n with open(os.path.join(dirname, \"b.yaml\"), mode=\"w\") as f:\n yaml.dump(b, f)\n\n config = merge(*collect_yaml(paths=[dirname]))\n assert config == expected\n\n\n@contextmanager\ndef no_read_permissions(path):\n perm_orig = stat.S_IMODE(os.stat(path).st_mode)\n perm_new = perm_orig ^ stat.S_IREAD\n try:\n os.chmod(path, perm_new)\n yield\n finally:\n os.chmod(path, perm_orig)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_collect_yaml_permission_errors_test_collect_yaml_permission_errors.with_no_read_permissions_.assert_config_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_collect_yaml_permission_errors_test_collect_yaml_permission_errors.with_no_read_permissions_.assert_config_expected", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 109, "end_line": 135, "span_ids": ["test_collect_yaml_permission_errors"], "tokens": 213}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n sys.platform == \"win32\", reason=\"Can't make writeonly file on windows\"\n)\n@pytest.mark.parametrize(\"kind\", [\"directory\", \"file\"])\ndef test_collect_yaml_permission_errors(tmpdir, kind):\n a = {\"x\": 1, \"y\": 2}\n b = {\"y\": 3, \"z\": 4}\n\n dir_path = str(tmpdir)\n a_path = os.path.join(dir_path, \"a.yaml\")\n b_path = os.path.join(dir_path, \"b.yaml\")\n\n with open(a_path, mode=\"w\") as f:\n yaml.dump(a, f)\n with open(b_path, mode=\"w\") as f:\n yaml.dump(b, f)\n\n if kind == \"directory\":\n cant_read = dir_path\n expected = {}\n else:\n cant_read = a_path\n expected = b\n\n with no_read_permissions(cant_read):\n config = merge(*collect_yaml(paths=[dir_path]))\n assert config == expected", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_env_test_env.assert_res_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_env_test_env.assert_res_expected", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 138, "end_line": 160, "span_ids": ["test_env"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_env():\n env = {\n \"DASK_A_B\": \"123\",\n \"DASK_C\": \"True\",\n \"DASK_D\": \"hello\",\n \"DASK_E__X\": \"123\",\n \"DASK_E__Y\": \"456\",\n \"DASK_F\": '[1, 2, \"3\"]',\n \"DASK_G\": \"/not/parsable/as/literal\",\n \"FOO\": \"not included\",\n }\n\n expected = {\n \"a_b\": 123,\n \"c\": True,\n \"d\": \"hello\",\n \"e\": {\"x\": 123, \"y\": 456},\n \"f\": [1, 2, \"3\"],\n \"g\": \"/not/parsable/as/literal\",\n }\n\n res = collect_env(env)\n assert res == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_collect_test_collect.with_tmpfile_extension_y.with_tmpfile_extension_y.assert_config_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_collect_test_collect.with_tmpfile_extension_y.with_tmpfile_extension_y.assert_config_expected", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 163, "end_line": 178, "span_ids": ["test_collect"], "tokens": 175}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_collect():\n a = {\"x\": 1, \"y\": {\"a\": 1}}\n b = {\"x\": 2, \"z\": 3, \"y\": {\"b\": 2}}\n env = {\"DASK_W\": 4}\n\n expected = {\"w\": 4, \"x\": 2, \"y\": {\"a\": 1, \"b\": 2}, \"z\": 3}\n\n with tmpfile(extension=\"yaml\") as fn1:\n with tmpfile(extension=\"yaml\") as fn2:\n with open(fn1, \"w\") as f:\n yaml.dump(a, f)\n with open(fn2, \"w\") as f:\n yaml.dump(b, f)\n\n config = collect([fn1, fn2], env=env)\n assert config == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_collect_env_none_test_get.with_pytest_raises_KeyErr.get_y_b_config_d_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_collect_env_none_test_get.with_pytest_raises_KeyErr.get_y_b_config_d_", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 181, "end_line": 197, "span_ids": ["test_get", "test_collect_env_none"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_collect_env_none():\n os.environ[\"DASK_FOO\"] = \"bar\"\n try:\n config = collect([])\n assert config == {\"foo\": \"bar\"}\n finally:\n del os.environ[\"DASK_FOO\"]\n\n\ndef test_get():\n d = {\"x\": 1, \"y\": {\"a\": 2}}\n\n assert get(\"x\", config=d) == 1\n assert get(\"y.a\", config=d) == 2\n assert get(\"y.b\", 123, config=d) == 123\n with pytest.raises(KeyError):\n get(\"y.b\", config=d)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_ensure_file_test_ensure_file.assert_not_result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_ensure_file_test_ensure_file.assert_not_result", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 200, "end_line": 238, "span_ids": ["test_ensure_file"], "tokens": 261}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_ensure_file(tmpdir):\n a = {\"x\": 1, \"y\": {\"a\": 1}}\n b = {\"x\": 123}\n\n source = os.path.join(str(tmpdir), \"source.yaml\")\n dest = os.path.join(str(tmpdir), \"dest\")\n destination = os.path.join(dest, \"source.yaml\")\n\n with open(source, \"w\") as f:\n yaml.dump(a, f)\n\n ensure_file(source=source, destination=dest, comment=False)\n\n with open(destination) as f:\n result = yaml.safe_load(f)\n assert result == a\n\n # don't overwrite old config files\n with open(source, \"w\") as f:\n yaml.dump(b, f)\n\n ensure_file(source=source, destination=dest, comment=False)\n\n with open(destination) as f:\n result = yaml.safe_load(f)\n assert result == a\n\n os.remove(destination)\n\n # Write again, now with comments\n ensure_file(source=source, destination=dest, comment=True)\n\n with open(destination) as f:\n text = f.read()\n assert \"123\" in text\n\n with open(destination) as f:\n result = yaml.safe_load(f)\n assert not result", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_set_test_set.assert_d_abc_x_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_set_test_set.assert_d_abc_x_1", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 241, "end_line": 260, "span_ids": ["test_set"], "tokens": 178}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set():\n with set(abc=123):\n assert config[\"abc\"] == 123\n with set(abc=456):\n assert config[\"abc\"] == 456\n assert config[\"abc\"] == 123\n\n assert \"abc\" not in config\n\n with set({\"abc\": 123}):\n assert config[\"abc\"] == 123\n assert \"abc\" not in config\n\n with set({\"abc.x\": 1, \"abc.y\": 2, \"abc.z.a\": 3}):\n assert config[\"abc\"] == {\"x\": 1, \"y\": 2, \"z\": {\"a\": 3}}\n assert \"abc\" not in config\n\n d = {}\n set({\"abc.x\": 123}, config=d)\n assert d[\"abc\"][\"x\"] == 123", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_set_kwargs_test_set_kwargs.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_set_kwargs_test_set_kwargs.None_2", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 263, "end_line": 276, "span_ids": ["test_set_kwargs"], "tokens": 196}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_kwargs():\n with set(foo__bar=1, foo__baz=2):\n assert config[\"foo\"] == {\"bar\": 1, \"baz\": 2}\n assert \"foo\" not in config\n\n # Mix kwargs and dict, kwargs override\n with set({\"foo.bar\": 1, \"foo.baz\": 2}, foo__buzz=3, foo__bar=4):\n assert config[\"foo\"] == {\"bar\": 4, \"baz\": 2, \"buzz\": 3}\n assert \"foo\" not in config\n\n # Mix kwargs and nested dict, kwargs override\n with set({\"foo\": {\"bar\": 1, \"baz\": 2}}, foo__buzz=3, foo__bar=4):\n assert config[\"foo\"] == {\"bar\": 4, \"baz\": 2, \"buzz\": 3}\n assert \"foo\" not in config", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_set_nested_test_set_hard_to_copyables.with_set_x_threading_Lock.with_set_y_1_.pass": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_set_nested_test_set_hard_to_copyables.with_set_x_threading_Lock.with_set_y_1_.pass", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 279, "end_line": 293, "span_ids": ["test_set_hard_to_copyables", "test_set_nested"], "tokens": 113}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_nested():\n with set({\"abc\": {\"x\": 123}}):\n assert config[\"abc\"] == {\"x\": 123}\n with set({\"abc.y\": 456}):\n assert config[\"abc\"] == {\"x\": 123, \"y\": 456}\n assert config[\"abc\"] == {\"x\": 123}\n assert \"abc\" not in config\n\n\ndef test_set_hard_to_copyables():\n import threading\n\n with set(x=threading.Lock()):\n with set(y=1):\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_ensure_file_directory_test_ensure_file_directory.assert_os_path_exists_os_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_ensure_file_directory_test_ensure_file_directory.assert_os_path_exists_os_", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 296, "end_line": 312, "span_ids": ["test_ensure_file_directory"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"mkdir\", [True, False])\ndef test_ensure_file_directory(mkdir, tmpdir):\n a = {\"x\": 1, \"y\": {\"a\": 1}}\n\n source = os.path.join(str(tmpdir), \"source.yaml\")\n dest = os.path.join(str(tmpdir), \"dest\")\n\n with open(source, \"w\") as f:\n yaml.dump(a, f)\n\n if mkdir:\n os.mkdir(dest)\n\n ensure_file(source=source, destination=dest)\n\n assert os.path.isdir(dest)\n assert os.path.exists(os.path.join(dest, \"source.yaml\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_ensure_file_defaults_to_DASK_CONFIG_directory_test_ensure_file_defaults_to_DASK_CONFIG_directory.assert_os_path_split_fn_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_ensure_file_defaults_to_DASK_CONFIG_directory_test_ensure_file_defaults_to_DASK_CONFIG_directory.assert_os_path_split_fn_", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 315, "end_line": 331, "span_ids": ["test_ensure_file_defaults_to_DASK_CONFIG_directory"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_ensure_file_defaults_to_DASK_CONFIG_directory(tmpdir):\n a = {\"x\": 1, \"y\": {\"a\": 1}}\n source = os.path.join(str(tmpdir), \"source.yaml\")\n with open(source, \"w\") as f:\n yaml.dump(a, f)\n\n destination = os.path.join(str(tmpdir), \"dask\")\n PATH = dask.config.PATH\n try:\n dask.config.PATH = destination\n ensure_file(source=source)\n finally:\n dask.config.PATH = PATH\n\n assert os.path.isdir(destination)\n [fn] = os.listdir(destination)\n assert os.path.split(fn)[1] == os.path.split(source)[1]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_rename_test_refresh.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_rename_test_refresh.None_2", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 334, "end_line": 352, "span_ids": ["test_rename", "test_refresh"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rename():\n aliases = {\"foo_bar\": \"foo.bar\"}\n config = {\"foo-bar\": 123}\n rename(aliases, config=config)\n assert config == {\"foo\": {\"bar\": 123}}\n\n\ndef test_refresh():\n defaults = []\n config = {}\n\n update_defaults({\"a\": 1}, config=config, defaults=defaults)\n assert config == {\"a\": 1}\n\n refresh(paths=[], env={\"DASK_B\": \"2\"}, config=config, defaults=defaults)\n assert config == {\"a\": 1, \"b\": 2}\n\n refresh(paths=[], env={\"DASK_C\": \"3\"}, config=config, defaults=defaults)\n assert config == {\"a\": 1, \"c\": 3}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_expand_environment_variables_test_env_var_canonical_name.None_1": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_expand_environment_variables_test_env_var_canonical_name.None_1", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 355, "end_line": 382, "span_ids": ["test_expand_environment_variables", "test_env_var_canonical_name"], "tokens": 248}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"inp,out\",\n [\n (\"1\", \"1\"),\n (1, 1),\n (\"$FOO\", \"foo\"),\n ([1, \"$FOO\"], [1, \"foo\"]),\n ((1, \"$FOO\"), (1, \"foo\")),\n ({1, \"$FOO\"}, {1, \"foo\"}),\n ({\"a\": \"$FOO\"}, {\"a\": \"foo\"}),\n ({\"a\": \"A\", \"b\": [1, \"2\", \"$FOO\"]}, {\"a\": \"A\", \"b\": [1, \"2\", \"foo\"]}),\n ],\n)\ndef test_expand_environment_variables(inp, out):\n try:\n os.environ[\"FOO\"] = \"foo\"\n assert expand_environment_variables(inp) == out\n finally:\n del os.environ[\"FOO\"]\n\n\ndef test_env_var_canonical_name(monkeypatch):\n value = 3\n monkeypatch.setenv(\"DASK_A_B\", str(value))\n d = {}\n dask.config.refresh(config=d)\n assert get(\"a_b\", config=d) == value\n assert get(\"a-b\", config=d) == value", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_get_set_canonical_name_test_get_set_canonical_name.None_2.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_get_set_canonical_name_test_get_set_canonical_name.None_2.None_1", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 385, "end_line": 399, "span_ids": ["test_get_set_canonical_name"], "tokens": 188}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_get_set_canonical_name():\n c = {\"x-y\": {\"a_b\": 123}}\n\n keys = [\"x_y.a_b\", \"x-y.a-b\", \"x_y.a-b\"]\n for k in keys:\n assert dask.config.get(k, config=c) == 123\n\n with dask.config.set({\"x_y\": {\"a-b\": 456}}, config=c):\n for k in keys:\n assert dask.config.get(k, config=c) == 456\n\n # No change to new keys in sub dicts\n with dask.config.set({\"x_y\": {\"a-b\": {\"c_d\": 1}, \"e-f\": 2}}, config=c):\n assert dask.config.get(\"x_y.a-b\", config=c) == {\"c_d\": 1}\n assert dask.config.get(\"x_y.e_f\", config=c) == 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_get_set_roundtrip_test_schema.jsonschema_validate_confi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_get_set_roundtrip_test_schema.jsonschema_validate_confi", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 402, "end_line": 435, "span_ids": ["test_schema", "test_core_file", "test_get_set_roundtrip", "test_merge_None_to_dict"], "tokens": 265}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"key\", [\"custom_key\", \"custom-key\"])\ndef test_get_set_roundtrip(key):\n value = 123\n with dask.config.set({key: value}):\n assert dask.config.get(\"custom_key\") == value\n assert dask.config.get(\"custom-key\") == value\n\n\ndef test_merge_None_to_dict():\n assert dask.config.merge({\"a\": None, \"c\": 0}, {\"a\": {\"b\": 1}}) == {\n \"a\": {\"b\": 1},\n \"c\": 0,\n }\n\n\ndef test_core_file():\n assert \"temporary-directory\" in dask.config.config\n assert \"dataframe\" in dask.config.config\n assert \"shuffle-compression\" in dask.config.get(\"dataframe\")\n\n\ndef test_schema():\n jsonschema = pytest.importorskip(\"jsonschema\")\n\n config_fn = os.path.join(os.path.dirname(__file__), \"..\", \"dask.yaml\")\n schema_fn = os.path.join(os.path.dirname(__file__), \"..\", \"dask-schema.yaml\")\n\n with open(config_fn) as f:\n config = yaml.safe_load(f)\n\n with open(schema_fn) as f:\n schema = yaml.safe_load(f)\n\n jsonschema.validate(config, schema)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_schema_is_complete_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_schema_is_complete_", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 438, "end_line": 475, "span_ids": ["test_schema_is_complete", "test_deprecations"], "tokens": 346}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_schema_is_complete():\n config_fn = os.path.join(os.path.dirname(__file__), \"..\", \"dask.yaml\")\n schema_fn = os.path.join(os.path.dirname(__file__), \"..\", \"dask-schema.yaml\")\n\n with open(config_fn) as f:\n config = yaml.safe_load(f)\n\n with open(schema_fn) as f:\n schema = yaml.safe_load(f)\n\n def test_matches(c, s):\n for k, v in c.items():\n if list(c) != list(s[\"properties\"]):\n raise ValueError(\n \"\\nThe dask.yaml and 
dask-schema.yaml files are not in sync.\\n\"\n \"This usually happens when we add a new configuration value,\\n\"\n \"but don't add the schema of that value to the dask-schema.yaml file\\n\"\n \"Please modify these files to include the missing values: \\n\\n\"\n \" dask.yaml: {}\\n\"\n \" dask-schema.yaml: {}\\n\\n\"\n \"Examples in these files should be a good start, \\n\"\n \"even if you are not familiar with the jsonschema spec\".format(\n sorted(c), sorted(s[\"properties\"])\n )\n )\n if isinstance(v, dict):\n test_matches(c[k], s[\"properties\"][k])\n\n test_matches(config, schema)\n\n\ndef test_deprecations():\n with pytest.warns(Warning) as info:\n with dask.config.set(fuse_ave_width=123):\n assert dask.config.get(\"optimization.fuse.ave-width\") == 123\n\n assert \"optimization.fuse.ave-width\" in str(info[0].message)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_context.py_from_dask_context_import__test_with_get.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_context.py_from_dask_context_import__test_with_get.None_4", "embedding": null, "metadata": {"file_path": "dask/tests/test_context.py", "file_name": "test_context.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 24, "span_ids": ["imports", "test_with_get"], "tokens": 164}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from dask.context import globalmethod\nimport dask.array as da\nimport dask\n\n\ndef test_with_get():\n var = [0]\n\n def myget(dsk, keys, **kwargs):\n var[0] = var[0] + 1\n return dask.get(dsk, keys, **kwargs)\n\n x = da.ones(10, chunks=(5,))\n\n assert x.sum().compute() == 10\n assert var[0] == 0\n\n with dask.config.set(scheduler=myget):\n assert x.sum().compute() == 10\n assert var[0] == 1\n\n # Make sure we've cleaned up\n assert x.sum().compute() == 10\n assert var[0] == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_context.py_foo_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_context.py_foo_", "embedding": null, "metadata": {"file_path": "dask/tests/test_context.py", "file_name": "test_context.py", "file_type": "text/x-python", "category": "test", "start_line": 27, "end_line": 61, "span_ids": ["Foo", "Foo:2", "bar", "Foo.f", "test_globalmethod", "foo"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def foo():\n return \"foo\"\n\n\ndef 
bar():\n return \"bar\"\n\n\nclass Foo(object):\n @globalmethod(key=\"f\")\n def f():\n return 1\n\n g = globalmethod(foo, key=\"g\", falsey=bar)\n\n\ndef test_globalmethod():\n x = Foo()\n\n assert x.f() == 1\n\n with dask.config.set(f=lambda: 2):\n assert x.f() == 2\n\n with dask.config.set(f=foo):\n assert x.f is foo\n assert x.f() == \"foo\"\n\n assert x.g is foo\n assert x.g() == \"foo\"\n\n with dask.config.set(g=False):\n assert x.g is bar\n assert x.g() == \"bar\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_from_collections_import_n_test_istask.assert_not_istask_f_sum_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_from_collections_import_n_test_istask.assert_not_istask_f_sum_", "embedding": null, "metadata": {"file_path": "dask/tests/test_core.py", "file_name": "test_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 37, "span_ids": ["contains", "imports", "test_istask"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from collections import namedtuple\n\nimport pytest\nimport pickle\n\nfrom dask.utils_test import GetFunctionTestMixin, inc, add\nfrom dask import core\nfrom dask.core import (\n istask,\n get_dependencies,\n get_deps,\n flatten,\n subs,\n preorder_traversal,\n literal,\n quote,\n has_tasks,\n)\n\n\ndef contains(a, b):\n \"\"\"\n\n >>> contains({'x': 1, 'y': 2}, {'x': 1})\n True\n >>> contains({'x': 1, 'y': 2}, {'z': 3})\n False\n \"\"\"\n return all(a.get(k) == v for k, v in b.items())\n\n\ndef test_istask():\n assert istask((inc, 1))\n assert not istask(1)\n assert not istask((1, 2))\n f = namedtuple(\"f\", [\"x\", \"y\"])\n assert not istask(f(sum, 2))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_has_tasks_test_has_tasks.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_has_tasks_test_has_tasks.None_5", "embedding": null, "metadata": {"file_path": "dask/tests/test_core.py", "file_name": "test_core.py", "file_type": "text/x-python", "category": "test", "start_line": 40, "end_line": 54, "span_ids": ["test_has_tasks"], "tokens": 155}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_has_tasks():\n dsk = {\n \"a\": [1, 2, 3],\n \"b\": \"a\",\n \"c\": [1, (inc, 1)],\n \"d\": [(sum, \"a\")],\n \"e\": [\"a\", \"b\"],\n \"f\": [[\"a\", \"b\"], 2, 3],\n 
}\n assert not has_tasks(dsk, dsk[\"a\"])\n assert has_tasks(dsk, dsk[\"b\"])\n assert has_tasks(dsk, dsk[\"c\"])\n assert has_tasks(dsk, dsk[\"d\"])\n assert has_tasks(dsk, dsk[\"e\"])\n assert has_tasks(dsk, dsk[\"f\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_preorder_traversal_test_preorder_traversal.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_preorder_traversal_test_preorder_traversal.None_2", "embedding": null, "metadata": {"file_path": "dask/tests/test_core.py", "file_name": "test_core.py", "file_type": "text/x-python", "category": "test", "start_line": 57, "end_line": 63, "span_ids": ["test_preorder_traversal"], "tokens": 135}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_preorder_traversal():\n t = (add, 1, 2)\n assert list(preorder_traversal(t)) == [add, 1, 2]\n t = (add, (add, 1, 2), (add, 3, 4))\n assert list(preorder_traversal(t)) == [add, add, 1, 2, add, 3, 4]\n t = (add, (sum, [1, 2]), 3)\n assert list(preorder_traversal(t)) == [add, sum, list, 1, 2, 3]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_TestGet_test_get_dependencies_nothing.with_pytest_raises_ValueE.get_dependencies_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_TestGet_test_get_dependencies_nothing.with_pytest_raises_ValueE.get_dependencies_", "embedding": null, "metadata": {"file_path": "dask/tests/test_core.py", "file_name": "test_core.py", "file_type": "text/x-python", "category": "test", "start_line": 66, "end_line": 111, "span_ids": ["TestGet", "test_GetFunctionTestMixin_class", "test_get_dependencies_empty", "test_get_dependencies_nothing", "test_get_dependencies_list", "test_get_dependencies_task", "test_get_dependencies_nested"], "tokens": 408}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestGet(GetFunctionTestMixin):\n get = staticmethod(core.get)\n\n\ndef test_GetFunctionTestMixin_class():\n class TestCustomGetFail(GetFunctionTestMixin):\n get = staticmethod(lambda x, y: 1)\n\n custom_testget = TestCustomGetFail()\n pytest.raises(AssertionError, custom_testget.test_get)\n\n class TestCustomGetPass(GetFunctionTestMixin):\n get = staticmethod(core.get)\n\n custom_testget = TestCustomGetPass()\n custom_testget.test_get()\n\n\ndef test_get_dependencies_nested():\n dsk = {\"x\": 1, \"y\": 2, \"z\": (add, (inc, 
[[\"x\"]]), \"y\")}\n\n assert get_dependencies(dsk, \"z\") == set([\"x\", \"y\"])\n assert sorted(get_dependencies(dsk, \"z\", as_list=True)) == [\"x\", \"y\"]\n\n\ndef test_get_dependencies_empty():\n dsk = {\"x\": (inc,)}\n assert get_dependencies(dsk, \"x\") == set()\n assert get_dependencies(dsk, \"x\", as_list=True) == []\n\n\ndef test_get_dependencies_list():\n dsk = {\"x\": 1, \"y\": 2, \"z\": [\"x\", [(inc, \"y\")]]}\n assert get_dependencies(dsk, \"z\") == set([\"x\", \"y\"])\n assert sorted(get_dependencies(dsk, \"z\", as_list=True)) == [\"x\", \"y\"]\n\n\ndef test_get_dependencies_task():\n dsk = {\"x\": 1, \"y\": 2, \"z\": [\"x\", [(inc, \"y\")]]}\n assert get_dependencies(dsk, task=(inc, \"x\")) == set([\"x\"])\n assert get_dependencies(dsk, task=(inc, \"x\"), as_list=True) == [\"x\"]\n\n\ndef test_get_dependencies_nothing():\n with pytest.raises(ValueError):\n get_dependencies({})", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_get_dependencies_many_test_get_dependencies_task_none.assert_get_dependencies_d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_get_dependencies_many_test_get_dependencies_task_none.assert_get_dependencies_d", "embedding": null, "metadata": {"file_path": "dask/tests/test_core.py", "file_name": "test_core.py", "file_type": "text/x-python", "category": "test", "start_line": 114, "end_line": 139, "span_ids": ["test_get_dependencies_task_none", "test_get_dependencies_many"], "tokens": 246}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_get_dependencies_many():\n dsk = {\n \"a\": [1, 2, 3],\n \"b\": \"a\",\n \"c\": [1, (inc, 1)],\n \"d\": [(sum, \"c\")],\n \"e\": [\"a\", \"b\", \"zzz\"],\n \"f\": [[\"a\", \"b\"], 2, 3],\n }\n\n tasks = [dsk[k] for k in (\"d\", \"f\")]\n s = get_dependencies(dsk, task=tasks)\n assert s == {\"a\", \"b\", \"c\"}\n s = get_dependencies(dsk, task=tasks, as_list=True)\n assert sorted(s) == [\"a\", \"b\", \"c\"]\n\n s = get_dependencies(dsk, task=[])\n assert s == set()\n s = get_dependencies(dsk, task=[], as_list=True)\n assert s == []\n\n\ndef test_get_dependencies_task_none():\n # Regression test for https://github.com/dask/distributed/issues/2756\n dsk = {\"foo\": None}\n assert get_dependencies(dsk, task=dsk[\"foo\"]) == set()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_get_deps_test_get_deps.assert_dependents_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_get_deps_test_get_deps.assert_dependents_", "embedding": null, "metadata": {"file_path": "dask/tests/test_core.py", "file_name": "test_core.py", "file_type": "text/x-python", 
"category": "test", "start_line": 142, "end_line": 175, "span_ids": ["test_get_deps"], "tokens": 297}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_get_deps():\n \"\"\"\n >>> dsk = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b')}\n >>> dependencies, dependents = get_deps(dsk)\n >>> dependencies\n {'a': set(), 'b': {'a'}, 'c': {'b'}}\n >>> dependents # doctest: +SKIP\n {'a': {'b'}, 'b': {'c'}, 'c': set()}\n \"\"\"\n dsk = {\n \"a\": [1, 2, 3],\n \"b\": \"a\",\n \"c\": [1, (inc, 1)],\n \"d\": [(sum, \"c\")],\n \"e\": [\"b\", \"zzz\", \"b\"],\n \"f\": [[\"a\", \"b\"], 2, 3],\n }\n dependencies, dependents = get_deps(dsk)\n assert dependencies == {\n \"a\": set(),\n \"b\": {\"a\"},\n \"c\": set(),\n \"d\": {\"c\"},\n \"e\": {\"b\"},\n \"f\": {\"a\", \"b\"},\n }\n assert dependents == {\n \"a\": {\"b\", \"f\"},\n \"b\": {\"e\", \"f\"},\n \"c\": {\"d\"},\n \"d\": set(),\n \"e\": set(),\n \"f\": set(),\n }", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_flatten_MutateOnEq.__eq__.return.False": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_flatten_MutateOnEq.__eq__.return.False", "embedding": null, "metadata": {"file_path": "dask/tests/test_core.py", "file_name": "test_core.py", "file_type": "text/x-python", "category": "test", "start_line": 178, "end_line": 193, "span_ids": ["MutateOnEq", "MutateOnEq.__eq__", "test_subs", "test_flatten"], "tokens": 123}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_flatten():\n assert list(flatten(())) == []\n assert list(flatten(\"foo\")) == [\"foo\"]\n\n\ndef test_subs():\n assert subs((sum, [1, \"x\"]), \"x\", 2) == (sum, [1, 2])\n assert subs((sum, [1, [\"x\"]]), \"x\", 2) == (sum, [1, [2]])\n\n\nclass MutateOnEq(object):\n hit_eq = 0\n\n def __eq__(self, other):\n self.hit_eq += 1\n return False", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_subs_no_key_data_eq_test_subs_no_key_data_eq.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_subs_no_key_data_eq_test_subs_no_key_data_eq.None_1", "embedding": null, "metadata": {"file_path": "dask/tests/test_core.py", "file_name": "test_core.py", "file_type": "text/x-python", "category": "test", "start_line": 196, "end_line": 205, "span_ids": ["test_subs_no_key_data_eq"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_subs_no_key_data_eq():\n # Numpy throws a deprecation warning on bool(array == scalar), which\n # pollutes the terminal. This test checks that `subs` never tries to\n # compare keys (scalars) with values (which could be arrays)`subs` never\n # tries to compare keys (scalars) with values (which could be arrays).\n a = MutateOnEq()\n subs(a, \"x\", 1)\n assert a.hit_eq == 0\n subs((add, a, \"x\"), \"x\", 1)\n assert a.hit_eq == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_subs_with_unfriendly_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_subs_with_unfriendly_eq_", "embedding": null, "metadata": {"file_path": "dask/tests/test_core.py", "file_name": "test_core.py", "file_type": "text/x-python", "category": "test", "start_line": 208, "end_line": 262, "span_ids": ["test_subs_unexpected_hashable_key", "test_literal_serializable", "test_subs_with_unfriendly_eq", "test_quote", "test_subs_with_surprisingly_friendly_eq"], "tokens": 363}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_subs_with_unfriendly_eq():\n try:\n import numpy as np\n except ImportError:\n return\n else:\n task = (np.sum, np.array([1, 2]))\n assert (subs(task, (4, 5), 1) == task) is True\n\n class MyException(Exception):\n pass\n\n class F:\n def __eq__(self, other):\n raise MyException()\n\n task = F()\n assert subs(task, 1, 2) is task\n\n\ndef test_subs_with_surprisingly_friendly_eq():\n try:\n import pandas as pd\n except ImportError:\n return\n else:\n df = pd.DataFrame()\n assert subs(df, \"x\", 1) is df\n\n\ndef test_subs_unexpected_hashable_key():\n class UnexpectedButHashable:\n def __init__(self):\n self.name = \"a\"\n\n def __hash__(self):\n return hash(self.name)\n\n def __eq__(self, other):\n return isinstance(other, UnexpectedButHashable)\n\n assert subs((id, UnexpectedButHashable()), UnexpectedButHashable(), 1) == (id, 1)\n\n\ndef test_quote():\n literals = [[1, 2, 3], (add, 1, 2), [1, [2, 3]], (add, 1, (add, 2, 3)), {\"x\": \"x\"}]\n\n for l in literals:\n assert core.get({\"x\": quote(l)}, \"x\") == l\n\n\ndef test_literal_serializable():\n l = literal((add, 1, 2))\n assert pickle.loads(pickle.dumps(l)).data == (add, 1, 2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_datasets.py_dask_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_datasets.py_dask_", 
"embedding": null, "metadata": {"file_path": "dask/tests/test_datasets.py", "file_name": "test_datasets.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 35, "span_ids": ["imports", "test_mimesis", "test_no_mimesis", "test_deterministic", "test_full_dataset"], "tokens": 205}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import dask\nimport pytest\n\n\ndef test_mimesis():\n pytest.importorskip(\"mimesis\")\n\n b = dask.datasets.make_people()\n assert b.take(5)\n\n assert b.take(3) == b.take(3)\n\n\ndef test_full_dataset():\n pytest.importorskip(\"mimesis\")\n b = dask.datasets.make_people(npartitions=2, records_per_partition=10)\n assert b.count().compute() == 20\n\n\ndef test_no_mimesis():\n try:\n import mimesis # noqa: F401\n except ImportError:\n with pytest.raises(Exception) as info:\n dask.datasets.make_people()\n\n assert \"python -m pip install mimesis\" in str(info.value)\n\n\ndef test_deterministic():\n pytest.importorskip(\"mimesis\")\n\n b = dask.datasets.make_people(seed=123)\n assert b.take(1)[0][\"name\"] == (\"Leandro\", \"Orr\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_from_collections_import_n_Tuple.__dask_postcompute__.return.tuple_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_from_collections_import_n_Tuple.__dask_postcompute__.return.tuple_", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 40, "span_ids": ["Tuple.__init__", "imports", "Tuple.__dask_keys__", "Tuple.__dask_tokenize__", "Tuple", "Tuple.__dask_postcompute__", "Tuple.__dask_graph__"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from collections import namedtuple\nfrom operator import add, setitem\nfrom functools import partial\nimport pickle\nfrom random import random\nimport types\n\nfrom tlz import merge\nimport pytest\n\nimport dask\nfrom dask import compute\nfrom dask.delayed import delayed, to_task_dask, Delayed\nfrom dask.utils_test import inc\nfrom dask.dataframe.utils import assert_eq\n\ntry:\n from operator import matmul\nexcept ImportError:\n matmul = None\n\n\nclass Tuple(object):\n __dask_scheduler__ = staticmethod(dask.threaded.get)\n\n def __init__(self, dsk, keys):\n self._dask = dsk\n self._keys = keys\n\n def __dask_tokenize__(self):\n return self._keys\n\n def __dask_graph__(self):\n return self._dask\n\n def __dask_keys__(self):\n return self._keys\n\n def __dask_postcompute__(self):\n return tuple, ()", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_to_task_dask_test_to_task_dask.assert_dask_x__dask": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_to_task_dask_test_to_task_dask.assert_dask_x__dask", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 43, "end_line": 82, "span_ids": ["test_to_task_dask"], "tokens": 448}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.filterwarnings(\"ignore:The dask.delayed:UserWarning\")\ndef test_to_task_dask():\n a = delayed(1, name=\"a\")\n b = delayed(2, name=\"b\")\n task, dask = to_task_dask([a, b, 3])\n assert task == [\"a\", \"b\", 3]\n\n task, dask = to_task_dask((a, b, 3))\n assert task == (tuple, [\"a\", \"b\", 3])\n assert dict(dask) == merge(a.dask, b.dask)\n\n task, dask = to_task_dask({a: 1, b: 2})\n assert task == (dict, [[\"b\", 2], [\"a\", 1]]) or task == (dict, [[\"a\", 1], [\"b\", 2]])\n assert dict(dask) == merge(a.dask, b.dask)\n\n f = namedtuple(\"f\", [\"x\", \"y\"])\n x = f(1, 2)\n task, dask = to_task_dask(x)\n assert task == x\n assert dict(dask) == {}\n\n task, dask = to_task_dask(slice(a, b, 3))\n assert task == (slice, \"a\", \"b\", 3)\n assert dict(dask) == merge(a.dask, b.dask)\n\n # Issue https://github.com/dask/dask/issues/2107\n class MyClass(dict):\n pass\n\n task, dask = to_task_dask(MyClass())\n assert type(task) is MyClass\n assert dict(dask) == {}\n\n # Custom dask objects\n x = Tuple({\"a\": 1, \"b\": 2, \"c\": (add, \"a\", \"b\")}, [\"a\", \"b\", \"c\"])\n task, dask = to_task_dask(x)\n assert task in dask\n f = dask.pop(task)\n assert f == (tuple, [\"a\", \"b\", \"c\"])\n assert dask == x._dask", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_delayed_test_delayed.assert_a_key_in_b_dask": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_delayed_test_delayed.assert_a_key_in_b_dask", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 85, "end_line": 95, "span_ids": ["test_delayed"], "tokens": 119}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_delayed():\n add2 = delayed(add)\n assert add2(1, 2).compute() == 3\n assert (add2(1, 2) + 3).compute() == 6\n assert 
add2(add2(1, 2), 3).compute() == 6\n\n a = delayed(1)\n assert a.compute() == 1\n assert 1 in a.dask.values()\n b = add2(add2(a, 2), 3)\n assert a.key in b.dask", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_delayed_with_dataclass_test_delayed_with_dataclass.assert_final_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_delayed_with_dataclass_test_delayed_with_dataclass.assert_final_compute_", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 98, "end_line": 112, "span_ids": ["test_delayed_with_dataclass"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_delayed_with_dataclass():\n dataclasses = pytest.importorskip(\"dataclasses\")\n\n # Avoid @dataclass decorator as Python < 3.7 fail to interpret the type hints\n ADataClass = dataclasses.make_dataclass(\"ADataClass\", [(\"a\", int)])\n\n literal = dask.delayed(3)\n with_class = dask.delayed({\"a\": ADataClass(a=literal)})\n\n def return_nested(obj):\n return obj[\"a\"].a\n\n final = delayed(return_nested)(with_class)\n\n assert final.compute() == 3", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_operators_test_operators.if_matmul_.assert_eval_c_d_co": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_operators_test_operators.if_matmul_.assert_eval_c_d_co", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 115, "end_line": 138, "span_ids": ["test_operators"], "tokens": 210}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_operators():\n a = delayed([1, 2, 3])\n assert a[0].compute() == 1\n assert (a + a).compute() == [1, 2, 3, 1, 2, 3]\n b = delayed(2)\n assert a[:b].compute() == [1, 2]\n\n a = delayed(10)\n assert (a + 1).compute() == 11\n assert (1 + a).compute() == 11\n assert (a >> 1).compute() == 5\n assert (a > 2).compute()\n assert (a ** 2).compute() == 100\n\n if matmul:\n\n class dummy:\n def __matmul__(self, other):\n return 4\n\n c = delayed(dummy()) # noqa\n d = delayed(dummy()) # noqa\n\n assert (eval(\"c @ d\")).compute() == 4", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_methods_test_np_dtype_of_delayed.assert_delayed_np_array_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_methods_test_np_dtype_of_delayed.assert_delayed_np_array_", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 141, "end_line": 172, "span_ids": ["test_np_dtype_of_delayed", "test_attributes", "test_method_getattr_call_same_task", "test_methods"], "tokens": 331}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_methods():\n a = delayed(\"a b c d e\")\n assert a.split(\" \").compute() == [\"a\", \"b\", \"c\", \"d\", \"e\"]\n assert a.upper().replace(\"B\", \"A\").split().count(\"A\").compute() == 2\n assert a.split(\" \", pure=True).key == a.split(\" \", pure=True).key\n o = a.split(\" \", dask_key_name=\"test\")\n assert o.key == \"test\"\n\n\ndef test_attributes():\n a = delayed(2 + 1j)\n assert a.real._key == a.real._key\n assert a.real.compute() == 2\n assert a.imag.compute() == 1\n assert (a.real + a.imag).compute() == 3\n\n\ndef test_method_getattr_call_same_task():\n a = delayed([1, 2, 3])\n o = a.index(1)\n # Don't getattr the method, then call in separate task\n assert getattr not in set(v[0] for v in o.__dask_graph__().values())\n\n\ndef test_np_dtype_of_delayed():\n # This used to result in a segfault due to recursion, see\n # https://github.com/dask/dask/pull/4374#issuecomment-454381465\n np = pytest.importorskip(\"numpy\")\n x = delayed(1)\n with pytest.raises(TypeError):\n np.dtype(x)\n assert delayed(np.array([1], dtype=\"f8\")).dtype.compute() == np.dtype(\"f8\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_delayed_visualise_warn_test_delayed_visualise_warn.None_1.z_visualise_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_delayed_visualise_warn_test_delayed_visualise_warn.None_1.z_visualise_", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 175, "end_line": 193, "span_ids": ["test_delayed_visualise_warn"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_delayed_visualise_warn():\n # Raise a warning when user calls visualise()\n # 
instead of visualize()\n def inc(x):\n return x + 1\n\n z = dask.delayed(inc)(1)\n z.compute()\n\n with pytest.warns(\n UserWarning, match=\"dask.delayed objects have no `visualise` method\"\n ):\n z.visualise(file_name=\"desk_graph.svg\")\n\n # with no args\n with pytest.warns(\n UserWarning, match=\"dask.delayed objects have no `visualise` method\"\n ):\n z.visualise()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_delayed_errors_test_delayed_errors.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_delayed_errors_test_delayed_errors.None_5", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 196, "end_line": 207, "span_ids": ["test_delayed_errors"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_delayed_errors():\n a = delayed([1, 2, 3])\n # Immutable\n pytest.raises(TypeError, lambda: setattr(a, \"foo\", 1))\n pytest.raises(TypeError, lambda: setitem(a, 1, 0))\n # Can't iterate, or check if contains\n pytest.raises(TypeError, lambda: 1 in a)\n pytest.raises(TypeError, lambda: list(a))\n # No dynamic generation of magic/hidden methods\n pytest.raises(AttributeError, lambda: a._hidden())\n # Truth of delayed forbidden\n pytest.raises(TypeError, lambda: bool(a))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_common_subexpressions_test_lists.assert_c_compute_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_common_subexpressions_test_lists.assert_c_compute_3", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 210, "end_line": 229, "span_ids": ["test_delayed_optimize", "test_lists", "test_common_subexpressions"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_common_subexpressions():\n a = delayed([1, 2, 3])\n res = a[0] + a[0]\n assert a[0].key in res.dask\n assert a.key in res.dask\n assert len(res.dask) == 3\n\n\ndef test_delayed_optimize():\n x = Delayed(\"b\", {\"a\": 1, \"b\": (inc, \"a\"), \"c\": (inc, \"b\")})\n (x2,) = dask.optimize(x)\n # Delayed's __dask_optimize__ culls out 'c'\n assert sorted(x2.dask.keys()) == [\"a\", 
\"b\"]\n\n\ndef test_lists():\n a = delayed(1)\n b = delayed(2)\n c = delayed(sum)([a, b])\n assert c.compute() == 3", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_literates_test_literates.assert_delayed_lit_a_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_literates_test_literates.assert_delayed_lit_a_", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 232, "end_line": 246, "span_ids": ["test_literates"], "tokens": 230}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_literates():\n a = delayed(1)\n b = a + 1\n lit = (a, b, 3)\n assert delayed(lit).compute() == (1, 2, 3)\n lit = [a, b, 3]\n assert delayed(lit).compute() == [1, 2, 3]\n lit = set((a, b, 3))\n assert delayed(lit).compute() == set((1, 2, 3))\n lit = {a: \"a\", b: \"b\", 3: \"c\"}\n assert delayed(lit).compute() == {1: \"a\", 2: \"b\", 3: \"c\"}\n assert delayed(lit)[a].compute() == \"a\"\n lit = {\"a\": a, \"b\": b, \"c\": 3}\n assert delayed(lit).compute() == {\"a\": 1, \"b\": 2, \"c\": 3}\n assert delayed(lit)[\"a\"].compute() == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_literates_keys_test_iterators.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_literates_keys_test_iterators.None_1", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 249, "end_line": 276, "span_ids": ["test_lists_are_concrete", "test_iterators", "test_literates_keys"], "tokens": 195}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_literates_keys():\n a = delayed(1)\n b = a + 1\n lit = (a, b, 3)\n assert delayed(lit).key != delayed(lit).key\n assert delayed(lit, pure=True).key == delayed(lit, pure=True).key\n\n\ndef test_lists_are_concrete():\n a = delayed(1)\n b = delayed(2)\n c = delayed(max)([[a, 10], [b, 20]], key=lambda x: x[0])[1]\n\n assert c.compute() == 20\n\n\ndef test_iterators():\n a = delayed(1)\n b = delayed(2)\n c = delayed(sum)(iter([a, b]))\n\n assert c.compute() == 3\n\n def f(seq):\n return sum(seq)\n\n c = delayed(f)(iter([a, b]))\n assert c.compute() == 3", "start_char_idx": null, "end_char_idx": 
null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_traverse_false_test_pure.assert_myrand_key_my": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_traverse_false_test_pure.assert_myrand_key_my", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 279, "end_line": 320, "span_ids": ["test_pure", "test_traverse_false"], "tokens": 342}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_traverse_false():\n # Create a list with a dask value, and test that it's not computed\n def fail(*args):\n raise ValueError(\"shouldn't have computed\")\n\n a = delayed(fail)()\n\n # list\n x = [a, 1, 2, 3]\n res = delayed(x, traverse=False).compute()\n assert len(res) == 4\n assert res[0] is a\n assert res[1:] == x[1:]\n\n # tuple that looks like a task\n x = (fail, a, (fail, a))\n res = delayed(x, traverse=False).compute()\n assert isinstance(res, tuple)\n assert res[0] == fail\n assert res[1] is a\n\n # list containing task-like-things\n x = [1, (fail, a), a]\n res = delayed(x, traverse=False).compute()\n assert isinstance(res, list)\n assert res[0] == 1\n assert res[1][0] == fail and res[1][1] is a\n assert res[2] is a\n\n # traverse=False still hits top level\n b = delayed(1)\n x = delayed(b, traverse=False)\n assert x.compute() == 1\n\n\ndef test_pure():\n v1 = delayed(add, pure=True)(1, 2)\n v2 = delayed(add, pure=True)(1, 2)\n assert v1.key == v2.key\n\n myrand = delayed(random)\n assert myrand().key != myrand().key", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_pure_global_setting_test_pure_global_setting.None_7.assert_element_element": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_pure_global_setting_test_pure_global_setting.None_7.assert_element_element", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 323, "end_line": 360, "span_ids": ["test_pure_global_setting"], "tokens": 369}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_pure_global_setting():\n # delayed functions\n func = delayed(add)\n\n with dask.config.set(delayed_pure=True):\n assert func(1, 2).key == func(1, 2).key\n\n with dask.config.set(delayed_pure=False):\n 
assert func(1, 2).key != func(1, 2).key\n\n func = delayed(add, pure=True)\n with dask.config.set(delayed_pure=False):\n assert func(1, 2).key == func(1, 2).key\n\n # delayed objects\n assert delayed(1).key != delayed(1).key\n with dask.config.set(delayed_pure=True):\n assert delayed(1).key == delayed(1).key\n\n with dask.config.set(delayed_pure=False):\n assert delayed(1, pure=True).key == delayed(1, pure=True).key\n\n # delayed methods\n data = delayed([1, 2, 3])\n assert data.index(1).key != data.index(1).key\n\n with dask.config.set(delayed_pure=True):\n assert data.index(1).key == data.index(1).key\n assert data.index(1, pure=False).key != data.index(1, pure=False).key\n\n with dask.config.set(delayed_pure=False):\n assert data.index(1, pure=True).key == data.index(1, pure=True).key\n\n # magic methods always pure\n with dask.config.set(delayed_pure=False):\n assert data.index.key == data.index.key\n element = data[0]\n assert (element + element).key == (element + element).key", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_nout_test_nout.assert_x_compute_tup": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_nout_test_nout.assert_x_compute_tup", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 363, "end_line": 394, "span_ids": ["test_nout"], "tokens": 282}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_nout():\n func = delayed(lambda x: (x, -x), nout=2, pure=True)\n x = func(1)\n assert len(x) == 2\n a, b = x\n assert compute(a, b) == (1, -1)\n assert a._length is None\n assert b._length is None\n pytest.raises(TypeError, lambda: len(a))\n pytest.raises(TypeError, lambda: list(a))\n\n pytest.raises(ValueError, lambda: delayed(add, nout=-1))\n pytest.raises(ValueError, lambda: delayed(add, nout=True))\n\n func = delayed(add, nout=None)\n a = func(1)\n assert a._length is None\n pytest.raises(TypeError, lambda: list(a))\n pytest.raises(TypeError, lambda: len(a))\n\n func = delayed(lambda x: (x,), nout=1, pure=True)\n x = func(1)\n assert len(x) == 1\n (a,) = x\n assert a.compute() == 1\n assert a._length is None\n pytest.raises(TypeError, lambda: len(a))\n\n func = delayed(lambda x: tuple(), nout=0, pure=True)\n x = func(1)\n assert len(x) == 0\n assert x.compute() == tuple()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_kwargs_test_kwargs.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_kwargs_test_kwargs.None_5", "embedding": null, "metadata": {"file_path": 
"dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 397, "end_line": 411, "span_ids": ["test_kwargs"], "tokens": 260}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_kwargs():\n def mysum(a, b, c=(), **kwargs):\n return a + b + sum(c) + sum(kwargs.values())\n\n dmysum = delayed(mysum)\n ten = dmysum(1, 2, c=[delayed(3), 0], four=dmysum(2, 2))\n assert ten.compute() == 10\n dmysum = delayed(mysum, pure=True)\n c = [delayed(3), 0]\n ten = dmysum(1, 2, c=c, four=dmysum(2, 2))\n assert ten.compute() == 10\n assert dmysum(1, 2, c=c, four=dmysum(2, 2)).key == ten.key\n assert dmysum(1, 2, c=c, four=dmysum(2, 3)).key != ten.key\n assert dmysum(1, 2, c=c, four=4).key != ten.key\n assert dmysum(1, 2, c=c, four=4).key != dmysum(2, 2, c=c, four=4).key", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_custom_delayed_test_custom_delayed.assert_compute_n_x2_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_custom_delayed_test_custom_delayed.assert_compute_n_x2_x_", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 414, "end_line": 420, "span_ids": ["test_custom_delayed"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_custom_delayed():\n x = Tuple({\"a\": 1, \"b\": 2, \"c\": (add, \"a\", \"b\")}, [\"a\", \"b\", \"c\"])\n x2 = delayed(add, pure=True)(x, (4, 5, 6))\n n = delayed(len, pure=True)(x)\n assert delayed(len, pure=True)(x).key == n.key\n assert x2.compute() == (1, 2, 3, 4, 5, 6)\n assert compute(n, x2, x) == (3, (1, 2, 3, 4, 5, 6), (1, 2, 3))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_array_delayed_test_array_delayed.assert_delayed_arr_compu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_array_delayed_test_array_delayed.assert_delayed_arr_compu", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 423, "end_line": 444, "span_ids": ["test_array_delayed"], "tokens": 231}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.filterwarnings(\"ignore:The dask.delayed:UserWarning\")\ndef test_array_delayed():\n np = pytest.importorskip(\"numpy\")\n da = pytest.importorskip(\"dask.array\")\n\n arr = np.arange(100).reshape((10, 10))\n darr = da.from_array(arr, chunks=(5, 5))\n val = delayed(sum)([arr, darr, 1])\n assert isinstance(val, Delayed)\n assert np.allclose(val.compute(), arr + arr + 1)\n assert val.sum().compute() == (arr + arr + 1).sum()\n assert val[0, 0].compute() == (arr + arr + 1)[0, 0]\n\n task, dsk = to_task_dask(darr)\n orig = set(darr.dask)\n final = set(dsk)\n assert orig.issubset(final)\n diff = final.difference(orig)\n assert len(diff) == 1\n\n delayed_arr = delayed(darr)\n assert (delayed_arr.compute() == arr).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_array_bag_delayed_test_array_bag_delayed.assert_out_compute_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_array_bag_delayed_test_array_bag_delayed.assert_out_compute_2", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 447, "end_line": 459, "span_ids": ["test_array_bag_delayed"], "tokens": 187}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_array_bag_delayed():\n db = pytest.importorskip(\"dask.bag\")\n da = pytest.importorskip(\"dask.array\")\n np = pytest.importorskip(\"numpy\")\n\n arr1 = np.arange(100).reshape((10, 10))\n arr2 = arr1.dot(arr1.T)\n darr1 = da.from_array(arr1, chunks=(5, 5))\n darr2 = da.from_array(arr2, chunks=(5, 5))\n b = db.from_sequence([1, 2, 3])\n seq = [arr1, arr2, darr1, darr2, b]\n out = delayed(sum)([i.sum() for i in seq])\n assert out.compute() == 2 * arr1.sum() + 2 * arr2.sum() + sum([1, 2, 3])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_delayed_picklable_test_delayed_picklable.None_10": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_delayed_picklable_test_delayed_picklable.None_10", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 462, "end_line": 482, "span_ids": ["test_delayed_picklable"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_delayed_picklable():\n # Delayed\n x = delayed(divmod, nout=2, pure=True)(1, 2)\n y = pickle.loads(pickle.dumps(x))\n assert x.dask == y.dask\n assert x._key == y._key\n assert x._length == y._length\n # DelayedLeaf\n x = delayed(1j + 2)\n y = pickle.loads(pickle.dumps(x))\n assert x.dask == y.dask\n assert x._key == y._key\n assert x._nout == y._nout\n assert x._pure == y._pure\n # DelayedAttr\n x = x.real\n y = pickle.loads(pickle.dumps(x))\n assert x._obj._key == y._obj._key\n assert x._obj.dask == y._obj.dask\n assert x._attr == y._attr\n assert x._key == y._key", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_delayed_compute_forward_kwargs_identity.return.x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_delayed_compute_forward_kwargs_identity.return.x", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 485, "end_line": 524, "span_ids": ["test_delayed_callable", "test_delayed_name_on_call", "test_delayed_method_descriptor", "test_callable_obj", "test_delayed_compute_forward_kwargs", "identity"], "tokens": 238}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_delayed_compute_forward_kwargs():\n x = delayed(1) + 2\n x.compute(bogus_keyword=10)\n\n\ndef test_delayed_method_descriptor():\n delayed(bytes.decode)(b\"\") # does not err\n\n\ndef test_delayed_callable():\n f = delayed(add, pure=True)\n v = f(1, 2)\n assert v.dask == {v.key: (add, 1, 2)}\n\n assert f.dask == {f.key: add}\n assert f.compute() == add\n\n\ndef test_delayed_name_on_call():\n f = delayed(add, pure=True)\n assert f(1, 2, dask_key_name=\"foo\")._key == \"foo\"\n\n\ndef test_callable_obj():\n class Foo(object):\n def __init__(self, a):\n self.a = a\n\n def __call__(self):\n return 2\n\n foo = Foo(1)\n f = delayed(foo)\n assert f.compute() is foo\n assert f.a.compute() == 1\n assert f().compute() == 2\n\n\ndef identity(x):\n return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_name_consistent_across_instances_test_name_consistent_across_instances.assert_func_1__key_i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_name_consistent_across_instances_test_name_consistent_across_instances.assert_func_1__key_i", "embedding": null, "metadata": {"file_path": 
"dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 527, "end_line": 535, "span_ids": ["test_name_consistent_across_instances"], "tokens": 128}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_name_consistent_across_instances():\n func = delayed(identity, pure=True)\n\n data = {\"x\": 1, \"y\": 25, \"z\": [1, 2, 3]}\n assert func(data)._key == \"identity-02129ed1acaffa7039deee80c5da547c\"\n\n data = {\"x\": 1, 1: \"x\"}\n assert func(data)._key == func(data)._key\n assert func(1)._key == \"identity-ca2fae46a3b938016331acac1908ae45\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_sensitive_to_partials_test_keys_from_array._check_dsk_xs_0_dask_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_sensitive_to_partials_test_keys_from_array._check_dsk_xs_0_dask_", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 538, "end_line": 578, "span_ids": ["test_sensitive_to_partials", "test_delayed_name", "test_keys_from_array", "test_finalize_name"], "tokens": 293}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_sensitive_to_partials():\n assert (\n delayed(partial(add, 10), pure=True)(2)._key\n != delayed(partial(add, 20), pure=True)(2)._key\n )\n\n\ndef test_delayed_name():\n assert delayed(1)._key.startswith(\"int-\")\n assert delayed(1, pure=True)._key.startswith(\"int-\")\n assert delayed(1, name=\"X\")._key == \"X\"\n\n def myfunc(x):\n return x + 1\n\n assert delayed(myfunc)(1).key.startswith(\"myfunc\")\n\n\ndef test_finalize_name():\n da = pytest.importorskip(\"dask.array\")\n\n x = da.ones(10, chunks=5)\n v = delayed([x])\n assert set(x.dask).issubset(v.dask)\n\n def key(s):\n if isinstance(s, tuple):\n s = s[0]\n return s.split(\"-\")[0]\n\n assert all(key(k).isalpha() for k in v.dask)\n\n\ndef test_keys_from_array():\n da = pytest.importorskip(\"dask.array\")\n from dask.array.utils import _check_dsk\n\n X = da.ones((10, 10), chunks=5).to_delayed().flatten()\n xs = [delayed(inc)(x) for x in X]\n\n _check_dsk(xs[0].dask)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py__Mostly_copied_from_http_test_delayed_decorator_on_method.assert_isinstance_A_addst": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py__Mostly_copied_from_http_test_delayed_decorator_on_method.assert_isinstance_A_addst", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 581, "end_line": 622, "span_ids": ["test_keys_from_array", "test_delayed_decorator_on_method"], "tokens": 352}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# Mostly copied from https://github.com/pytoolz/toolz/pull/220\ndef test_delayed_decorator_on_method():\n class A(object):\n BASE = 10\n\n def __init__(self, base):\n self.BASE = base\n\n @delayed\n def addmethod(self, x, y):\n return self.BASE + x + y\n\n @classmethod\n @delayed\n def addclass(cls, x, y):\n return cls.BASE + x + y\n\n @staticmethod\n @delayed\n def addstatic(x, y):\n return x + y\n\n a = A(100)\n assert a.addmethod(3, 4).compute() == 107\n assert A.addmethod(a, 3, 4).compute() == 107\n\n assert a.addclass(3, 4).compute() == 17\n assert A.addclass(3, 4).compute() == 17\n\n assert a.addstatic(3, 4).compute() == 7\n assert A.addstatic(3, 4).compute() == 7\n\n # We want the decorated methods to be actual methods for instance methods\n # and class methods since their first arguments are the object and the\n # class respectively. Or in other words, the first argument is generated by\n # the runtime based on the object/class before the dot.\n assert isinstance(a.addmethod, types.MethodType)\n assert isinstance(A.addclass, types.MethodType)\n\n # For static methods (and regular functions), the decorated methods should\n # be Delayed objects.\n assert isinstance(A.addstatic, Delayed)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_attribute_of_attribute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_attribute_of_attribute_", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 625, "end_line": 682, "span_ids": ["test_cloudpickle", "modlevel_delayed1", "test_pickle", "modlevel_delayed2", "test_check_meta_flag", "test_attribute_of_attribute", "modlevel_eager"], "tokens": 389}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_attribute_of_attribute():\n x = delayed(123)\n assert isinstance(x.a, Delayed)\n assert isinstance(x.a.b, Delayed)\n assert isinstance(x.a.b.c, Delayed)\n\n\ndef test_check_meta_flag():\n from pandas import Series\n from dask.delayed import delayed\n from dask.dataframe import from_delayed\n\n a = 
Series([\"a\", \"b\", \"a\"], dtype=\"category\")\n b = Series([\"a\", \"c\", \"a\"], dtype=\"category\")\n da = delayed(lambda x: x)(a)\n db = delayed(lambda x: x)(b)\n\n c = from_delayed([da, db], verify_meta=False)\n assert_eq(c, c)\n\n\ndef modlevel_eager(x):\n return x + 1\n\n\n@delayed\ndef modlevel_delayed1(x):\n return x + 1\n\n\n@delayed(pure=False)\ndef modlevel_delayed2(x):\n return x + 1\n\n\n@pytest.mark.parametrize(\n \"f\",\n [\n delayed(modlevel_eager),\n pytest.param(modlevel_delayed1, marks=pytest.mark.xfail(reason=\"#3369\")),\n pytest.param(modlevel_delayed2, marks=pytest.mark.xfail(reason=\"#3369\")),\n ],\n)\ndef test_pickle(f):\n d = f(2)\n d = pickle.loads(pickle.dumps(d, protocol=pickle.HIGHEST_PROTOCOL))\n assert d.compute() == 3\n\n\n@pytest.mark.parametrize(\n \"f\", [delayed(modlevel_eager), modlevel_delayed1, modlevel_delayed2]\n)\ndef test_cloudpickle(f):\n cloudpickle = pytest.importorskip(\"cloudpickle\")\n d = f(2)\n d = cloudpickle.loads(cloudpickle.dumps(d, protocol=pickle.HIGHEST_PROTOCOL))\n assert d.compute() == 3", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_pytest_test_persist.assert_y2_key_in_a_data_o": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_pytest_test_persist.assert_y2_key_in_a_data_o", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 48, "span_ids": ["imports", "test_persist", "test_can_import_client"], "tokens": 270}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\ndistributed = pytest.importorskip(\"distributed\")\n\nimport asyncio\nfrom functools import partial\nfrom operator import add\n\nfrom tornado import gen\n\nimport dask\nfrom dask import persist, delayed, compute\nfrom dask.delayed import Delayed\nfrom dask.utils import tmpdir, get_named_args\nfrom distributed import futures_of\nfrom distributed.client import wait\nfrom distributed.utils_test import ( # noqa F401\n gen_cluster,\n inc,\n cluster,\n cluster_fixture,\n loop,\n client as c,\n)\n\n\nif \"should_check_state\" in get_named_args(gen_cluster):\n gen_cluster = partial(gen_cluster, should_check_state=False)\n cluster = partial(cluster, should_check_state=False)\n\n\ndef test_can_import_client():\n from dask.distributed import Client # noqa: F401\n\n\n@gen_cluster(client=True)\ndef test_persist(c, s, a, b):\n x = delayed(inc)(1)\n (x2,) = persist(x)\n\n yield wait(x2)\n assert x2.key in a.data or x2.key in b.data\n\n y = delayed(inc)(10)\n y2, one = persist(y, 1)\n\n yield wait(y2)\n assert y2.key in a.data or y2.key in b.data", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_persist_nested_test_persist_nested.assert_res_2_4_5_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_persist_nested_test_persist_nested.assert_res_2_4_5_", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": "text/x-python", "category": "test", "start_line": 51, "end_line": 67, "span_ids": ["test_persist_nested"], "tokens": 214}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_persist_nested(c):\n a = delayed(1) + 5\n b = a + 1\n c = a + 2\n result = persist({\"a\": a, \"b\": [1, 2, b]}, (c, 2), 4, [5])\n assert isinstance(result[0][\"a\"], Delayed)\n assert isinstance(result[0][\"b\"][2], Delayed)\n assert isinstance(result[1][0], Delayed)\n\n sol = ({\"a\": 6, \"b\": [1, 2, 7]}, (8, 2), 4, [5])\n assert compute(*result) == sol\n\n res = persist([a, b], c, 4, [5], traverse=False)\n assert res[0][0] is a\n assert res[0][1] is b\n assert res[1].compute() == 8\n assert res[2:] == (4, [5])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_futures_to_delayed_dataframe_test_futures_to_delayed_dataframe.with_pytest_raises_TypeEr.ddf.dd_from_delayed_1_2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_futures_to_delayed_dataframe_test_futures_to_delayed_dataframe.with_pytest_raises_TypeEr.ddf.dd_from_delayed_1_2_", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": "text/x-python", "category": "test", "start_line": 70, "end_line": 80, "span_ids": ["test_futures_to_delayed_dataframe"], "tokens": 113}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_futures_to_delayed_dataframe(c):\n pd = pytest.importorskip(\"pandas\")\n dd = pytest.importorskip(\"dask.dataframe\")\n df = pd.DataFrame({\"x\": [1, 2, 3]})\n\n futures = c.scatter([df, df])\n ddf = dd.from_delayed(futures)\n dd.utils.assert_eq(ddf.compute(), pd.concat([df, df], axis=0))\n\n with pytest.raises(TypeError):\n ddf = dd.from_delayed([1, 2])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_futures_to_delayed_bag_test_futures_to_delayed_array.assert_eq_A_compute_np": {"__data__": 
{"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_futures_to_delayed_bag_test_futures_to_delayed_array.assert_eq_A_compute_np", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": "text/x-python", "category": "test", "start_line": 83, "end_line": 103, "span_ids": ["test_futures_to_delayed_array", "test_futures_to_delayed_bag"], "tokens": 175}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_futures_to_delayed_bag(c):\n db = pytest.importorskip(\"dask.bag\")\n L = [1, 2, 3]\n\n futures = c.scatter([L, L])\n b = db.from_delayed(futures)\n assert list(b) == L + L\n\n\ndef test_futures_to_delayed_array(c):\n da = pytest.importorskip(\"dask.array\")\n from dask.array.utils import assert_eq\n\n np = pytest.importorskip(\"numpy\")\n x = np.arange(5)\n\n futures = c.scatter([x, x])\n A = da.concatenate(\n [da.from_delayed(f, shape=x.shape, dtype=x.dtype) for f in futures], axis=0\n )\n assert_eq(A.compute(), np.concatenate([x, x], axis=0))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_local_get_with_distributed_active_test_to_hdf_distributed.test_to_hdf_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_local_get_with_distributed_active_test_to_hdf_distributed.test_to_hdf_", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": "text/x-python", "category": "test", "start_line": 106, "end_line": 121, "span_ids": ["test_to_hdf_distributed", "test_local_get_with_distributed_active"], "tokens": 140}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@gen_cluster(client=True)\ndef test_local_get_with_distributed_active(c, s, a, b):\n with dask.config.set(scheduler=\"sync\"):\n x = delayed(inc)(1).persist()\n yield gen.sleep(0.01)\n assert not s.tasks # scheduler hasn't done anything\n\n x = delayed(inc)(2).persist(scheduler=\"sync\") # noqa F841\n yield gen.sleep(0.01)\n assert not s.tasks # scheduler hasn't done anything\n\n\ndef test_to_hdf_distributed(c):\n from ..dataframe.io.tests.test_hdf import test_to_hdf\n\n test_to_hdf()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_to_hdf_scheduler_distributed_test_to_hdf_scheduler_distributed.test_to_hdf_schedulers_No": {"__data__": 
{"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_to_hdf_scheduler_distributed_test_to_hdf_scheduler_distributed.test_to_hdf_schedulers_No", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": "text/x-python", "category": "test", "start_line": 124, "end_line": 141, "span_ids": ["test_to_hdf_scheduler_distributed"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"npartitions\",\n [\n 1,\n pytest.param(\n 4,\n marks=pytest.mark.xfail(reason=\"HDF not multi-process safe\", strict=False),\n ),\n pytest.param(\n 10,\n marks=pytest.mark.xfail(reason=\"HDF not multi-process safe\", strict=False),\n ),\n ],\n)\ndef test_to_hdf_scheduler_distributed(npartitions, c):\n from ..dataframe.io.tests.test_hdf import test_to_hdf_schedulers\n\n test_to_hdf_schedulers(None, npartitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_serializable_groupby_agg_test_serializable_groupby_agg.yield_c_compute_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_serializable_groupby_agg_test_serializable_groupby_agg.yield_c_compute_result_", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": "text/x-python", "category": "test", "start_line": 144, "end_line": 153, "span_ids": ["test_serializable_groupby_agg"], "tokens": 115}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@gen_cluster(client=True)\ndef test_serializable_groupby_agg(c, s, a, b):\n pd = pytest.importorskip(\"pandas\")\n dd = pytest.importorskip(\"dask.dataframe\")\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [1, 0, 1, 0]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n result = ddf.groupby(\"y\").agg(\"count\")\n\n yield c.compute(result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_futures_in_graph_test_futures_in_graph.assert_xxyy3_compute_sche": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_futures_in_graph_test_futures_in_graph.assert_xxyy3_compute_sche", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": 
"text/x-python", "category": "test", "start_line": 156, "end_line": 165, "span_ids": ["test_futures_in_graph"], "tokens": 114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_futures_in_graph(c):\n x, y = delayed(1), delayed(2)\n xx = delayed(add)(x, x)\n yy = delayed(add)(y, y)\n xxyy = delayed(add)(xx, yy)\n\n xxyy2 = c.persist(xxyy)\n xxyy3 = delayed(add)(xxyy2, 10)\n\n assert xxyy3.compute(scheduler=\"dask.distributed\") == ((1 + 1) + (2 + 2)) + 10", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_zarr_distributed_roundtrip_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_zarr_distributed_roundtrip_", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": "text/x-python", "category": "test", "start_line": 168, "end_line": 216, "span_ids": ["test_scheduler_equals_client", "test_local_scheduler", "test_zarr_distributed_roundtrip", "test_zarr_in_memory_distributed_err", "test_await"], "tokens": 358}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_zarr_distributed_roundtrip():\n da = pytest.importorskip(\"dask.array\")\n pytest.importorskip(\"zarr\")\n assert_eq = da.utils.assert_eq\n\n with tmpdir() as d:\n a = da.zeros((3, 3), chunks=(1, 1))\n a.to_zarr(d)\n a2 = da.from_zarr(d)\n assert_eq(a, a2)\n assert a2.chunks == a.chunks\n\n\ndef test_zarr_in_memory_distributed_err(c):\n da = pytest.importorskip(\"dask.array\")\n zarr = pytest.importorskip(\"zarr\")\n\n c = (1, 1)\n a = da.ones((3, 3), chunks=c)\n z = zarr.zeros_like(a, chunks=c)\n\n with pytest.raises(RuntimeError):\n a.to_zarr(z)\n\n\ndef test_scheduler_equals_client(c):\n x = delayed(lambda: 1)()\n assert x.compute(scheduler=c) == 1\n assert c.run_on_scheduler(lambda dask_scheduler: dask_scheduler.story(x.key))\n\n\n@gen_cluster(client=True)\nasync def test_await(c, s, a, b):\n x = dask.delayed(inc)(1)\n x = await x.persist()\n assert x.key in s.tasks\n assert a.data or b.data\n assert all(f.done() for f in futures_of(x))\n\n\ndef test_local_scheduler():\n async def f():\n x = dask.delayed(inc)(1)\n y = x + 1\n z = await y.persist()\n assert len(z.dask) == 1\n\n asyncio.get_event_loop().run_until_complete(f())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_os_test_task_label.assert_task_label_add_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_os_test_task_label.assert_task_label_add_", "embedding": null, "metadata": {"file_path": "dask/tests/test_dot.py", "file_name": "test_dot.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 63, "span_ids": ["test_task_label", "impl:21", "imports", "get_label", "get_shape"], "tokens": 378}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import os\nfrom functools import partial\nimport re\nfrom operator import add, neg\nimport sys\nimport copy\nimport pytest\n\n\nif sys.flags.optimize != 2:\n pytest.importorskip(\"graphviz\")\n from dask.dot import dot_graph, task_label, label, to_graphviz\nelse:\n pytestmark = pytest.mark.skipif(\n True, reason=\"graphviz exception with Python -OO flag\"\n )\n\nfrom dask import delayed\nfrom dask.utils import ensure_not_exists\n\ntry:\n from IPython.display import Image, SVG\nexcept ImportError:\n ipython_not_installed = True\n Image = None\n SVG = None\nelse:\n ipython_not_installed = False\nipython_not_installed_mark = pytest.mark.skipif(\n ipython_not_installed, reason=\"IPython not installed\"\n)\n\n\n# Since graphviz doesn't store a graph, we need to parse the output\nlabel_re = re.compile(r\".*\\[label=(.*?) shape=(.*?)\\]\")\n\n\ndef get_label(line):\n m = label_re.match(line)\n if m:\n return m.group(1)\n\n\ndef get_shape(line):\n m = label_re.match(line)\n if m:\n return m.group(2)\n\n\ndsk = {\n \"a\": 1,\n \"b\": 2,\n \"c\": (neg, \"a\"),\n \"d\": (neg, \"b\"),\n \"e\": (add, \"c\", \"d\"),\n \"f\": (sum, [\"a\", \"e\"]),\n}\n\n\ndef test_task_label():\n assert task_label((partial(add, 1), 1)) == \"add\"\n assert task_label((add, 1)) == \"add\"\n assert task_label((add, (add, 1, 2))) == \"add(...)\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_label_test_label.None_10": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_label_test_label.None_10", "embedding": null, "metadata": {"file_path": "dask/tests/test_dot.py", "file_name": "test_dot.py", "file_type": "text/x-python", "category": "test", "start_line": 66, "end_line": 87, "span_ids": ["test_label"], "tokens": 280}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_label():\n assert label(\"x\") == \"x\"\n assert label(\"elemwise-ffcd9aa2231d466b5aa91e8bfa9e9487\") == \"elemwise-#\"\n\n cache = {}\n result = label(\"elemwise-ffcd9aa2231d466b5aa91e8bfa9e9487\", cache=cache)\n assert result == \"elemwise-#0\"\n # cached\n result = label(\"elemwise-ffcd9aa2231d466b5aa91e8bfa9e9487\", cache=cache)\n assert result == \"elemwise-#0\"\n assert len(cache) == 
1\n\n result = label(\"elemwise-e890b510984f344edea9a5e5fe05c0db\", cache=cache)\n assert result == \"elemwise-#1\"\n assert len(cache) == 2\n\n result = label(\"elemwise-ffcd9aa2231d466b5aa91e8bfa9e9487\", cache=cache)\n assert result == \"elemwise-#0\"\n assert len(cache) == 2\n\n assert label(\"x\", cache=cache) == \"x\"\n assert len(cache) == 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_to_graphviz_test_to_graphviz_custom.assert_set_shapes_set": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_to_graphviz_test_to_graphviz_custom.assert_set_shapes_set", "embedding": null, "metadata": {"file_path": "dask/tests/test_dot.py", "file_name": "test_dot.py", "file_type": "text/x-python", "category": "test", "start_line": 90, "end_line": 108, "span_ids": ["test_to_graphviz", "test_to_graphviz_custom"], "tokens": 213}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_graphviz():\n g = to_graphviz(dsk)\n labels = list(filter(None, map(get_label, g.body)))\n assert len(labels) == 10 # 10 nodes total\n assert set(labels) == {\"c\", \"d\", \"e\", \"f\", '\"\"'}\n shapes = list(filter(None, map(get_shape, g.body)))\n assert set(shapes) == set((\"box\", \"circle\"))\n\n\ndef test_to_graphviz_custom():\n g = to_graphviz(\n dsk,\n data_attributes={\"a\": {\"shape\": \"square\"}},\n function_attributes={\"c\": {\"label\": \"neg_c\", \"shape\": \"ellipse\"}},\n )\n labels = set(filter(None, map(get_label, g.body)))\n assert labels == {\"neg_c\", \"d\", \"e\", \"f\", '\"\"'}\n shapes = list(filter(None, map(get_shape, g.body)))\n assert set(shapes) == set((\"box\", \"circle\", \"square\", \"ellipse\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_to_graphviz_attributes_test_to_graphviz_collapse_outputs_and_verbose.assert_set_shapes_set": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_to_graphviz_attributes_test_to_graphviz_collapse_outputs_and_verbose.assert_set_shapes_set", "embedding": null, "metadata": {"file_path": "dask/tests/test_dot.py", "file_name": "test_dot.py", "file_type": "text/x-python", "category": "test", "start_line": 111, "end_line": 149, "span_ids": ["test_to_graphviz_attributes", "test_aliases", "test_to_graphviz_collapse_outputs_and_verbose", "test_to_graphviz_collapse_outputs", "test_to_graphviz_verbose"], "tokens": 473}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "def test_to_graphviz_attributes():\n assert to_graphviz(dsk).graph_attr[\"rankdir\"] == \"BT\"\n assert to_graphviz(dsk, rankdir=\"LR\").graph_attr[\"rankdir\"] == \"LR\"\n assert to_graphviz(dsk, node_attr={\"color\": \"white\"}).node_attr[\"color\"] == \"white\"\n assert to_graphviz(dsk, edge_attr={\"color\": \"white\"}).edge_attr[\"color\"] == \"white\"\n\n\ndef test_aliases():\n g = to_graphviz({\"x\": 1, \"y\": \"x\"})\n labels = list(filter(None, map(get_label, g.body)))\n assert len(labels) == 2\n assert len(g.body) - len(labels) == 1 # Single edge\n\n\ndef test_to_graphviz_verbose():\n g = to_graphviz(dsk, verbose=True)\n labels = list(filter(None, map(get_label, g.body)))\n assert len(labels) == 10 # 10 nodes total\n assert set(labels) == {\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"}\n shapes = list(filter(None, map(get_shape, g.body)))\n assert set(shapes) == set((\"box\", \"circle\"))\n\n\ndef test_to_graphviz_collapse_outputs():\n g = to_graphviz(dsk, collapse_outputs=True)\n labels = list(filter(None, map(get_label, g.body)))\n assert len(labels) == 6 # 6 nodes total\n assert set(labels) == {\"c\", \"d\", \"e\", \"f\", '\"\"'}\n shapes = list(filter(None, map(get_shape, g.body)))\n assert set(shapes) == set((\"box\", \"circle\"))\n\n\ndef test_to_graphviz_collapse_outputs_and_verbose():\n g = to_graphviz(dsk, collapse_outputs=True, verbose=True)\n labels = list(filter(None, map(get_label, g.body)))\n assert len(labels) == 6 # 6 nodes total\n assert set(labels) == {\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"}\n shapes = list(filter(None, map(get_shape, g.body)))\n assert set(shapes) == set((\"box\", \"circle\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_to_graphviz_with_unconnected_node_test_to_graphviz_with_unconnected_node.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_to_graphviz_with_unconnected_node_test_to_graphviz_with_unconnected_node.None_4", "embedding": null, "metadata": {"file_path": "dask/tests/test_dot.py", "file_name": "test_dot.py", "file_type": "text/x-python", "category": "test", "start_line": 152, "end_line": 162, "span_ids": ["test_to_graphviz_with_unconnected_node"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_graphviz_with_unconnected_node():\n dsk[\"g\"] = 3\n g = to_graphviz(dsk, verbose=True)\n labels = list(filter(None, map(get_label, g.body)))\n assert len(labels) == 11 # 11 nodes total\n assert set(labels) == {\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\"}\n\n g = to_graphviz(dsk, verbose=True, collapse_outputs=True)\n labels = list(filter(None, map(get_label, g.body)))\n assert len(labels) == 6 # 6 nodes total\n assert set(labels) == {\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": 
"\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_dot_graph_test_dot_graph.try_.finally_.ensure_not_exists_target_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_dot_graph_test_dot_graph.try_.finally_.ensure_not_exists_target_", "embedding": null, "metadata": {"file_path": "dask/tests/test_dot.py", "file_name": "test_dot.py", "file_type": "text/x-python", "category": "test", "start_line": 165, "end_line": 195, "span_ids": ["test_dot_graph"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"format,typ\",\n [\n pytest.param(\"png\", Image, marks=ipython_not_installed_mark),\n pytest.param(\n \"jpeg\",\n Image,\n marks=pytest.mark.xfail(\n reason=\"jpeg not always supported in dot\", strict=False\n ),\n ),\n (\"dot\", type(None)),\n (\"pdf\", type(None)),\n pytest.param(\"svg\", SVG, marks=ipython_not_installed_mark),\n ],\n)\ndef test_dot_graph(tmpdir, format, typ):\n # Use a name that the shell would interpret specially to ensure that we're\n # not vulnerable to shell injection when interacting with `dot`.\n filename = str(tmpdir.join(\"$(touch should_not_get_created.txt)\"))\n\n target = \".\".join([filename, format])\n ensure_not_exists(target)\n try:\n result = dot_graph(dsk, filename=filename, format=format)\n\n assert not os.path.exists(\"should_not_get_created.txt\")\n assert os.path.isfile(target)\n assert isinstance(result, typ)\n finally:\n ensure_not_exists(target)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_dot_graph_no_filename_test_dot_graph_defaults.try_.finally_.ensure_not_exists_target_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_dot_graph_no_filename_test_dot_graph_defaults.try_.finally_.ensure_not_exists_target_", "embedding": null, "metadata": {"file_path": "dask/tests/test_dot.py", "file_name": "test_dot.py", "file_type": "text/x-python", "category": "test", "start_line": 198, "end_line": 236, "span_ids": ["test_dot_graph_no_filename", "test_dot_graph_defaults"], "tokens": 253}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"format,typ\",\n [\n pytest.param(\"png\", Image, marks=ipython_not_installed_mark),\n pytest.param(\n \"jpeg\",\n Image,\n marks=pytest.mark.xfail(\n reason=\"jpeg not always supported in dot\", strict=False\n ),\n ),\n (\"dot\", type(None)),\n (\"pdf\", type(None)),\n pytest.param(\"svg\", SVG, marks=ipython_not_installed_mark),\n ],\n)\ndef 
test_dot_graph_no_filename(tmpdir, format, typ):\n before = tmpdir.listdir()\n result = dot_graph(dsk, filename=None, format=format)\n # We shouldn't write any files if filename is None.\n after = tmpdir.listdir()\n assert before == after\n assert isinstance(result, typ)\n\n\n@ipython_not_installed_mark\ndef test_dot_graph_defaults():\n # Test with default args.\n default_name = \"mydask\"\n default_format = \"png\"\n target = \".\".join([default_name, default_format])\n\n ensure_not_exists(target)\n try:\n result = dot_graph(dsk)\n assert os.path.isfile(target)\n assert isinstance(result, Image)\n finally:\n ensure_not_exists(target)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_filenames_and_formats_test_filenames_and_formats.assert_isinstance_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_filenames_and_formats_test_filenames_and_formats.assert_isinstance_result_", "embedding": null, "metadata": {"file_path": "dask/tests/test_dot.py", "file_name": "test_dot.py", "file_type": "text/x-python", "category": "test", "start_line": 239, "end_line": 264, "span_ids": ["test_filenames_and_formats"], "tokens": 227}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"filename,format,target,expected_result_type\",\n [\n pytest.param(\n \"mydaskpdf\", \"svg\", \"mydaskpdf.svg\", SVG, marks=ipython_not_installed_mark\n ),\n (\"mydask.pdf\", None, \"mydask.pdf\", type(None)),\n pytest.param(\n \"mydask.pdf\", \"svg\", \"mydask.pdf.svg\", SVG, marks=ipython_not_installed_mark\n ),\n pytest.param(\n \"mydaskpdf\", None, \"mydaskpdf.png\", Image, marks=ipython_not_installed_mark\n ),\n pytest.param(\n \"mydask.pdf.svg\",\n None,\n \"mydask.pdf.svg\",\n SVG,\n marks=ipython_not_installed_mark,\n ),\n ],\n)\ndef test_filenames_and_formats(tmpdir, filename, format, target, expected_result_type):\n result = dot_graph(dsk, filename=str(tmpdir.join(filename)), format=format)\n assert tmpdir.join(target).exists()\n assert isinstance(result, expected_result_type)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_delayed_kwargs_apply_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_delayed_kwargs_apply_", "embedding": null, "metadata": {"file_path": "dask/tests/test_dot.py", "file_name": "test_dot.py", "file_type": "text/x-python", "category": "test", "start_line": 267, "end_line": 295, "span_ids": ["test_delayed_kwargs_apply", "test_immutable_attributes"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", 
"start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_delayed_kwargs_apply():\n def f(x, y=True):\n return x + y\n\n x = delayed(f)(1, y=2)\n label = task_label(x.dask[x.key])\n assert \"f\" in label\n assert \"apply\" not in label\n\n\ndef test_immutable_attributes():\n def inc(x):\n return x + 1\n\n dsk = {\"a\": (inc, 1), \"b\": (inc, 2), \"c\": (add, \"a\", \"b\")}\n attrs_func = {\"a\": {}}\n attrs_data = {\"b\": {}}\n attrs_func_test = copy.deepcopy(attrs_func)\n attrs_data_test = copy.deepcopy(attrs_data)\n\n to_graphviz(\n dsk,\n function_attributes=attrs_func,\n data_attributes=attrs_data,\n )\n\n assert attrs_func_test == attrs_func\n assert attrs_data_test == attrs_data", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_hashing.py_pytest_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_hashing.py_pytest_", "embedding": null, "metadata": {"file_path": "dask/tests/test_hashing.py", "file_name": "test_hashing.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 44, "span_ids": ["test_hash_buffer", "imports", "test_hash_buffer_hex", "test_hashers"], "tokens": 303}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\nfrom dask.hashing import hashers, hash_buffer, hash_buffer_hex\n\n\nnp = pytest.importorskip(\"numpy\")\n\nbuffers = [\n b\"abc\",\n bytearray(b\"123\"),\n memoryview(b\"456\"),\n np.array(42),\n np.ones((100, 100)),\n np.zeros((100, 100), dtype=[(\"a\", \"i4\"), (\"b\", \"i2\")]),\n np.ones(10000, dtype=np.int8)[1:], # unaligned\n]\n\n\n@pytest.mark.parametrize(\"x\", buffers)\ndef test_hash_buffer(x):\n for hasher in [None] + hashers:\n h = hash_buffer(x, hasher=hasher)\n assert isinstance(h, bytes)\n assert 8 <= len(h) < 32\n assert h == hash_buffer(x, hasher=hasher)\n\n\n@pytest.mark.parametrize(\"x\", buffers)\ndef test_hash_buffer_hex(x):\n for hasher in [None] + hashers:\n h = hash_buffer_hex(x, hasher=hasher)\n assert isinstance(h, str)\n assert 16 <= len(h) < 64\n assert h == hash_buffer_hex(x, hasher=hasher)\n\n\n@pytest.mark.parametrize(\"hasher\", hashers)\ndef test_hashers(hasher):\n # Sanity check\n x = b\"x\"\n h = hasher(x)\n assert isinstance(h, bytes)\n assert 8 <= len(h) < 32", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_dask_test_start_state.assert_result_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_dask_test_start_state.assert_result_expected", "embedding": null, "metadata": {"file_path": 
"dask/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 31, "span_ids": ["test_start_state", "imports"], "tokens": 319}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import dask\n\nfrom dask.local import start_state_from_dask, get_sync, finish_task, sortkey\nfrom dask.order import order\nfrom dask.utils_test import GetFunctionTestMixin, inc, add\n\n\nfib_dask = {\"f0\": 0, \"f1\": 1, \"f2\": 1, \"f3\": 2, \"f4\": 3, \"f5\": 5, \"f6\": 8}\n\n\ndef test_start_state():\n dsk = {\"x\": 1, \"y\": 2, \"z\": (inc, \"x\"), \"w\": (add, \"z\", \"y\")}\n result = start_state_from_dask(dsk)\n\n expected = {\n \"cache\": {\"x\": 1, \"y\": 2},\n \"dependencies\": {\n \"w\": set([\"y\", \"z\"]),\n \"x\": set([]),\n \"y\": set([]),\n \"z\": set([\"x\"]),\n },\n \"dependents\": {\"w\": set([]), \"x\": set([\"z\"]), \"y\": set([\"w\"]), \"z\": set([\"w\"])},\n \"finished\": set([]),\n \"released\": set([]),\n \"running\": set([]),\n \"ready\": [\"z\"],\n \"waiting\": {\"w\": set([\"z\"])},\n \"waiting_data\": {\"x\": set([\"z\"]), \"y\": set([\"w\"]), \"z\": set([\"w\"])},\n }\n assert result == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_test_start_state_looks_at_cache_test_start_state_with_independent_but_runnable_tasks.assert_start_state_from_d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_test_start_state_looks_at_cache_test_start_state_with_independent_but_runnable_tasks.assert_start_state_from_d", "embedding": null, "metadata": {"file_path": "dask/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 34, "end_line": 49, "span_ids": ["test_start_state_looks_at_cache", "test_start_state_with_independent_but_runnable_tasks", "test_start_state_with_redirects"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_start_state_looks_at_cache():\n dsk = {\"b\": (inc, \"a\")}\n cache = {\"a\": 1}\n result = start_state_from_dask(dsk, cache)\n assert result[\"dependencies\"][\"b\"] == set([\"a\"])\n assert result[\"ready\"] == [\"b\"]\n\n\ndef test_start_state_with_redirects():\n dsk = {\"x\": 1, \"y\": \"x\", \"z\": (inc, \"y\")}\n result = start_state_from_dask(dsk)\n assert result[\"cache\"] == {\"x\": 1}\n\n\ndef test_start_state_with_independent_but_runnable_tasks():\n assert start_state_from_dask({\"x\": (inc, 1)})[\"ready\"] == [\"x\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, 
"__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_test_start_state_with_tasks_no_deps_test_start_state_with_tasks_no_deps.assert_state_dependents_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_test_start_state_with_tasks_no_deps_test_start_state_with_tasks_no_deps.assert_state_dependents_", "embedding": null, "metadata": {"file_path": "dask/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 52, "end_line": 59, "span_ids": ["test_start_state_with_tasks_no_deps"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_start_state_with_tasks_no_deps():\n dsk = {\"a\": [1, (inc, 2)], \"b\": [1, 2, 3, 4], \"c\": (inc, 3)}\n state = start_state_from_dask(dsk)\n assert list(state[\"cache\"].keys()) == [\"b\"]\n assert \"a\" in state[\"ready\"] and \"c\" in state[\"ready\"]\n deps = dict((k, set()) for k in \"abc\")\n assert state[\"dependencies\"] == deps\n assert state[\"dependents\"] == deps", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_test_finish_task_test_finish_task.assert_state_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_test_finish_task_test_finish_task.assert_state_", "embedding": null, "metadata": {"file_path": "dask/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 62, "end_line": 89, "span_ids": ["test_finish_task"], "tokens": 276}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_finish_task():\n dsk = {\"x\": 1, \"y\": 2, \"z\": (inc, \"x\"), \"w\": (add, \"z\", \"y\")}\n sortkey = order(dsk).get\n state = start_state_from_dask(dsk)\n state[\"ready\"].remove(\"z\")\n state[\"running\"] = set([\"z\", \"other-task\"])\n task = \"z\"\n result = 2\n\n state[\"cache\"][\"z\"] = result\n finish_task(dsk, task, state, set(), sortkey)\n\n assert state == {\n \"cache\": {\"y\": 2, \"z\": 2},\n \"dependencies\": {\n \"w\": set([\"y\", \"z\"]),\n \"x\": set([]),\n \"y\": set([]),\n \"z\": set([\"x\"]),\n },\n \"finished\": set([\"z\"]),\n \"released\": set([\"x\"]),\n \"running\": set([\"other-task\"]),\n \"dependents\": {\"w\": set([]), \"x\": set([\"z\"]), \"y\": set([\"w\"]), \"z\": set([\"w\"])},\n \"ready\": [\"w\"],\n \"waiting\": {},\n \"waiting_data\": {\"y\": set([\"w\"]), \"z\": set([\"w\"])},\n }", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": 
"TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_TestGetAsync_test_sort_key.assert_sorted_L_key_sort": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_TestGetAsync_test_sort_key.assert_sorted_L_key_sort", "embedding": null, "metadata": {"file_path": "dask/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 92, "end_line": 116, "span_ids": ["TestGetAsync.test_get_sync_num_workers", "test_sort_key", "test_cache_options", "TestGetAsync"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestGetAsync(GetFunctionTestMixin):\n get = staticmethod(get_sync)\n\n def test_get_sync_num_workers(self):\n self.get({\"x\": (inc, \"y\"), \"y\": 1}, \"x\", num_workers=2)\n\n\ndef test_cache_options():\n try:\n from chest import Chest\n except ImportError:\n return\n cache = Chest()\n\n def inc2(x):\n assert \"y\" in cache\n return x + 1\n\n with dask.config.set(cache=cache):\n get_sync({\"x\": (inc2, \"y\"), \"y\": 1}, \"x\")\n\n\ndef test_sort_key():\n L = [\"x\", (\"x\", 1), (\"z\", 0), (\"x\", 0)]\n assert sorted(L, key=sortkey) == [\"x\", (\"x\", 0), (\"x\", 1), (\"z\", 0)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_test_callback_test_callback.get_dsk_a_start_callb": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_test_callback_test_callback.get_dsk_a_start_callb", "embedding": null, "metadata": {"file_path": "dask/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 119, "end_line": 135, "span_ids": ["test_callback"], "tokens": 143}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_callback():\n f = lambda x: x + 1\n dsk = {\"a\": (f, 1)}\n from dask.threaded import get\n\n def start_callback(key, d, state):\n assert key == \"a\" or key is None\n assert d == dsk\n assert isinstance(state, dict)\n\n def end_callback(key, value, d, state, worker_id):\n assert key == \"a\" or key is None\n assert value == 2 or value is None\n assert d == dsk\n assert isinstance(state, dict)\n\n get(dsk, \"a\", start_callback=start_callback, end_callback=end_callback)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_test_exceptions_propagate_": 
{"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_test_exceptions_propagate_", "embedding": null, "metadata": {"file_path": "dask/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 138, "end_line": 175, "span_ids": ["test_exceptions_propagate", "test_ordering"], "tokens": 215}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_exceptions_propagate():\n class MyException(Exception):\n def __init__(self, a, b):\n self.a = a\n self.b = b\n\n def __str__(self):\n return \"My Exception!\"\n\n def f():\n raise MyException(1, 2)\n\n from dask.threaded import get\n\n try:\n get({\"x\": (f,)}, \"x\")\n assert False\n except MyException as e:\n assert \"My Exception!\" in str(e)\n assert \"a\" in dir(e)\n assert e.a == 1\n assert e.b == 2\n\n\ndef test_ordering():\n L = []\n\n def append(i):\n L.append(i)\n\n dsk = {(\"x\", i): (append, i) for i in range(10)}\n x_keys = sorted(dsk)\n dsk[\"y\"] = (lambda *args: None, list(x_keys))\n\n get_sync(dsk, \"y\")\n\n assert L == sorted(L)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_from_distutils_version_im_test_pickle_locals.assert_b_unrelated_functi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_from_distutils_version_im_test_pickle_locals.assert_b_unrelated_functi", "embedding": null, "metadata": {"file_path": "dask/tests/test_multiprocessing.py", "file_name": "test_multiprocessing.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 59, "span_ids": ["unrelated_function_global", "test_pickle_globals", "imports", "my_small_function_global", "test_pickle_locals"], "tokens": 339}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from distutils.version import LooseVersion\nimport sys\nimport multiprocessing\nfrom operator import add\nimport pickle\nimport random\n\nimport numpy as np\n\nimport pytest\nimport dask\nfrom dask import compute, delayed\nfrom dask.multiprocessing import get, _dumps, _loads, get_context, remote_exception\nfrom dask.utils_test import inc\n\n\ntry:\n import cloudpickle # noqa: F401\n\n has_cloudpickle = True\nexcept ImportError:\n has_cloudpickle = False\n\nrequires_cloudpickle = pytest.mark.skipif(\n not has_cloudpickle, reason=\"requires cloudpickle\"\n)\nnot_cloudpickle = pytest.mark.skipif(has_cloudpickle, reason=\"cloudpickle is installed\")\n\n\ndef unrelated_function_global(a):\n return np.array([a])\n\n\ndef my_small_function_global(a, b):\n return a + b\n\n\ndef test_pickle_globals():\n \"\"\" Unrelated globals should not be 
included in serialized bytes \"\"\"\n b = _dumps(my_small_function_global)\n assert b\"my_small_function_global\" in b\n assert b\"unrelated_function_global\" not in b\n assert b\"numpy\" not in b\n\n\n@requires_cloudpickle\ndef test_pickle_locals():\n \"\"\"Unrelated locals should not be included in serialized bytes\"\"\"\n\n def unrelated_function_local(a):\n return np.array([a])\n\n def my_small_function_local(a, b):\n return a + b\n\n b = _dumps(my_small_function_local)\n assert b\"my_small_function_global\" not in b\n assert b\"my_small_function_local\" in b\n assert b\"unrelated_function_local\" not in b", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_test_pickle_kwargs_test_pickle_kwargs.assert_my_small_function_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_test_pickle_kwargs_test_pickle_kwargs.assert_my_small_function_", "embedding": null, "metadata": {"file_path": "dask/tests/test_multiprocessing.py", "file_name": "test_multiprocessing.py", "file_type": "text/x-python", "category": "test", "start_line": 62, "end_line": 75, "span_ids": ["test_pickle_kwargs"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@not_cloudpickle\ndef test_pickle_kwargs():\n \"\"\"Test that out-of-band pickling works\n\n Note cloudpickle does not support this argument:\n\n https://github.com/cloudpipe/cloudpickle/issues/213\n \"\"\"\n b = _dumps(my_small_function_global, fix_imports=True)\n assert b\"my_small_function_global\" in b\n assert b\"unrelated_function_global\" not in b\n assert b\"numpy\" not in b\n my_small_function_global_2 = _loads(b, fix_imports=True)\n assert my_small_function_global_2(2, 3) == 5", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_test_out_of_band_pickling_test_out_of_band_pickling.assert_np_all_a_a2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_test_out_of_band_pickling_test_out_of_band_pickling.assert_np_all_a_a2_", "embedding": null, "metadata": {"file_path": "dask/tests/test_multiprocessing.py", "file_name": "test_multiprocessing.py", "file_type": "text/x-python", "category": "test", "start_line": 78, "end_line": 94, "span_ids": ["test_out_of_band_pickling"], "tokens": 164}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, 
reason=\"requires pickle protocol 5\")\ndef test_out_of_band_pickling():\n \"\"\"Test that out-of-band pickling works\"\"\"\n if has_cloudpickle:\n if cloudpickle.__version__ < LooseVersion(\"1.3.0\"):\n pytest.skip(\"when using cloudpickle, it must be version 1.3.0+\")\n\n a = np.arange(5)\n\n l = []\n b = _dumps(a, buffer_callback=l.append)\n assert len(l) == 1\n assert isinstance(l[0], pickle.PickleBuffer)\n assert memoryview(l[0]) == memoryview(a)\n\n a2 = _loads(b, buffers=l)\n assert np.all(a == a2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_bad_test_optimize_graph_false.assert_len_keys_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_bad_test_optimize_graph_false.assert_len_keys_2", "embedding": null, "metadata": {"file_path": "dask/tests/test_multiprocessing.py", "file_name": "test_multiprocessing.py", "file_type": "text/x-python", "category": "test", "start_line": 97, "end_line": 200, "span_ids": ["test_lambda_with_cloudpickle", "lambda_result", "test_dumps_loads", "test_remote_exception", "test_reuse_pool", "NotUnpickleable.__getstate__", "test_fuse_doesnt_clobber_intermediates", "test_lambda_results_without_cloudpickle", "NotUnpickleable", "test_unpicklable_args_generate_errors", "bad", "test_errors_propagate", "test_lambda_results_with_cloudpickle", "NotUnpickleable.__setstate__", "test_optimize_graph_false", "test_lambda_without_cloudpickle"], "tokens": 741}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def bad():\n raise ValueError(\"12345\")\n\n\ndef test_errors_propagate():\n dsk = {\"x\": (bad,)}\n\n with pytest.raises(ValueError) as e:\n get(dsk, \"x\")\n assert \"12345\" in str(e.value)\n\n\ndef test_remote_exception():\n e = TypeError(\"hello\")\n a = remote_exception(e, \"traceback-body\")\n b = remote_exception(e, \"traceback-body\")\n\n assert type(a) == type(b)\n assert isinstance(a, TypeError)\n assert \"hello\" in str(a)\n assert \"Traceback\" in str(a)\n assert \"traceback-body\" in str(a)\n\n\n@requires_cloudpickle\ndef test_lambda_with_cloudpickle():\n dsk = {\"x\": 2, \"y\": (lambda x: x + 1, \"x\")}\n assert get(dsk, \"y\") == 3\n\n\n@not_cloudpickle\ndef test_lambda_without_cloudpickle():\n dsk = {\"x\": 2, \"y\": (lambda x: x + 1, \"x\")}\n with pytest.raises(ModuleNotFoundError) as e:\n get(dsk, \"y\")\n assert \"cloudpickle\" in str(e.value)\n\n\ndef lambda_result():\n return lambda x: x + 1\n\n\n@requires_cloudpickle\ndef test_lambda_results_with_cloudpickle():\n dsk = {\"x\": (lambda_result,)}\n f = get(dsk, \"x\")\n assert f(2) == 3\n\n\n@not_cloudpickle\ndef test_lambda_results_without_cloudpickle():\n dsk = {\"x\": (lambda_result,)}\n with pytest.raises(ModuleNotFoundError) as e:\n get(dsk, \"x\")\n assert \"cloudpickle\" in str(e.value)\n\n\nclass NotUnpickleable(object):\n def __getstate__(self):\n return ()\n\n def __setstate__(self, state):\n raise ValueError(\"Can't unpickle me\")\n\n\ndef 
test_unpicklable_args_generate_errors():\n a = NotUnpickleable()\n\n dsk = {\"x\": (bool, a)}\n\n with pytest.raises(ValueError):\n get(dsk, \"x\")\n\n dsk = {\"x\": (bool, \"a\"), \"a\": a}\n\n with pytest.raises(ValueError):\n get(dsk, \"x\")\n\n\ndef test_reuse_pool():\n with multiprocessing.Pool() as pool:\n with dask.config.set(pool=pool):\n assert get({\"x\": (inc, 1)}, \"x\") == 2\n assert get({\"x\": (inc, 1)}, \"x\") == 2\n\n\ndef test_dumps_loads():\n with dask.config.set(func_dumps=pickle.dumps, func_loads=pickle.loads):\n assert get({\"x\": 1, \"y\": (add, \"x\", 2)}, \"y\") == 3\n\n\ndef test_fuse_doesnt_clobber_intermediates():\n d = {\"x\": 1, \"y\": (inc, \"x\"), \"z\": (add, 10, \"y\")}\n assert get(d, [\"y\", \"z\"]) == (2, 12)\n\n\ndef test_optimize_graph_false():\n from dask.callbacks import Callback\n\n d = {\"x\": 1, \"y\": (inc, \"x\"), \"z\": (add, 10, \"y\")}\n keys = []\n with Callback(pretask=lambda key, *args: keys.append(key)):\n get(d, \"z\", optimize_graph=False)\n assert len(keys) == 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_test_random_seeds_check_for_pytest.return._FAKE_MODULE_FOR_TEST_in": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_test_random_seeds_check_for_pytest.return._FAKE_MODULE_FOR_TEST_in", "embedding": null, "metadata": {"file_path": "dask/tests/test_multiprocessing.py", "file_name": "test_multiprocessing.py", "file_type": "text/x-python", "category": "test", "start_line": 203, "end_line": 223, "span_ids": ["check_for_pytest", "test_random_seeds"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@requires_cloudpickle\n@pytest.mark.parametrize(\"random\", [np.random, random])\ndef test_random_seeds(random):\n @delayed(pure=False)\n def f():\n return tuple(random.randint(0, 10000) for i in range(5))\n\n N = 10\n with dask.config.set(scheduler=\"processes\"):\n (results,) = compute([f() for _ in range(N)])\n\n assert len(set(results)) == N\n\n\ndef check_for_pytest():\n \"\"\"We check for spawn by ensuring subprocess doesn't have modules only\n parent process should have:\n \"\"\"\n import sys\n\n return \"FAKE_MODULE_FOR_TEST\" in sys.modules", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_test_custom_context_used_python3_posix_test_custom_context_used_python3_posix.try_.finally_.del_sys_modules_FAKE_MOD": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_test_custom_context_used_python3_posix_test_custom_context_used_python3_posix.try_.finally_.del_sys_modules_FAKE_MOD", "embedding": null, "metadata": {"file_path": 
"dask/tests/test_multiprocessing.py", "file_name": "test_multiprocessing.py", "file_type": "text/x-python", "category": "test", "start_line": 226, "end_line": 251, "span_ids": ["test_custom_context_used_python3_posix"], "tokens": 194}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n sys.platform == \"win32\", reason=\"Windows doesn't support different contexts\"\n)\ndef test_custom_context_used_python3_posix():\n \"\"\"The 'multiprocessing.context' config is used to create the pool.\n\n We assume default is 'spawn', and therefore test for 'fork'.\n \"\"\"\n pytest.importorskip(\"cloudpickle\")\n # We check for 'fork' by ensuring subprocess doesn't have modules only\n # parent process should have:\n\n def check_for_pytest():\n import sys\n\n return \"FAKE_MODULE_FOR_TEST\" in sys.modules\n\n import sys\n\n sys.modules[\"FAKE_MODULE_FOR_TEST\"] = 1\n try:\n with dask.config.set({\"multiprocessing.context\": \"fork\"}):\n result = get({\"x\": (check_for_pytest,)}, \"x\")\n assert result\n finally:\n del sys.modules[\"FAKE_MODULE_FOR_TEST\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_test_get_context_using_python3_posix_test_get_context_using_python3_posix.None_1.assert_get_context_is_m": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_test_get_context_using_python3_posix_test_get_context_using_python3_posix.None_1.assert_get_context_is_m", "embedding": null, "metadata": {"file_path": "dask/tests/test_multiprocessing.py", "file_name": "test_multiprocessing.py", "file_type": "text/x-python", "category": "test", "start_line": 254, "end_line": 266, "span_ids": ["test_get_context_using_python3_posix"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n sys.platform == \"win32\", reason=\"Windows doesn't support different contexts\"\n)\ndef test_get_context_using_python3_posix():\n \"\"\"get_context() respects configuration.\n\n If default context is changed this test will need to change too.\n \"\"\"\n assert get_context() is multiprocessing.get_context(\"spawn\")\n with dask.config.set({\"multiprocessing.context\": \"forkserver\"}):\n assert get_context() is multiprocessing.get_context(\"forkserver\")\n with dask.config.set({\"multiprocessing.context\": \"fork\"}):\n assert get_context() is multiprocessing.get_context(\"fork\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_test_custom_context_ignored_elsewhere_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_test_custom_context_ignored_elsewhere_", "embedding": null, "metadata": {"file_path": "dask/tests/test_multiprocessing.py", "file_name": "test_multiprocessing.py", "file_type": "text/x-python", "category": "test", "start_line": 269, "end_line": 289, "span_ids": ["test_custom_context_ignored_elsewhere", "test_get_context_always_default"], "tokens": 214}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(sys.platform != \"win32\", reason=\"POSIX supports different contexts\")\ndef test_custom_context_ignored_elsewhere():\n \"\"\"On Windows, setting 'multiprocessing.context' doesn't explode.\n\n Presumption is it's not used since it's unsupported, but mostly we care about\n not breaking anything.\n \"\"\"\n assert get({\"x\": (inc, 1)}, \"x\") == 2\n with pytest.warns(UserWarning):\n with dask.config.set({\"multiprocessing.context\": \"forkserver\"}):\n assert get({\"x\": (inc, 1)}, \"x\") == 2\n\n\n@pytest.mark.skipif(sys.platform != \"win32\", reason=\"POSIX supports different contexts\")\ndef test_get_context_always_default():\n \"\"\" On Python 2/Windows, get_context() always returns same context.\"\"\"\n assert get_context() is multiprocessing\n with pytest.warns(UserWarning):\n with dask.config.set({\"multiprocessing.context\": \"forkserver\"}):\n assert get_context() is multiprocessing", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_itertools_with_deps.return.dsk_k_get_dependencies": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_itertools_with_deps.return.dsk_k_get_dependencies", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 51, "span_ids": ["imports", "fuse2", "test_cull", "with_deps", "double"], "tokens": 411}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import itertools\nimport pickle\nfrom functools import partial\n\nimport pytest\n\nimport dask\nfrom dask.utils_test import add, inc\nfrom dask.core import get_dependencies\nfrom dask.local import get_sync\nfrom dask.optimization import (\n cull,\n fuse,\n inline,\n inline_functions,\n functions_of,\n fuse_linear,\n SubgraphCallable,\n)\nfrom dask.utils import partial_by_order, apply\n\n\ndef double(x):\n return x * 2\n\n\ndef test_cull():\n # 'out' depends on 'x' 
and 'y', but not 'z'\n d = {\"x\": 1, \"y\": (inc, \"x\"), \"z\": (inc, \"x\"), \"out\": (add, \"y\", 10)}\n culled, dependencies = cull(d, \"out\")\n assert culled == {\"x\": 1, \"y\": (inc, \"x\"), \"out\": (add, \"y\", 10)}\n assert dependencies == {\"x\": [], \"y\": [\"x\"], \"out\": [\"y\"]}\n\n assert cull(d, \"out\") == cull(d, [\"out\"])\n assert cull(d, [\"out\", \"z\"])[0] == d\n assert cull(d, [[\"out\"], [\"z\"]]) == cull(d, [\"out\", \"z\"])\n pytest.raises(KeyError, lambda: cull(d, \"badkey\"))\n\n\ndef fuse2(*args, **kwargs):\n \"\"\"Run both ``fuse`` and ``fuse_linear`` and compare results\"\"\"\n rv1 = fuse_linear(*args, **kwargs)\n if kwargs.get(\"rename_keys\") is not False:\n return rv1\n rv2 = fuse(*args, **kwargs)\n assert rv1 == rv2\n return rv1\n\n\ndef with_deps(dsk):\n return dsk, {k: get_dependencies(dsk, k) for k in dsk}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_test_fuse.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_test_fuse.None_5", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 54, "end_line": 139, "span_ids": ["test_fuse"], "tokens": 755}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse():\n fuse = fuse2 # tests both `fuse` and `fuse_linear`\n d = {\n \"w\": (inc, \"x\"),\n \"x\": (inc, \"y\"),\n \"y\": (inc, \"z\"),\n \"z\": (add, \"a\", \"b\"),\n \"a\": 1,\n \"b\": 2,\n }\n assert fuse(d, rename_keys=False) == with_deps(\n {\"w\": (inc, (inc, (inc, (add, \"a\", \"b\")))), \"a\": 1, \"b\": 2}\n )\n assert fuse(d, rename_keys=True) == with_deps(\n {\n \"z-y-x-w\": (inc, (inc, (inc, (add, \"a\", \"b\")))),\n \"a\": 1,\n \"b\": 2,\n \"w\": \"z-y-x-w\",\n }\n )\n\n d = {\n \"NEW\": (inc, \"y\"),\n \"w\": (inc, \"x\"),\n \"x\": (inc, \"y\"),\n \"y\": (inc, \"z\"),\n \"z\": (add, \"a\", \"b\"),\n \"a\": 1,\n \"b\": 2,\n }\n assert fuse(d, rename_keys=False) == with_deps(\n {\n \"NEW\": (inc, \"y\"),\n \"w\": (inc, (inc, \"y\")),\n \"y\": (inc, (add, \"a\", \"b\")),\n \"a\": 1,\n \"b\": 2,\n }\n )\n assert fuse(d, rename_keys=True) == with_deps(\n {\n \"NEW\": (inc, \"z-y\"),\n \"x-w\": (inc, (inc, \"z-y\")),\n \"z-y\": (inc, (add, \"a\", \"b\")),\n \"a\": 1,\n \"b\": 2,\n \"w\": \"x-w\",\n \"y\": \"z-y\",\n }\n )\n\n d = {\n \"v\": (inc, \"y\"),\n \"u\": (inc, \"w\"),\n \"w\": (inc, \"x\"),\n \"x\": (inc, \"y\"),\n \"y\": (inc, \"z\"),\n \"z\": (add, \"a\", \"b\"),\n \"a\": (inc, \"c\"),\n \"b\": (inc, \"d\"),\n \"c\": 1,\n \"d\": 2,\n }\n assert fuse(d, rename_keys=False) == with_deps(\n {\n \"u\": (inc, (inc, (inc, \"y\"))),\n \"v\": (inc, \"y\"),\n \"y\": (inc, (add, \"a\", \"b\")),\n \"a\": (inc, 1),\n \"b\": (inc, 2),\n }\n )\n assert fuse(d, rename_keys=True) == with_deps(\n {\n \"x-w-u\": (inc, (inc, (inc, \"z-y\"))),\n \"v\": (inc, 
\"z-y\"),\n \"z-y\": (inc, (add, \"c-a\", \"d-b\")),\n \"c-a\": (inc, 1),\n \"d-b\": (inc, 2),\n \"a\": \"c-a\",\n \"b\": \"d-b\",\n \"u\": \"x-w-u\",\n \"y\": \"z-y\",\n }\n )\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse.d_4_test_fuse.None_9": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse.d_4_test_fuse.None_9", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 141, "end_line": 169, "span_ids": ["test_fuse"], "tokens": 326}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse():\n # ... other code\n\n d = {\n \"a\": (inc, \"x\"),\n \"b\": (inc, \"x\"),\n \"c\": (inc, \"x\"),\n \"d\": (inc, \"c\"),\n \"x\": (inc, \"y\"),\n \"y\": 0,\n }\n assert fuse(d, rename_keys=False) == with_deps(\n {\"a\": (inc, \"x\"), \"b\": (inc, \"x\"), \"d\": (inc, (inc, \"x\")), \"x\": (inc, 0)}\n )\n assert fuse(d, rename_keys=True) == with_deps(\n {\n \"a\": (inc, \"y-x\"),\n \"b\": (inc, \"y-x\"),\n \"c-d\": (inc, (inc, \"y-x\")),\n \"y-x\": (inc, 0),\n \"d\": \"c-d\",\n \"x\": \"y-x\",\n }\n )\n\n d = {\"a\": 1, \"b\": (inc, \"a\"), \"c\": (add, \"b\", \"b\")}\n assert fuse(d, rename_keys=False) == with_deps(\n {\"b\": (inc, 1), \"c\": (add, \"b\", \"b\")}\n )\n assert fuse(d, rename_keys=True) == with_deps(\n {\"a-b\": (inc, 1), \"c\": (add, \"a-b\", \"a-b\"), \"b\": \"a-b\"}\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_keys_test_fuse_keys.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_keys_test_fuse_keys.None_3", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 172, "end_line": 204, "span_ids": ["test_fuse_keys"], "tokens": 353}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_keys():\n fuse = fuse2 # tests both `fuse` and `fuse_linear`\n d = {\"a\": 1, \"b\": (inc, \"a\"), \"c\": (inc, \"b\")}\n keys = [\"b\"]\n assert fuse(d, keys, rename_keys=False) == with_deps(\n {\"b\": (inc, 1), \"c\": (inc, \"b\")}\n )\n assert fuse(d, keys, rename_keys=True) == with_deps(\n {\"a-b\": (inc, 
1), \"c\": (inc, \"a-b\"), \"b\": \"a-b\"}\n )\n\n d = {\n \"w\": (inc, \"x\"),\n \"x\": (inc, \"y\"),\n \"y\": (inc, \"z\"),\n \"z\": (add, \"a\", \"b\"),\n \"a\": 1,\n \"b\": 2,\n }\n keys = [\"x\", \"z\"]\n assert fuse(d, keys, rename_keys=False) == with_deps(\n {\"w\": (inc, \"x\"), \"x\": (inc, (inc, \"z\")), \"z\": (add, \"a\", \"b\"), \"a\": 1, \"b\": 2}\n )\n assert fuse(d, keys, rename_keys=True) == with_deps(\n {\n \"w\": (inc, \"y-x\"),\n \"y-x\": (inc, (inc, \"z\")),\n \"z\": (add, \"a\", \"b\"),\n \"a\": 1,\n \"b\": 2,\n \"x\": \"y-x\",\n }\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_inline_test_inline.assert_inline_d_a_inl": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_inline_test_inline.assert_inline_d_a_inl", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 207, "end_line": 239, "span_ids": ["test_inline"], "tokens": 487}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_inline():\n d = {\"a\": 1, \"b\": (inc, \"a\"), \"c\": (inc, \"b\"), \"d\": (add, \"a\", \"c\")}\n assert inline(d) == {\"a\": 1, \"b\": (inc, 1), \"c\": (inc, \"b\"), \"d\": (add, 1, \"c\")}\n assert inline(d, [\"a\", \"b\", \"c\"]) == {\n \"a\": 1,\n \"b\": (inc, 1),\n \"c\": (inc, (inc, 1)),\n \"d\": (add, 1, (inc, (inc, 1))),\n }\n d = {\"x\": 1, \"y\": (inc, \"x\"), \"z\": (add, \"x\", \"y\")}\n assert inline(d) == {\"x\": 1, \"y\": (inc, 1), \"z\": (add, 1, \"y\")}\n assert inline(d, keys=\"y\") == {\"x\": 1, \"y\": (inc, 1), \"z\": (add, 1, (inc, 1))}\n assert inline(d, keys=\"y\", inline_constants=False) == {\n \"x\": 1,\n \"y\": (inc, \"x\"),\n \"z\": (add, \"x\", (inc, \"x\")),\n }\n\n d = {\"a\": 1, \"b\": \"a\", \"c\": \"b\", \"d\": [\"a\", \"b\", \"c\"], \"e\": (add, (len, \"d\"), \"a\")}\n assert inline(d, \"d\") == {\n \"a\": 1,\n \"b\": 1,\n \"c\": 1,\n \"d\": [1, 1, 1],\n \"e\": (add, (len, [1, 1, 1]), 1),\n }\n assert inline(d, \"a\", inline_constants=False) == {\n \"a\": 1,\n \"b\": 1,\n \"c\": \"b\",\n \"d\": [1, \"b\", \"c\"],\n \"e\": (add, (len, \"d\"), 1),\n }", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_inline_functions_test_inline_ignores_curries_and_partials.assert_a_not_in_result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_inline_functions_test_inline_ignores_curries_and_partials.assert_a_not_in_result", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", 
"file_type": "text/x-python", "category": "test", "start_line": 242, "end_line": 256, "span_ids": ["test_inline_functions", "test_inline_ignores_curries_and_partials"], "tokens": 199}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_inline_functions():\n x, y, i, d = \"xyid\"\n dsk = {\"out\": (add, i, d), i: (inc, x), d: (double, y), x: 1, y: 1}\n\n result = inline_functions(dsk, [], fast_functions=set([inc]))\n expected = {\"out\": (add, (inc, x), d), d: (double, y), x: 1, y: 1}\n assert result == expected\n\n\ndef test_inline_ignores_curries_and_partials():\n dsk = {\"x\": 1, \"y\": 2, \"a\": (partial(add, 1), \"x\"), \"b\": (inc, \"a\")}\n\n result = inline_functions(dsk, [], fast_functions=set([add]))\n assert result[\"b\"] == (inc, dsk[\"a\"])\n assert \"a\" not in result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_inline_functions_non_hashable_test_inline_functions_non_hashable.assert_b_not_in_result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_inline_functions_non_hashable_test_inline_functions_non_hashable.assert_b_not_in_result", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 259, "end_line": 273, "span_ids": ["test_inline_functions_non_hashable"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_inline_functions_non_hashable():\n class NonHashableCallable(object):\n def __call__(self, a):\n return a + 1\n\n def __hash__(self):\n raise TypeError(\"Not hashable\")\n\n nohash = NonHashableCallable()\n\n dsk = {\"a\": 1, \"b\": (inc, \"a\"), \"c\": (nohash, \"b\"), \"d\": (inc, \"c\")}\n\n result = inline_functions(dsk, [], fast_functions={inc})\n assert result[\"c\"] == (nohash, dsk[\"b\"])\n assert \"b\" not in result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_inline_doesnt_shrink_fast_functions_at_top_test_inline_traverses_lists.assert_result_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_inline_doesnt_shrink_fast_functions_at_top_test_inline_traverses_lists.assert_result_expected", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": 
"text/x-python", "category": "test", "start_line": 276, "end_line": 287, "span_ids": ["test_inline_traverses_lists", "test_inline_doesnt_shrink_fast_functions_at_top"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_inline_doesnt_shrink_fast_functions_at_top():\n dsk = {\"x\": (inc, \"y\"), \"y\": 1}\n result = inline_functions(dsk, [], fast_functions=set([inc]))\n assert result == dsk\n\n\ndef test_inline_traverses_lists():\n x, y, i, d = \"xyid\"\n dsk = {\"out\": (sum, [i, d]), i: (inc, x), d: (double, y), x: 1, y: 1}\n expected = {\"out\": (sum, [(inc, x), d]), d: (double, y), x: 1, y: 1}\n result = inline_functions(dsk, [], fast_functions=set([inc]))\n assert result == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_inline_functions_protects_output_keys_test_inline_cull_dependencies.inline_d2_b_depende": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_inline_functions_protects_output_keys_test_inline_cull_dependencies.inline_d2_b_depende", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 290, "end_line": 312, "span_ids": ["test_functions_of", "test_inline_functions_protects_output_keys", "test_inline_cull_dependencies"], "tokens": 305}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_inline_functions_protects_output_keys():\n dsk = {\"x\": (inc, 1), \"y\": (double, \"x\")}\n assert inline_functions(dsk, [], [inc]) == {\"y\": (double, (inc, 1))}\n assert inline_functions(dsk, [\"x\"], [inc]) == {\"y\": (double, \"x\"), \"x\": (inc, 1)}\n\n\ndef test_functions_of():\n a = lambda x: x\n b = lambda x: x\n assert functions_of((a, 1)) == set([a])\n assert functions_of((a, (b, 1))) == set([a, b])\n assert functions_of((a, [(b, 1)])) == set([a, b])\n assert functions_of((a, [[[(b, 1)]]])) == set([a, b])\n assert functions_of(1) == set()\n assert functions_of(a) == set()\n assert functions_of((a,)) == set([a])\n\n\ndef test_inline_cull_dependencies():\n d = {\"a\": 1, \"b\": \"a\", \"c\": \"b\", \"d\": [\"a\", \"b\", \"c\"], \"e\": (add, (len, \"d\"), \"a\")}\n\n d2, dependencies = cull(d, [\"d\", \"e\"])\n inline(d2, {\"b\"}, dependencies=dependencies)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input_test_fuse_reductions_single_input.d_3._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input_test_fuse_reductions_single_input.d_3._", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 315, "end_line": 367, "span_ids": ["test_fuse_reductions_single_input"], "tokens": 771}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_reductions_single_input():\n def f(*args):\n return args\n\n d = {\"a\": 1, \"b1\": (f, \"a\"), \"b2\": (f, \"a\", \"a\"), \"c\": (f, \"b1\", \"b2\")}\n assert fuse(d, ave_width=1.9, rename_keys=False) == with_deps(d)\n assert fuse(d, ave_width=1.9, rename_keys=True) == with_deps(d)\n assert fuse(d, ave_width=2, rename_keys=False) == with_deps(\n {\"a\": 1, \"c\": (f, (f, \"a\"), (f, \"a\", \"a\"))}\n )\n assert fuse(d, ave_width=2, rename_keys=True) == with_deps(\n {\"a\": 1, \"b1-b2-c\": (f, (f, \"a\"), (f, \"a\", \"a\")), \"c\": \"b1-b2-c\"}\n )\n\n d = {\n \"a\": 1,\n \"b1\": (f, \"a\"),\n \"b2\": (f, \"a\", \"a\"),\n \"b3\": (f, \"a\", \"a\", \"a\"),\n \"c\": (f, \"b1\", \"b2\", \"b3\"),\n }\n assert fuse(d, ave_width=2.9, rename_keys=False) == with_deps(d)\n assert fuse(d, ave_width=2.9, rename_keys=True) == with_deps(d)\n assert fuse(d, ave_width=3, rename_keys=False) == with_deps(\n {\"a\": 1, \"c\": (f, (f, \"a\"), (f, \"a\", \"a\"), (f, \"a\", \"a\", \"a\"))}\n )\n assert fuse(d, ave_width=3, rename_keys=True) == with_deps(\n {\n \"a\": 1,\n \"b1-b2-b3-c\": (f, (f, \"a\"), (f, \"a\", \"a\"), (f, \"a\", \"a\", \"a\")),\n \"c\": \"b1-b2-b3-c\",\n }\n )\n\n d = {\"a\": 1, \"b1\": (f, \"a\"), \"b2\": (f, \"a\"), \"c\": (f, \"a\", \"b1\", \"b2\")}\n assert fuse(d, ave_width=1.9, rename_keys=False) == with_deps(d)\n assert fuse(d, ave_width=1.9, rename_keys=True) == with_deps(d)\n assert fuse(d, ave_width=2, rename_keys=False) == with_deps(\n {\"a\": 1, \"c\": (f, \"a\", (f, \"a\"), (f, \"a\"))}\n )\n assert fuse(d, ave_width=2, rename_keys=True) == with_deps(\n {\"a\": 1, \"b1-b2-c\": (f, \"a\", (f, \"a\"), (f, \"a\")), \"c\": \"b1-b2-c\"}\n )\n\n d = {\n \"a\": 1,\n \"b1\": (f, \"a\"),\n \"b2\": (f, \"a\"),\n \"c\": (f, \"b1\", \"b2\"),\n \"d1\": (f, \"c\"),\n \"d2\": (f, \"c\"),\n \"e\": (f, \"d1\", \"d2\"),\n }\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.None_12_test_fuse_reductions_single_input.None_22": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.None_12_test_fuse_reductions_single_input.None_22", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 368, "end_line": 419, "span_ids": ["test_fuse_reductions_single_input"], "tokens": 690}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_reductions_single_input():\n # ... other code\n assert fuse(d, ave_width=1.9, rename_keys=False) == with_deps(d)\n assert fuse(d, ave_width=1.9, rename_keys=True) == with_deps(d)\n assert fuse(d, ave_width=2, rename_keys=False) == with_deps(\n {\"a\": 1, \"c\": (f, (f, \"a\"), (f, \"a\")), \"e\": (f, (f, \"c\"), (f, \"c\"))}\n )\n assert fuse(d, ave_width=2, rename_keys=True) == with_deps(\n {\n \"a\": 1,\n \"b1-b2-c\": (f, (f, \"a\"), (f, \"a\")),\n \"d1-d2-e\": (f, (f, \"c\"), (f, \"c\")),\n \"c\": \"b1-b2-c\",\n \"e\": \"d1-d2-e\",\n }\n )\n\n d = {\n \"a\": 1,\n \"b1\": (f, \"a\"),\n \"b2\": (f, \"a\"),\n \"b3\": (f, \"a\"),\n \"b4\": (f, \"a\"),\n \"c1\": (f, \"b1\", \"b2\"),\n \"c2\": (f, \"b3\", \"b4\"),\n \"d\": (f, \"c1\", \"c2\"),\n }\n assert fuse(d, ave_width=1.9, rename_keys=False) == with_deps(d)\n assert fuse(d, ave_width=1.9, rename_keys=True) == with_deps(d)\n expected = with_deps(\n {\n \"a\": 1,\n \"c1\": (f, (f, \"a\"), (f, \"a\")),\n \"c2\": (f, (f, \"a\"), (f, \"a\")),\n \"d\": (f, \"c1\", \"c2\"),\n }\n )\n assert fuse(d, ave_width=2, rename_keys=False) == expected\n assert fuse(d, ave_width=2.9, rename_keys=False) == expected\n expected = with_deps(\n {\n \"a\": 1,\n \"b1-b2-c1\": (f, (f, \"a\"), (f, \"a\")),\n \"b3-b4-c2\": (f, (f, \"a\"), (f, \"a\")),\n \"d\": (f, \"c1\", \"c2\"),\n \"c1\": \"b1-b2-c1\",\n \"c2\": \"b3-b4-c2\",\n }\n )\n assert fuse(d, ave_width=2, rename_keys=True) == expected\n assert fuse(d, ave_width=2.9, rename_keys=True) == expected\n assert fuse(d, ave_width=3, rename_keys=False) == with_deps(\n {\"a\": 1, \"d\": (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\")))}\n )\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.None_23_test_fuse_reductions_single_input.None_27": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.None_23_test_fuse_reductions_single_input.None_27", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 420, "end_line": 465, "span_ids": ["test_fuse_reductions_single_input"], "tokens": 561}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_reductions_single_input():\n # ... other code\n assert fuse(d, ave_width=3, rename_keys=True) == with_deps(\n {\n \"a\": 1,\n \"b1-b2-b3-b4-c1-c2-d\": (\n f,\n (f, (f, \"a\"), (f, \"a\")),\n (f, (f, \"a\"), (f, \"a\")),\n ),\n \"d\": \"b1-b2-b3-b4-c1-c2-d\",\n }\n )\n\n d = {\n \"a\": 1,\n \"b1\": (f, \"a\"),\n \"b2\": (f, \"a\"),\n \"b3\": (f, \"a\"),\n \"b4\": (f, \"a\"),\n \"b5\": (f, \"a\"),\n \"b6\": (f, \"a\"),\n \"b7\": (f, \"a\"),\n \"b8\": (f, \"a\"),\n \"c1\": (f, \"b1\", \"b2\"),\n \"c2\": (f, \"b3\", \"b4\"),\n \"c3\": (f, \"b5\", \"b6\"),\n \"c4\": (f, \"b7\", \"b8\"),\n \"d1\": (f, \"c1\", \"c2\"),\n \"d2\": (f, \"c3\", \"c4\"),\n \"e\": (f, \"d1\", \"d2\"),\n }\n assert fuse(d, ave_width=1.9, rename_keys=False) == with_deps(d)\n assert fuse(d, ave_width=1.9, rename_keys=True) == with_deps(d)\n expected = with_deps(\n {\n \"a\": 1,\n \"c1\": (f, (f, \"a\"), (f, \"a\")),\n \"c2\": (f, (f, \"a\"), (f, \"a\")),\n \"c3\": (f, (f, \"a\"), (f, \"a\")),\n \"c4\": (f, (f, \"a\"), (f, \"a\")),\n \"d1\": (f, \"c1\", \"c2\"),\n \"d2\": (f, \"c3\", \"c4\"),\n \"e\": (f, \"d1\", \"d2\"),\n }\n )\n assert fuse(d, ave_width=2, rename_keys=False) == expected\n assert fuse(d, ave_width=2.9, rename_keys=False) == expected\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.expected_9_test_fuse_reductions_single_input.None_34": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.expected_9_test_fuse_reductions_single_input.None_34", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 466, "end_line": 523, "span_ids": ["test_fuse_reductions_single_input"], "tokens": 754}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_reductions_single_input():\n # ... other code\n expected = with_deps(\n {\n \"a\": 1,\n \"b1-b2-c1\": (f, (f, \"a\"), (f, \"a\")),\n \"b3-b4-c2\": (f, (f, \"a\"), (f, \"a\")),\n \"b5-b6-c3\": (f, (f, \"a\"), (f, \"a\")),\n \"b7-b8-c4\": (f, (f, \"a\"), (f, \"a\")),\n \"d1\": (f, \"c1\", \"c2\"),\n \"d2\": (f, \"c3\", \"c4\"),\n \"e\": (f, \"d1\", \"d2\"),\n \"c1\": \"b1-b2-c1\",\n \"c2\": \"b3-b4-c2\",\n \"c3\": \"b5-b6-c3\",\n \"c4\": \"b7-b8-c4\",\n }\n )\n assert fuse(d, ave_width=2, rename_keys=True) == expected\n assert fuse(d, ave_width=2.9, rename_keys=True) == expected\n expected = with_deps(\n {\n \"a\": 1,\n \"d1\": (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n \"d2\": (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n \"e\": (f, \"d1\", \"d2\"),\n }\n )\n assert fuse(d, ave_width=3, rename_keys=False) == expected\n assert fuse(d, ave_width=4.6, rename_keys=False) == expected\n expected = with_deps(\n {\n \"a\": 1,\n \"b1-b2-b3-b4-c1-c2-d1\": (\n f,\n (f, (f, \"a\"), (f, \"a\")),\n (f, (f, \"a\"), (f, \"a\")),\n ),\n \"b5-b6-b7-b8-c3-c4-d2\": (\n f,\n (f, (f, \"a\"), (f, \"a\")),\n (f, (f, \"a\"), (f, \"a\")),\n ),\n \"e\": (f, \"d1\", \"d2\"),\n \"d1\": \"b1-b2-b3-b4-c1-c2-d1\",\n \"d2\": \"b5-b6-b7-b8-c3-c4-d2\",\n }\n )\n assert fuse(d, ave_width=3, rename_keys=True) == expected\n assert fuse(d, ave_width=4.6, rename_keys=True) == expected\n assert fuse(d, ave_width=4.7, rename_keys=False) == with_deps(\n {\n \"a\": 1,\n \"e\": (\n f,\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n ),\n }\n )\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.None_35_test_fuse_reductions_single_input.None_37": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.None_35_test_fuse_reductions_single_input.None_37", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 524, "end_line": 571, "span_ids": ["test_fuse_reductions_single_input"], "tokens": 667}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_reductions_single_input():\n # ... other code\n assert fuse(d, ave_width=4.7, rename_keys=True) == with_deps(\n {\n \"a\": 1,\n \"b1-b2-b3-b4-b5-b6-b7-b8-c1-c2-c3-c4-d1-d2-e\": (\n f,\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n ),\n \"e\": \"b1-b2-b3-b4-b5-b6-b7-b8-c1-c2-c3-c4-d1-d2-e\",\n }\n )\n\n d = {\n \"a\": 1,\n \"b1\": (f, \"a\"),\n \"b2\": (f, \"a\"),\n \"b3\": (f, \"a\"),\n \"b4\": (f, \"a\"),\n \"b5\": (f, \"a\"),\n \"b6\": (f, \"a\"),\n \"b7\": (f, \"a\"),\n \"b8\": (f, \"a\"),\n \"b9\": (f, \"a\"),\n \"b10\": (f, \"a\"),\n \"b11\": (f, \"a\"),\n \"b12\": (f, \"a\"),\n \"b13\": (f, \"a\"),\n \"b14\": (f, \"a\"),\n \"b15\": (f, \"a\"),\n \"b16\": (f, \"a\"),\n \"c1\": (f, \"b1\", \"b2\"),\n \"c2\": (f, \"b3\", \"b4\"),\n \"c3\": (f, \"b5\", \"b6\"),\n \"c4\": (f, \"b7\", \"b8\"),\n \"c5\": (f, \"b9\", \"b10\"),\n \"c6\": (f, \"b11\", \"b12\"),\n \"c7\": (f, \"b13\", \"b14\"),\n \"c8\": (f, \"b15\", \"b16\"),\n \"d1\": (f, \"c1\", \"c2\"),\n \"d2\": (f, \"c3\", \"c4\"),\n \"d3\": (f, \"c5\", \"c6\"),\n \"d4\": (f, \"c7\", \"c8\"),\n \"e1\": (f, \"d1\", \"d2\"),\n \"e2\": (f, \"d3\", \"d4\"),\n \"f\": (f, \"e1\", \"e2\"),\n }\n assert fuse(d, ave_width=1.9, rename_keys=False) == with_deps(d)\n assert fuse(d, ave_width=1.9, rename_keys=True) == with_deps(d)\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.expected_13_test_fuse_reductions_single_input.None_39": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.expected_13_test_fuse_reductions_single_input.None_39", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 572, "end_line": 593, "span_ids": ["test_fuse_reductions_single_input"], "tokens": 347}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_reductions_single_input():\n # ... other code\n expected = with_deps(\n {\n \"a\": 1,\n \"c1\": (f, (f, \"a\"), (f, \"a\")),\n \"c2\": (f, (f, \"a\"), (f, \"a\")),\n \"c3\": (f, (f, \"a\"), (f, \"a\")),\n \"c4\": (f, (f, \"a\"), (f, \"a\")),\n \"c5\": (f, (f, \"a\"), (f, \"a\")),\n \"c6\": (f, (f, \"a\"), (f, \"a\")),\n \"c7\": (f, (f, \"a\"), (f, \"a\")),\n \"c8\": (f, (f, \"a\"), (f, \"a\")),\n \"d1\": (f, \"c1\", \"c2\"),\n \"d2\": (f, \"c3\", \"c4\"),\n \"d3\": (f, \"c5\", \"c6\"),\n \"d4\": (f, \"c7\", \"c8\"),\n \"e1\": (f, \"d1\", \"d2\"),\n \"e2\": (f, \"d3\", \"d4\"),\n \"f\": (f, \"e1\", \"e2\"),\n }\n )\n assert fuse(d, ave_width=2, rename_keys=False) == expected\n assert fuse(d, ave_width=2.9, rename_keys=False) == expected\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.expected_14_test_fuse_reductions_single_input.None_43": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.expected_14_test_fuse_reductions_single_input.None_43", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 594, "end_line": 637, "span_ids": ["test_fuse_reductions_single_input"], "tokens": 741}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_reductions_single_input():\n # ... 
other code\n expected = with_deps(\n {\n \"a\": 1,\n \"b1-b2-c1\": (f, (f, \"a\"), (f, \"a\")),\n \"b3-b4-c2\": (f, (f, \"a\"), (f, \"a\")),\n \"b5-b6-c3\": (f, (f, \"a\"), (f, \"a\")),\n \"b7-b8-c4\": (f, (f, \"a\"), (f, \"a\")),\n \"b10-b9-c5\": (f, (f, \"a\"), (f, \"a\")),\n \"b11-b12-c6\": (f, (f, \"a\"), (f, \"a\")),\n \"b13-b14-c7\": (f, (f, \"a\"), (f, \"a\")),\n \"b15-b16-c8\": (f, (f, \"a\"), (f, \"a\")),\n \"d1\": (f, \"c1\", \"c2\"),\n \"d2\": (f, \"c3\", \"c4\"),\n \"d3\": (f, \"c5\", \"c6\"),\n \"d4\": (f, \"c7\", \"c8\"),\n \"e1\": (f, \"d1\", \"d2\"),\n \"e2\": (f, \"d3\", \"d4\"),\n \"f\": (f, \"e1\", \"e2\"),\n \"c1\": \"b1-b2-c1\",\n \"c2\": \"b3-b4-c2\",\n \"c3\": \"b5-b6-c3\",\n \"c4\": \"b7-b8-c4\",\n \"c5\": \"b10-b9-c5\",\n \"c6\": \"b11-b12-c6\",\n \"c7\": \"b13-b14-c7\",\n \"c8\": \"b15-b16-c8\",\n }\n )\n assert fuse(d, ave_width=2, rename_keys=True) == expected\n assert fuse(d, ave_width=2.9, rename_keys=True) == expected\n expected = with_deps(\n {\n \"a\": 1,\n \"d1\": (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n \"d2\": (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n \"d3\": (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n \"d4\": (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n \"e1\": (f, \"d1\", \"d2\"),\n \"e2\": (f, \"d3\", \"d4\"),\n \"f\": (f, \"e1\", \"e2\"),\n }\n )\n assert fuse(d, ave_width=3, rename_keys=False) == expected\n assert fuse(d, ave_width=4.6, rename_keys=False) == expected\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.expected_16_test_fuse_reductions_single_input.None_47": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.expected_16_test_fuse_reductions_single_input.None_47", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 638, "end_line": 689, "span_ids": ["test_fuse_reductions_single_input"], "tokens": 661}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_reductions_single_input():\n # ... 
other code\n expected = with_deps(\n {\n \"a\": 1,\n \"b1-b2-b3-b4-c1-c2-d1\": (\n f,\n (f, (f, \"a\"), (f, \"a\")),\n (f, (f, \"a\"), (f, \"a\")),\n ),\n \"b5-b6-b7-b8-c3-c4-d2\": (\n f,\n (f, (f, \"a\"), (f, \"a\")),\n (f, (f, \"a\"), (f, \"a\")),\n ),\n \"b10-b11-b12-b9-c5-c6-d3\": (\n f,\n (f, (f, \"a\"), (f, \"a\")),\n (f, (f, \"a\"), (f, \"a\")),\n ),\n \"b13-b14-b15-b16-c7-c8-d4\": (\n f,\n (f, (f, \"a\"), (f, \"a\")),\n (f, (f, \"a\"), (f, \"a\")),\n ),\n \"e1\": (f, \"d1\", \"d2\"),\n \"e2\": (f, \"d3\", \"d4\"),\n \"f\": (f, \"e1\", \"e2\"),\n \"d1\": \"b1-b2-b3-b4-c1-c2-d1\",\n \"d2\": \"b5-b6-b7-b8-c3-c4-d2\",\n \"d3\": \"b10-b11-b12-b9-c5-c6-d3\",\n \"d4\": \"b13-b14-b15-b16-c7-c8-d4\",\n }\n )\n assert fuse(d, ave_width=3, rename_keys=True) == expected\n assert fuse(d, ave_width=4.6, rename_keys=True) == expected\n expected = with_deps(\n {\n \"a\": 1,\n \"e1\": (\n f,\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n ),\n \"e2\": (\n f,\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n ),\n \"f\": (f, \"e1\", \"e2\"),\n }\n )\n assert fuse(d, ave_width=4.7, rename_keys=False) == expected\n assert fuse(d, ave_width=7.4, rename_keys=False) == expected\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.expected_18_test_fuse_reductions_single_input.None_50": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.expected_18_test_fuse_reductions_single_input.None_50", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 690, "end_line": 727, "span_ids": ["test_fuse_reductions_single_input"], "tokens": 582}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_reductions_single_input():\n # ... 
other code\n expected = with_deps(\n {\n \"a\": 1,\n \"b1-b2-b3-b4-b5-b6-b7-b8-c1-c2-c3-c4-d1-d2-e1\": (\n f,\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n ),\n \"b10-b11-b12-b13-b14-b15-b16-b9-c5-c6-c7-c8-d3-d4-e2\": (\n f,\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n ),\n \"f\": (f, \"e1\", \"e2\"),\n \"e1\": \"b1-b2-b3-b4-b5-b6-b7-b8-c1-c2-c3-c4-d1-d2-e1\",\n \"e2\": \"b10-b11-b12-b13-b14-b15-b16-b9-c5-c6-c7-c8-d3-d4-e2\",\n }\n )\n assert fuse(d, ave_width=4.7, rename_keys=True) == expected\n assert fuse(d, ave_width=7.4, rename_keys=True) == expected\n assert fuse(d, ave_width=7.5, rename_keys=False) == with_deps(\n {\n \"a\": 1,\n \"f\": (\n f,\n (\n f,\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n ),\n (\n f,\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n ),\n ),\n }\n )\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.None_51_test_fuse_reductions_single_input.None_57": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.None_51_test_fuse_reductions_single_input.None_57", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 728, "end_line": 766, "span_ids": ["test_fuse_reductions_single_input"], "tokens": 694}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_reductions_single_input():\n # ... 
other code\n assert fuse(d, ave_width=7.5, rename_keys=True) == with_deps(\n {\n \"a\": 1,\n \"b1-b10-b11-b12-b13-b14-b15-b16-b2-b3-b4-b5-b6-b7-b8-b9-c1-c2-c3-c4-c5-c6-c7-c8-d1-d2-d3-d4-e1-e2-f\": (\n f,\n (\n f,\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n ),\n (\n f,\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n ),\n ),\n \"f\": \"b1-b10-b11-b12-b13-b14-b15-b16-b2-b3-b4-b5-b6-b7-b8-b9-c1-c2-c3-c4-c5-c6-c7-c8-d1-d2-d3-d4-e1-e2-f\",\n }\n )\n\n d = {\"a\": 1, \"b\": (f, \"a\")}\n assert fuse(d, ave_width=1, rename_keys=False) == with_deps({\"b\": (f, 1)})\n assert fuse(d, ave_width=1, rename_keys=True) == with_deps(\n {\"a-b\": (f, 1), \"b\": \"a-b\"}\n )\n\n d = {\"a\": 1, \"b\": (f, \"a\"), \"c\": (f, \"b\"), \"d\": (f, \"c\")}\n assert fuse(d, ave_width=1, rename_keys=False) == with_deps({\"d\": (f, (f, (f, 1)))})\n assert fuse(d, ave_width=1, rename_keys=True) == with_deps(\n {\"a-b-c-d\": (f, (f, (f, 1))), \"d\": \"a-b-c-d\"}\n )\n\n d = {\"a\": 1, \"b\": (f, \"a\"), \"c\": (f, \"a\", \"b\"), \"d\": (f, \"a\", \"c\")}\n assert fuse(d, ave_width=1, rename_keys=False) == with_deps(\n {\"a\": 1, \"d\": (f, \"a\", (f, \"a\", (f, \"a\")))}\n )\n assert fuse(d, ave_width=1, rename_keys=True) == with_deps(\n {\"a\": 1, \"b-c-d\": (f, \"a\", (f, \"a\", (f, \"a\"))), \"d\": \"b-c-d\"}\n )\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.d_22_test_fuse_reductions_single_input.expected_27.with_deps_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.d_22_test_fuse_reductions_single_input.expected_27.with_deps_", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 768, "end_line": 831, "span_ids": ["test_fuse_reductions_single_input"], "tokens": 779}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_reductions_single_input():\n # ... 
other code\n\n d = {\n \"a\": 1,\n \"b1\": (f, \"a\"),\n \"b2\": (f, \"a\"),\n \"c1\": (f, \"b1\"),\n \"d1\": (f, \"c1\"),\n \"e1\": (f, \"d1\"),\n \"f\": (f, \"e1\", \"b2\"),\n }\n expected = with_deps(\n {\"a\": 1, \"b2\": (f, \"a\"), \"e1\": (f, (f, (f, (f, \"a\")))), \"f\": (f, \"e1\", \"b2\")}\n )\n assert fuse(d, ave_width=1, rename_keys=False) == expected\n assert fuse(d, ave_width=1.9, rename_keys=False) == expected\n expected = with_deps(\n {\n \"a\": 1,\n \"b2\": (f, \"a\"),\n \"b1-c1-d1-e1\": (f, (f, (f, (f, \"a\")))),\n \"f\": (f, \"e1\", \"b2\"),\n \"e1\": \"b1-c1-d1-e1\",\n }\n )\n assert fuse(d, ave_width=1, rename_keys=True) == expected\n assert fuse(d, ave_width=1.9, rename_keys=True) == expected\n assert fuse(d, ave_width=2, rename_keys=False) == with_deps(\n {\"a\": 1, \"f\": (f, (f, (f, (f, (f, \"a\")))), (f, \"a\"))}\n )\n assert fuse(d, ave_width=2, rename_keys=True) == with_deps(\n {\n \"a\": 1,\n \"b1-b2-c1-d1-e1-f\": (f, (f, (f, (f, (f, \"a\")))), (f, \"a\")),\n \"f\": \"b1-b2-c1-d1-e1-f\",\n }\n )\n\n d = {\n \"a\": 1,\n \"b1\": (f, \"a\"),\n \"b2\": (f, \"a\"),\n \"c1\": (f, \"a\", \"b1\"),\n \"d1\": (f, \"a\", \"c1\"),\n \"e1\": (f, \"a\", \"d1\"),\n \"f\": (f, \"a\", \"e1\", \"b2\"),\n }\n expected = with_deps(\n {\n \"a\": 1,\n \"b2\": (f, \"a\"),\n \"e1\": (f, \"a\", (f, \"a\", (f, \"a\", (f, \"a\")))),\n \"f\": (f, \"a\", \"e1\", \"b2\"),\n }\n )\n assert fuse(d, ave_width=1, rename_keys=False) == expected\n assert fuse(d, ave_width=1.9, rename_keys=False) == expected\n expected = with_deps(\n {\n \"a\": 1,\n \"b2\": (f, \"a\"),\n \"b1-c1-d1-e1\": (f, \"a\", (f, \"a\", (f, \"a\", (f, \"a\")))),\n \"f\": (f, \"a\", \"e1\", \"b2\"),\n \"e1\": \"b1-c1-d1-e1\",\n }\n )\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.None_66_test_fuse_reductions_single_input.d_29._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.None_66_test_fuse_reductions_single_input.d_29._", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 832, "end_line": 896, "span_ids": ["test_fuse_reductions_single_input"], "tokens": 750}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_reductions_single_input():\n # ... 
other code\n assert fuse(d, ave_width=1, rename_keys=True) == expected\n assert fuse(d, ave_width=1.9, rename_keys=True) == expected\n assert fuse(d, ave_width=2, rename_keys=False) == with_deps(\n {\"a\": 1, \"f\": (f, \"a\", (f, \"a\", (f, \"a\", (f, \"a\", (f, \"a\")))), (f, \"a\"))}\n )\n assert fuse(d, ave_width=2, rename_keys=True) == with_deps(\n {\n \"a\": 1,\n \"b1-b2-c1-d1-e1-f\": (\n f,\n \"a\",\n (f, \"a\", (f, \"a\", (f, \"a\", (f, \"a\")))),\n (f, \"a\"),\n ),\n \"f\": \"b1-b2-c1-d1-e1-f\",\n }\n )\n\n d = {\n \"a\": 1,\n \"b1\": (f, \"a\"),\n \"b2\": (f, \"a\"),\n \"b3\": (f, \"a\"),\n \"c1\": (f, \"b1\"),\n \"c2\": (f, \"b2\"),\n \"c3\": (f, \"b3\"),\n \"d1\": (f, \"c1\"),\n \"d2\": (f, \"c2\"),\n \"d3\": (f, \"c3\"),\n \"e\": (f, \"d1\", \"d2\", \"d3\"),\n \"f\": (f, \"e\"),\n \"g\": (f, \"f\"),\n }\n assert fuse(d, ave_width=1, rename_keys=False) == with_deps(\n {\n \"a\": 1,\n \"d1\": (f, (f, (f, \"a\"))),\n \"d2\": (f, (f, (f, \"a\"))),\n \"d3\": (f, (f, (f, \"a\"))),\n \"g\": (f, (f, (f, \"d1\", \"d2\", \"d3\"))),\n }\n )\n assert fuse(d, ave_width=1, rename_keys=True) == with_deps(\n {\n \"a\": 1,\n \"b1-c1-d1\": (f, (f, (f, \"a\"))),\n \"b2-c2-d2\": (f, (f, (f, \"a\"))),\n \"b3-c3-d3\": (f, (f, (f, \"a\"))),\n \"e-f-g\": (f, (f, (f, \"d1\", \"d2\", \"d3\"))),\n \"d1\": \"b1-c1-d1\",\n \"d2\": \"b2-c2-d2\",\n \"d3\": \"b3-c3-d3\",\n \"g\": \"e-f-g\",\n }\n )\n\n d = {\n \"a\": 1,\n \"b\": (f, \"a\"),\n \"c\": (f, \"b\"),\n \"d\": (f, \"b\", \"c\"),\n \"e\": (f, \"d\"),\n \"f\": (f, \"e\"),\n \"g\": (f, \"d\", \"f\"),\n }\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.None_72_test_fuse_reductions_single_input.None_73": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.None_72_test_fuse_reductions_single_input.None_73", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 897, "end_line": 909, "span_ids": ["test_fuse_reductions_single_input"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_reductions_single_input():\n # ... 
other code\n assert fuse(d, ave_width=1, rename_keys=False) == with_deps(\n {\"b\": (f, 1), \"d\": (f, \"b\", (f, \"b\")), \"g\": (f, \"d\", (f, (f, \"d\")))}\n )\n assert fuse(d, ave_width=1, rename_keys=True) == with_deps(\n {\n \"a-b\": (f, 1),\n \"c-d\": (f, \"b\", (f, \"b\")),\n \"e-f-g\": (f, \"d\", (f, (f, \"d\"))),\n \"b\": \"a-b\",\n \"d\": \"c-d\",\n \"g\": \"e-f-g\",\n }\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_stressed_test_fuse_stressed.d._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_stressed_test_fuse_stressed.d._", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 912, "end_line": 973, "span_ids": ["test_fuse_stressed"], "tokens": 1058}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_stressed():\n def f(*args):\n return args\n\n d = {\n \"array-original-27b9f9d257a80fa6adae06a98faf71eb\": 1,\n (\"cholesky-upper-26a6b670a8aabb7e2f8936db7ccb6a88\", 0, 0): (\n f,\n (\"cholesky-26a6b670a8aabb7e2f8936db7ccb6a88\", 0, 0),\n ),\n (\"cholesky-26a6b670a8aabb7e2f8936db7ccb6a88\", 1, 0): (\n f,\n (\"cholesky-upper-26a6b670a8aabb7e2f8936db7ccb6a88\", 0, 1),\n ),\n (\"array-27b9f9d257a80fa6adae06a98faf71eb\", 0, 0): (\n f,\n \"array-original-27b9f9d257a80fa6adae06a98faf71eb\",\n (slice(0, 10, None), slice(0, 10, None)),\n ),\n (\"cholesky-upper-26a6b670a8aabb7e2f8936db7ccb6a88\", 1, 0): (\n \"cholesky-26a6b670a8aabb7e2f8936db7ccb6a88\",\n 0,\n 1,\n ),\n (\"cholesky-26a6b670a8aabb7e2f8936db7ccb6a88\", 1, 1): (\n f,\n (\n f,\n (\"array-27b9f9d257a80fa6adae06a98faf71eb\", 1, 1),\n (f, [(\"cholesky-lt-dot-26a6b670a8aabb7e2f8936db7ccb6a88\", 1, 0, 1, 0)]),\n ),\n ),\n (\"cholesky-lt-dot-26a6b670a8aabb7e2f8936db7ccb6a88\", 1, 0, 1, 0): (\n f,\n (\"cholesky-26a6b670a8aabb7e2f8936db7ccb6a88\", 1, 0),\n (\"cholesky-upper-26a6b670a8aabb7e2f8936db7ccb6a88\", 0, 1),\n ),\n (\"array-27b9f9d257a80fa6adae06a98faf71eb\", 0, 1): (\n f,\n \"array-original-27b9f9d257a80fa6adae06a98faf71eb\",\n (slice(0, 10, None), slice(10, 20, None)),\n ),\n (\"cholesky-upper-26a6b670a8aabb7e2f8936db7ccb6a88\", 1, 1): (\n f,\n (\"cholesky-26a6b670a8aabb7e2f8936db7ccb6a88\", 1, 1),\n ),\n (\"cholesky-26a6b670a8aabb7e2f8936db7ccb6a88\", 0, 1): (f, (10, 10)),\n (\"array-27b9f9d257a80fa6adae06a98faf71eb\", 1, 1): (\n f,\n \"array-original-27b9f9d257a80fa6adae06a98faf71eb\",\n (slice(10, 20, None), slice(10, 20, None)),\n ),\n (\"cholesky-upper-26a6b670a8aabb7e2f8936db7ccb6a88\", 0, 1): (\n f,\n (\"cholesky-26a6b670a8aabb7e2f8936db7ccb6a88\", 0, 0),\n (\"array-27b9f9d257a80fa6adae06a98faf71eb\", 0, 1),\n ),\n (\"cholesky-26a6b670a8aabb7e2f8936db7ccb6a88\", 0, 0): (\n f,\n (\"array-27b9f9d257a80fa6adae06a98faf71eb\", 0, 0),\n ),\n }\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_stressed.keys_test_fuse_stressed.assert_rv_with_deps_rv": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_stressed.keys_test_fuse_stressed.assert_rv_with_deps_rv", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 974, "end_line": 981, "span_ids": ["test_fuse_stressed"], "tokens": 188}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_stressed():\n # ... other code\n keys = {\n (\"cholesky-upper-26a6b670a8aabb7e2f8936db7ccb6a88\", 0, 0),\n (\"cholesky-upper-26a6b670a8aabb7e2f8936db7ccb6a88\", 0, 1),\n (\"cholesky-upper-26a6b670a8aabb7e2f8936db7ccb6a88\", 1, 0),\n (\"cholesky-upper-26a6b670a8aabb7e2f8936db7ccb6a88\", 1, 1),\n }\n rv = fuse(d, keys=keys, ave_width=2, rename_keys=True)\n assert rv == with_deps(rv[0])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_multiple_input_test_fuse_reductions_multiple_input.None_11": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_multiple_input_test_fuse_reductions_multiple_input.None_11", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 984, "end_line": 1035, "span_ids": ["test_fuse_reductions_multiple_input"], "tokens": 713}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_reductions_multiple_input():\n def f(*args):\n return args\n\n d = {\"a1\": 1, \"a2\": 2, \"b\": (f, \"a1\", \"a2\"), \"c\": (f, \"b\")}\n assert fuse(d, ave_width=2, rename_keys=False) == with_deps({\"c\": (f, (f, 1, 2))})\n assert fuse(d, ave_width=2, rename_keys=True) == with_deps(\n {\"a1-a2-b-c\": (f, (f, 1, 2)), \"c\": \"a1-a2-b-c\"}\n )\n assert fuse(d, ave_width=1, rename_keys=False) == with_deps(\n {\"a1\": 1, \"a2\": 2, \"c\": (f, (f, \"a1\", \"a2\"))}\n )\n assert fuse(d, ave_width=1, rename_keys=True) == with_deps(\n {\"a1\": 1, \"a2\": 2, \"b-c\": (f, (f, \"a1\", \"a2\")), \"c\": \"b-c\"}\n )\n\n d = {\n \"a1\": 1,\n \"a2\": 2,\n \"b1\": (f, \"a1\"),\n \"b2\": (f, \"a1\", \"a2\"),\n \"b3\": (f, 
\"a2\"),\n \"c\": (f, \"b1\", \"b2\", \"b3\"),\n }\n expected = with_deps(d)\n assert fuse(d, ave_width=1, rename_keys=False) == expected\n assert fuse(d, ave_width=2.9, rename_keys=False) == expected\n assert fuse(d, ave_width=1, rename_keys=True) == expected\n assert fuse(d, ave_width=2.9, rename_keys=True) == expected\n assert fuse(d, ave_width=3, rename_keys=False) == with_deps(\n {\"a1\": 1, \"a2\": 2, \"c\": (f, (f, \"a1\"), (f, \"a1\", \"a2\"), (f, \"a2\"))}\n )\n assert fuse(d, ave_width=3, rename_keys=True) == with_deps(\n {\n \"a1\": 1,\n \"a2\": 2,\n \"b1-b2-b3-c\": (f, (f, \"a1\"), (f, \"a1\", \"a2\"), (f, \"a2\")),\n \"c\": \"b1-b2-b3-c\",\n }\n )\n\n d = {\n \"a1\": 1,\n \"a2\": 2,\n \"b1\": (f, \"a1\"),\n \"b2\": (f, \"a1\", \"a2\"),\n \"b3\": (f, \"a2\"),\n \"c1\": (f, \"b1\", \"b2\"),\n \"c2\": (f, \"b2\", \"b3\"),\n }\n assert fuse(d, ave_width=1, rename_keys=False) == with_deps(d)\n assert fuse(d, ave_width=1, rename_keys=True) == with_deps(d)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_multiple_input.None_12_test_fuse_reductions_multiple_input.None_17": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_multiple_input.None_12_test_fuse_reductions_multiple_input.None_17", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 1036, "end_line": 1088, "span_ids": ["test_fuse_reductions_multiple_input"], "tokens": 619}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_reductions_multiple_input():\n # ... other code\n assert fuse(d, ave_width=2, rename_keys=False) == with_deps(\n {\n \"a1\": 1,\n \"a2\": 2,\n \"b2\": (f, \"a1\", \"a2\"),\n \"c1\": (f, (f, \"a1\"), \"b2\"),\n \"c2\": (f, \"b2\", (f, \"a2\")),\n }\n )\n assert fuse(d, ave_width=2, rename_keys=True) == with_deps(\n {\n \"a1\": 1,\n \"a2\": 2,\n \"b2\": (f, \"a1\", \"a2\"),\n \"b1-c1\": (f, (f, \"a1\"), \"b2\"),\n \"b3-c2\": (f, \"b2\", (f, \"a2\")),\n \"c1\": \"b1-c1\",\n \"c2\": \"b3-c2\",\n }\n )\n\n d = {\n \"a1\": 1,\n \"a2\": 2,\n \"b1\": (f, \"a1\"),\n \"b2\": (f, \"a1\", \"a2\"),\n \"b3\": (f, \"a2\"),\n \"c1\": (f, \"b1\", \"b2\"),\n \"c2\": (f, \"b2\", \"b3\"),\n \"d\": (f, \"c1\", \"c2\"),\n }\n assert fuse(d, ave_width=1, rename_keys=False) == with_deps(d)\n assert fuse(d, ave_width=1, rename_keys=True) == with_deps(d)\n\n # A more aggressive heuristic could do this at `ave_width=2`. Perhaps\n # we can improve this. 
Nevertheless, this is behaving as intended.\n assert fuse(d, ave_width=3, rename_keys=False) == with_deps(\n {\n \"a1\": 1,\n \"a2\": 2,\n \"b2\": (f, \"a1\", \"a2\"),\n \"d\": (f, (f, (f, \"a1\"), \"b2\"), (f, \"b2\", (f, \"a2\"))),\n }\n )\n assert fuse(d, ave_width=3, rename_keys=True) == with_deps(\n {\n \"a1\": 1,\n \"a2\": 2,\n \"b2\": (f, \"a1\", \"a2\"),\n \"b1-b3-c1-c2-d\": (f, (f, (f, \"a1\"), \"b2\"), (f, \"b2\", (f, \"a2\"))),\n \"d\": \"b1-b3-c1-c2-d\",\n }\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_func_with_kwargs_test_SubgraphCallable.assert_f2_1_2_f_1_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_func_with_kwargs_test_SubgraphCallable.assert_f2_1_2_f_1_2", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 1091, "end_line": 1137, "span_ids": ["test_SubgraphCallable", "func_with_kwargs"], "tokens": 454}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def func_with_kwargs(a, b, c=2):\n return a + b + c\n\n\ndef test_SubgraphCallable():\n non_hashable = [1, 2, 3]\n\n dsk = {\n \"a\": (apply, add, [\"in1\", 2]),\n \"b\": (\n apply,\n partial_by_order,\n [\"in2\"],\n {\"function\": func_with_kwargs, \"other\": [(1, 20)], \"c\": 4},\n ),\n \"c\": (\n apply,\n partial_by_order,\n [\"in2\", \"in1\"],\n {\"function\": func_with_kwargs, \"other\": [(1, 20)]},\n ),\n \"d\": (inc, \"a\"),\n \"e\": (add, \"c\", \"d\"),\n \"f\": [\"a\", 2, \"b\", (add, \"b\", (sum, non_hashable))],\n \"h\": (add, (sum, \"f\"), (sum, [\"a\", \"b\"])),\n }\n\n f = SubgraphCallable(dsk, \"h\", [\"in1\", \"in2\"], name=\"test\")\n\n assert f.name == \"test\"\n assert repr(f) == \"test\"\n\n f2 = SubgraphCallable(dsk, \"h\", [\"in1\", \"in2\"], name=\"test\")\n assert f == f2\n\n f3 = SubgraphCallable(dsk, \"g\", [\"in1\", \"in2\"], name=\"test\")\n assert f != f3\n\n assert hash(SubgraphCallable(None, None, [None]))\n assert hash(f3) != hash(f2)\n dsk2 = dsk.copy()\n dsk2.update({\"in1\": 1, \"in2\": 2})\n assert f(1, 2) == get_sync(cull(dsk2, [\"h\"])[0], [\"h\"])[0]\n assert f(1, 2) == f(1, 2)\n\n f2 = pickle.loads(pickle.dumps(f))\n assert f2(1, 2) == f(1, 2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_subgraphs_test_fuse_subgraphs.sols._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_subgraphs_test_fuse_subgraphs.sols._", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": 
"text/x-python", "category": "test", "start_line": 1131, "end_line": 1203, "span_ids": ["test_fuse_subgraphs"], "tokens": 662}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_subgraphs():\n dsk = {\n \"x-1\": 1,\n \"inc-1\": (inc, \"x-1\"),\n \"inc-2\": (inc, \"inc-1\"),\n \"add-1\": (add, \"x-1\", \"inc-2\"),\n \"inc-3\": (inc, \"add-1\"),\n \"inc-4\": (inc, \"inc-3\"),\n \"add-2\": (add, \"add-1\", \"inc-4\"),\n \"inc-5\": (inc, \"add-2\"),\n \"inc-6\": (inc, \"inc-5\"),\n }\n\n res = fuse(dsk, \"inc-6\", fuse_subgraphs=True)\n sol = with_deps(\n {\n \"inc-6\": \"add-inc-x-1\",\n \"add-inc-x-1\": (\n SubgraphCallable(\n {\n \"x-1\": 1,\n \"add-1\": (add, \"x-1\", (inc, (inc, \"x-1\"))),\n \"inc-6\": (inc, (inc, (add, \"add-1\", (inc, (inc, \"add-1\"))))),\n },\n \"inc-6\",\n (),\n ),\n ),\n }\n )\n assert res == sol\n\n res = fuse(dsk, \"inc-6\", fuse_subgraphs=True, rename_keys=False)\n sol = with_deps(\n {\n \"inc-6\": (\n SubgraphCallable(\n {\n \"x-1\": 1,\n \"add-1\": (add, \"x-1\", (inc, (inc, \"x-1\"))),\n \"inc-6\": (inc, (inc, (add, \"add-1\", (inc, (inc, \"add-1\"))))),\n },\n \"inc-6\",\n (),\n ),\n )\n }\n )\n assert res == sol\n\n res = fuse(dsk, \"add-2\", fuse_subgraphs=True)\n sol = with_deps(\n {\n \"add-inc-x-1\": (\n SubgraphCallable(\n {\n \"x-1\": 1,\n \"add-1\": (add, \"x-1\", (inc, (inc, \"x-1\"))),\n \"add-2\": (add, \"add-1\", (inc, (inc, \"add-1\"))),\n },\n \"add-2\",\n (),\n ),\n ),\n \"add-2\": \"add-inc-x-1\",\n \"inc-6\": (inc, (inc, \"add-2\")),\n }\n )\n assert res == sol\n\n res = fuse(dsk, \"inc-2\", fuse_subgraphs=True)\n # ordering of arguments is unstable, check all permutations\n sols = []\n # ordering of arguments is unstable, check all permutations\n # ... other code\n for inkeys in itertools.permutations((\"x-1\", \"inc-2\")):\n # ... other code\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_subgraphs.for_inkeys_in_itertools_p_test_fuse_subgraphs.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_subgraphs.for_inkeys_in_itertools_p_test_fuse_subgraphs.None_4", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 1204, "end_line": 1255, "span_ids": ["test_fuse_subgraphs"], "tokens": 414}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_subgraphs():\n # ... other code\n assert res == sol\n # ... 
other code\n for inkeys in itertools.permutations((\"x-1\", \"inc-2\")):\n sols.append(\n with_deps(\n {\n \"x-1\": 1,\n \"inc-2\": (inc, (inc, \"x-1\")),\n \"inc-6\": \"inc-add-1\",\n \"inc-add-1\": (\n SubgraphCallable(\n {\n \"add-1\": (add, \"x-1\", \"inc-2\"),\n \"inc-6\": (\n inc,\n (inc, (add, \"add-1\", (inc, (inc, \"add-1\")))),\n ),\n },\n \"inc-6\",\n inkeys,\n ),\n )\n + inkeys,\n }\n )\n )\n assert res in sols\n\n res = fuse(dsk, [\"inc-2\", \"add-2\"], fuse_subgraphs=True)\n # ordering of arguments is unstable, check all permutations\n sols = []\n for inkeys in itertools.permutations((\"x-1\", \"inc-2\")):\n sols.append(\n with_deps(\n {\n \"x-1\": 1,\n \"inc-2\": (inc, (inc, \"x-1\")),\n \"inc-add-1\": (\n SubgraphCallable(\n {\n \"add-1\": (add, \"x-1\", \"inc-2\"),\n \"add-2\": (add, \"add-1\", (inc, (inc, \"add-1\"))),\n },\n \"add-2\",\n inkeys,\n ),\n )\n + inkeys,\n \"add-2\": \"inc-add-1\",\n \"inc-6\": (inc, (inc, \"add-2\")),\n }\n )\n )\n assert res in sols", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_subgraphs_linear_chains_of_duplicate_deps_test_fuse_subgraphs_linear_chains_of_duplicate_deps.assert_res_sol": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_subgraphs_linear_chains_of_duplicate_deps_test_fuse_subgraphs_linear_chains_of_duplicate_deps.assert_res_sol", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 1258, "end_line": 1288, "span_ids": ["test_fuse_subgraphs_linear_chains_of_duplicate_deps"], "tokens": 303}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_subgraphs_linear_chains_of_duplicate_deps():\n dsk = {\n \"x-1\": 1,\n \"add-1\": (add, \"x-1\", \"x-1\"),\n \"add-2\": (add, \"add-1\", \"add-1\"),\n \"add-3\": (add, \"add-2\", \"add-2\"),\n \"add-4\": (add, \"add-3\", \"add-3\"),\n \"add-5\": (add, \"add-4\", \"add-4\"),\n }\n\n res = fuse(dsk, \"add-5\", fuse_subgraphs=True)\n sol = with_deps(\n {\n \"add-x-1\": (\n SubgraphCallable(\n {\n \"x-1\": 1,\n \"add-1\": (add, \"x-1\", \"x-1\"),\n \"add-2\": (add, \"add-1\", \"add-1\"),\n \"add-3\": (add, \"add-2\", \"add-2\"),\n \"add-4\": (add, \"add-3\", \"add-3\"),\n \"add-5\": (add, \"add-4\", \"add-4\"),\n },\n \"add-5\",\n (),\n ),\n ),\n \"add-5\": \"add-x-1\",\n }\n )\n assert res == sol", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_dont_fuse_numpy_arrays_test_fuse_config.with_dask_config_set_op.assert_fuse_d_b_depen": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_dont_fuse_numpy_arrays_test_fuse_config.with_dask_config_set_op.assert_fuse_d_b_depen", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 1291, "end_line": 1310, "span_ids": ["test_dont_fuse_numpy_arrays", "test_fuse_config"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dont_fuse_numpy_arrays():\n \"\"\"\n Some types should stay in the graph bare\n\n This helps with things like serialization\n \"\"\"\n np = pytest.importorskip(\"numpy\")\n dsk = {\"x\": np.arange(5), \"y\": (inc, \"x\")}\n\n assert fuse(dsk, \"y\")[0] == dsk\n\n\ndef test_fuse_config():\n with dask.config.set({\"optimization.fuse.active\": False}):\n d = {\n \"a\": 1,\n \"b\": (inc, \"a\"),\n }\n dependencies = {\"b\": (\"a\",)}\n assert fuse(d, \"b\", dependencies=dependencies) == (d, dependencies)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fused_keys_max_length_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fused_keys_max_length_", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 1313, "end_line": 1347, "span_ids": ["test_fused_keys_max_length"], "tokens": 295}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fused_keys_max_length(): # generic fix for gh-5999\n d = {\n \"u-looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong\": (\n inc,\n \"v-looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong\",\n ),\n \"v-looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong\": (\n inc,\n \"w-looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong\",\n ),\n \"w-looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong\": (\n inc,\n \"x-looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong\",\n ),\n \"x-looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong\": (\n inc,\n \"y-looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong\",\n ),\n \"y-looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong\": (\n inc,\n \"z-looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong\",\n ),\n 
\"z-looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong\": (\n add,\n \"a\",\n \"b\",\n ),\n \"a\": 1,\n \"b\": 2,\n }\n\n fused, deps = fuse(d, rename_keys=True)\n for key in fused:\n assert len(key) < 150", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_pytest_test_ordering_keeps_groups_together.assert_abs_o_a_1_o_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_pytest_test_ordering_keeps_groups_together.assert_abs_o_a_1_o_", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 36, "span_ids": ["f", "imports", "abcde", "issorted", "test_ordering_keeps_groups_together"], "tokens": 345}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\nimport dask\nfrom dask.order import ndependencies, order\nfrom dask.core import get_deps\nfrom dask.utils_test import add, inc\n\n\n@pytest.fixture(params=[\"abcde\", \"edcba\"])\ndef abcde(request):\n return request.param\n\n\ndef issorted(L, reverse=False):\n return sorted(L, reverse=reverse) == L\n\n\ndef f(*args):\n pass\n\n\ndef test_ordering_keeps_groups_together(abcde):\n a, b, c, d, e = abcde\n d = dict(((a, i), (f,)) for i in range(4))\n d.update({(b, 0): (f, (a, 0), (a, 1)), (b, 1): (f, (a, 2), (a, 3))})\n o = order(d)\n\n assert abs(o[(a, 0)] - o[(a, 1)]) == 1\n assert abs(o[(a, 2)] - o[(a, 3)]) == 1\n\n d = dict(((a, i), (f,)) for i in range(4))\n d.update({(b, 0): (f, (a, 0), (a, 2)), (b, 1): (f, (a, 1), (a, 3))})\n o = order(d)\n\n assert abs(o[(a, 0)] - o[(a, 2)]) == 1\n assert abs(o[(a, 1)] - o[(a, 3)]) == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_avoid_broker_nodes_test_avoid_broker_nodes.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_avoid_broker_nodes_test_avoid_broker_nodes.None_3", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 39, "end_line": 79, "span_ids": ["test_avoid_broker_nodes"], "tokens": 397}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_avoid_broker_nodes(abcde):\n r\"\"\"\n\n b0 b1 b2\n | \\ /\n a0 a1\n\n a0 should be run before a1\n \"\"\"\n a, b, c, d, e = 
abcde\n dsk = {\n (a, 0): (f,),\n (a, 1): (f,),\n (b, 0): (f, (a, 0)),\n (b, 1): (f, (a, 1)),\n (b, 2): (f, (a, 1)),\n }\n o = order(dsk)\n assert o[(a, 0)] < o[(a, 1)]\n\n # Switch name of 0, 1 to ensure that this isn't due to string comparison\n dsk = {\n (a, 1): (f,),\n (a, 0): (f,),\n (b, 0): (f, (a, 1)),\n (b, 1): (f, (a, 0)),\n (b, 2): (f, (a, 0)),\n }\n o = order(dsk)\n assert o[(a, 0)] > o[(a, 1)]\n\n # Switch name of 0, 1 for \"b\"s too\n dsk = {\n (a, 0): (f,),\n (a, 1): (f,),\n (b, 1): (f, (a, 0)),\n (b, 0): (f, (a, 1)),\n (b, 2): (f, (a, 1)),\n }\n o = order(dsk)\n assert o[(a, 0)] < o[(a, 1)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_base_of_reduce_preferred_test_base_of_reduce_preferred.assert_o_b_1_6": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_base_of_reduce_preferred_test_base_of_reduce_preferred.assert_o_b_1_6", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 82, "end_line": 107, "span_ids": ["test_base_of_reduce_preferred"], "tokens": 233}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_base_of_reduce_preferred(abcde):\n r\"\"\"\n a3\n /|\n a2 |\n /| |\n a1 | |\n /| | |\n a0 | | |\n | | | |\n b0 b1 b2 b3\n \\ \\ / /\n c\n\n We really want to run b0 quickly\n \"\"\"\n a, b, c, d, e = abcde\n dsk = {(a, i): (f, (a, i - 1), (b, i)) for i in [1, 2, 3]}\n dsk[(a, 0)] = (f, (b, 0))\n dsk.update({(b, i): (f, c, 1) for i in [0, 1, 2, 3]})\n dsk[c] = 1\n\n o = order(dsk)\n\n assert o[(b, 0)] <= 4\n assert o[(b, 1)] <= 6", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_avoid_upwards_branching_test_avoid_upwards_branching.assert_o_b_1_o_c_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_avoid_upwards_branching_test_avoid_upwards_branching.assert_o_b_1_o_c_", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 110, "end_line": 140, "span_ids": ["test_avoid_upwards_branching"], "tokens": 262}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(reason=\"Can't please 'em all\")\ndef test_avoid_upwards_branching(abcde):\n r\"\"\"\n a1\n |\n a2\n |\n a3 d1\n / \\ /\n 
b1 c1\n | |\n b2 c2\n |\n c3\n\n Prefer b1 over c1 because it won't stick around waiting for d1 to complete\n \"\"\"\n a, b, c, d, e = abcde\n dsk = {\n (a, 1): (f, (a, 2)),\n (a, 2): (f, (a, 3)),\n (a, 3): (f, (b, 1), (c, 1)),\n (b, 1): (f, (b, 2)),\n (c, 1): (f, (c, 2)),\n (c, 2): (f, (c, 3)),\n (d, 1): (f, (c, 1)),\n }\n\n o = order(dsk)\n\n assert o[(b, 1)] < o[(c, 1)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_avoid_upwards_branching_complex_test_avoid_upwards_branching_complex.assert_abs_o_d_2_o_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_avoid_upwards_branching_complex_test_avoid_upwards_branching_complex.assert_abs_o_d_2_o_", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 143, "end_line": 179, "span_ids": ["test_avoid_upwards_branching_complex"], "tokens": 378}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_avoid_upwards_branching_complex(abcde):\n r\"\"\"\n a1\n |\n e2 a2 d2 d3\n | | \\ /\n e1 a3 d1\n \\ / \\ /\n b1 c1\n | |\n b2 c2\n |\n c3\n\n Prefer c1 over b1 because c1 will stay in memory less long while b1\n computes\n \"\"\"\n a, b, c, d, e = abcde\n dsk = {\n (a, 1): (f, (a, 2)),\n (a, 2): (f, (a, 3)),\n (a, 3): (f, (b, 1), (c, 1)),\n (b, 1): (f, (b, 2)),\n (b, 2): (f,),\n (c, 1): (f, (c, 2)),\n (c, 2): (f, (c, 3)),\n (c, 3): (f,),\n (d, 1): (f, (c, 1)),\n (d, 2): (f, (d, 1)),\n (d, 3): (f, (d, 1)),\n (e, 1): (f, (b, 1)),\n (e, 2): (f, (e, 1)),\n }\n\n o = order(dsk)\n assert o[(c, 1)] < o[(b, 1)]\n assert abs(o[(d, 2)] - o[(d, 3)]) == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_deep_bases_win_over_dependents_test_deep_bases_win_over_dependents.assert_o_b_o_c_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_deep_bases_win_over_dependents_test_deep_bases_win_over_dependents.assert_o_b_o_c_", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 182, "end_line": 203, "span_ids": ["test_deep_bases_win_over_dependents"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_deep_bases_win_over_dependents(abcde):\n r\"\"\"\n It's not clear who 
should run first, e or d\n\n 1. d is nicer because it exposes parallelism\n 2. e is nicer (hypothetically) because it will be sooner released\n (though in this case we need d to run first regardless)\n\n Regardless of e or d first, we should run b before c.\n\n a\n / | \\ .\n b c |\n / \\ | /\n e d\n \"\"\"\n a, b, c, d, e = abcde\n dsk = {a: (f, b, c, d), b: (f, d, e), c: (f, d), d: 1, e: 2}\n\n o = order(dsk)\n assert o[e] < o[d] # ambiguous, but this is what we currently expect\n assert o[b] < o[c]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_prefer_deep_test_prefer_deep.assert_o_b_o_d_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_prefer_deep_test_prefer_deep.assert_o_b_o_d_", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 206, "end_line": 221, "span_ids": ["test_prefer_deep"], "tokens": 119}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_prefer_deep(abcde):\n \"\"\"\n c\n |\n e b\n | |\n d a\n\n Prefer longer chains first so we should start with c\n \"\"\"\n a, b, c, d, e = abcde\n dsk = {a: 1, b: (f, a), c: (f, b), d: 1, e: (f, d)}\n\n o = order(dsk)\n assert o[a] < o[d]\n assert o[b] < o[d]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_stacklimit_test_order_doesnt_fail_on_mixed_type_keys.order_x_inc_1_y": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_stacklimit_test_order_doesnt_fail_on_mixed_type_keys.order_x_inc_1_y", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 224, "end_line": 244, "span_ids": ["test_stacklimit", "test_order_doesnt_fail_on_mixed_type_keys", "test_break_ties_by_str"], "tokens": 222}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_stacklimit(abcde):\n dsk = dict((\"x%s\" % (i + 1), (inc, \"x%s\" % i)) for i in range(10000))\n dependencies, dependents = get_deps(dsk)\n ndependencies(dependencies, dependents)\n\n\ndef test_break_ties_by_str(abcde):\n a, b, c, d, e = abcde\n dsk = {(\"x\", i): (inc, i) for i in range(10)}\n x_keys = sorted(dsk)\n dsk[\"y\"] = list(x_keys)\n\n o = order(dsk)\n expected = {\"y\": 10}\n expected.update({k: i for i, 
k in enumerate(x_keys)})\n\n assert o == expected\n\n\ndef test_order_doesnt_fail_on_mixed_type_keys(abcde):\n order({\"x\": (inc, 1), (\"y\", 0): (inc, 2), \"z\": (add, \"x\", (\"y\", 0))})", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_gh_3055_test_gh_3055._operate_in_order": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_gh_3055_test_gh_3055._operate_in_order", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 247, "end_line": 264, "span_ids": ["test_gh_3055"], "tokens": 235}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_gh_3055():\n da = pytest.importorskip(\"dask.array\")\n A, B = 20, 99\n orig = x = da.random.normal(size=(A, B), chunks=(1, None))\n for _ in range(2):\n y = (x[:, None, :] * x[:, :, None]).cumsum(axis=0)\n x = x.cumsum(axis=0)\n w = (y * x[:, None]).sum(axis=(1, 2))\n\n dsk = dict(w.__dask_graph__())\n o = order(dsk)\n L = [o[k] for k in w.__dask_keys__()]\n assert sum(x < len(o) / 2 for x in L) > len(L) / 3 # some complete quickly\n\n L = [o[k] for kk in orig.__dask_keys__() for k in kk]\n assert sum(x > len(o) / 2 for x in L) > len(L) / 3 # some start later\n\n assert sorted(L) == L # operate in order", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_type_comparisions_ok_test_prefer_short_dependents.assert_o_e_o_b_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_type_comparisions_ok_test_prefer_short_dependents.assert_o_e_o_b_", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 267, "end_line": 290, "span_ids": ["test_type_comparisions_ok", "test_prefer_short_dependents"], "tokens": 204}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_type_comparisions_ok(abcde):\n a, b, c, d, e = abcde\n dsk = {a: 1, (a, 1): 2, (a, b, 1): 3}\n order(dsk) # this doesn't err\n\n\ndef test_prefer_short_dependents(abcde):\n r\"\"\"\n\n a\n |\n d b e\n \\ | /\n c\n\n Prefer to finish d and e before starting b. 
That way c can be released\n during the long computations.\n \"\"\"\n a, b, c, d, e = abcde\n dsk = {c: (f,), d: (f, c), e: (f, c), b: (f, c), a: (f, b)}\n\n o = order(dsk)\n assert o[d] < o[b]\n assert o[e] < o[b]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_run_smaller_sections_test_run_smaller_sections.assert_log_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_run_smaller_sections_test_run_smaller_sections.assert_log_expected", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 293, "end_line": 331, "span_ids": ["test_run_smaller_sections"], "tokens": 275}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(reason=\"This is challenging to do precisely\")\ndef test_run_smaller_sections(abcde):\n r\"\"\"\n aa\n / |\n b d bb dd\n / \\ /| | /\n a c e cc\n\n Prefer to run acb first because then we can get that out of the way\n \"\"\"\n a, b, c, d, e = abcde\n aa, bb, cc, dd = [x * 2 for x in [a, b, c, d]]\n\n expected = [a, c, b, e, d, cc, bb, aa, dd]\n\n log = []\n\n def f(x):\n def _(*args):\n log.append(x)\n\n return _\n\n dsk = {\n a: (f(a),),\n c: (f(c),),\n e: (f(e),),\n cc: (f(cc),),\n b: (f(b), a, c),\n d: (f(d), c, e),\n bb: (f(bb), cc),\n aa: (f(aa), d, bb),\n dd: (f(dd), cc),\n }\n\n dask.get(dsk, [aa, b, dd]) # trigger computation\n\n assert log == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_local_parents_of_reduction_test_local_parents_of_reduction.assert_log_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_local_parents_of_reduction_test_local_parents_of_reduction.assert_log_expected", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 334, "end_line": 379, "span_ids": ["test_local_parents_of_reduction"], "tokens": 348}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_local_parents_of_reduction(abcde):\n \"\"\"\n\n c1\n |\n b1 c2\n | /|\n a1 b2 c3\n | /|\n a2 b3\n |\n a3\n\n Prefer to finish a1 stack before proceeding to b2\n \"\"\"\n a, b, c, d, e = abcde\n a1, a2, a3 = [a + i for i in \"123\"]\n b1, b2, b3 = [b + i for i in \"123\"]\n 
c1, c2, c3 = [c + i for i in \"123\"]\n\n expected = [a3, a2, a1, b3, b2, b1, c3, c2, c1]\n\n log = []\n\n def f(x):\n def _(*args):\n log.append(x)\n\n return _\n\n dsk = {\n a3: (f(a3),),\n a2: (f(a2), a3),\n a1: (f(a1), a2),\n b3: (f(b3),),\n b2: (f(b2), b3, a2),\n b1: (f(b1), b2),\n c3: (f(c3),),\n c2: (f(c2), c3, b2),\n c1: (f(c1), c2),\n }\n\n order(dsk)\n dask.get(dsk, [a1, b1, c1]) # trigger computation\n\n assert log == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_nearest_neighbor_test_nearest_neighbor.assert_o_min_b1_b2_b3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_nearest_neighbor_test_nearest_neighbor.assert_o_min_b1_b2_b3_", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 382, "end_line": 416, "span_ids": ["test_nearest_neighbor"], "tokens": 393}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_nearest_neighbor(abcde):\n r\"\"\"\n\n a1 a2 a3 a4 a5 a6 a7 a8 a9\n \\ | / \\ | / \\ | / \\ | /\n b1 b2 b3 b4\n\n Want to finish off a local group before moving on.\n This is difficult because all groups are connected.\n \"\"\"\n a, b, c, _, _ = abcde\n a1, a2, a3, a4, a5, a6, a7, a8, a9 = [a + i for i in \"123456789\"]\n b1, b2, b3, b4 = [b + i for i in \"1234\"]\n\n dsk = {\n b1: (f,),\n b2: (f,),\n b3: (f,),\n b4: (f,),\n a1: (f, b1),\n a2: (f, b1),\n a3: (f, b1, b2),\n a4: (f, b2),\n a5: (f, b2, b3),\n a6: (f, b3),\n a7: (f, b3, b4),\n a8: (f, b4),\n a9: (f, b4),\n }\n\n o = order(dsk)\n\n assert 3 < sum(o[a + i] < len(o) / 2 for i in \"123456789\") < 7\n assert 1 < sum(o[b + i] < len(o) / 2 for i in \"1234\") < 4\n assert o[min([b1, b2, b3, b4])] == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_string_ordering_test_string_ordering_dependents.assert_o_b_0_a_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_string_ordering_test_string_ordering_dependents.assert_o_b_0_a_", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 419, "end_line": 430, "span_ids": ["test_string_ordering", "test_string_ordering_dependents"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": 
{}, "text": "def test_string_ordering():\n \"\"\" Prefer ordering tasks by name first \"\"\"\n dsk = {(\"a\", 1): (f,), (\"a\", 2): (f,), (\"a\", 3): (f,)}\n o = order(dsk)\n assert o == {(\"a\", 1): 0, (\"a\", 2): 1, (\"a\", 3): 2}\n\n\ndef test_string_ordering_dependents():\n \"\"\" Prefer ordering tasks by name first even when in dependencies \"\"\"\n dsk = {(\"a\", 1): (f, \"b\"), (\"a\", 2): (f, \"b\"), (\"a\", 3): (f, \"b\"), \"b\": (f,)}\n o = order(dsk)\n assert o == {\"b\": 0, (\"a\", 1): 1, (\"a\", 2): 2, (\"a\", 3): 3}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_prefer_short_narrow_test_prefer_short_narrow.assert_o_c_1_o_c_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_prefer_short_narrow_test_prefer_short_narrow.assert_o_c_1_o_c_", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 433, "end_line": 448, "span_ids": ["test_prefer_short_narrow"], "tokens": 209}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_prefer_short_narrow(abcde):\n # See test_prefer_short_ancestor for a fail case.\n a, b, c, _, _ = abcde\n dsk = {\n (a, 0): 0,\n (b, 0): 0,\n (c, 0): 0,\n (c, 1): (f, (c, 0), (a, 0), (b, 0)),\n (a, 1): 1,\n (b, 1): 1,\n (c, 2): (f, (c, 1), (a, 1), (b, 1)),\n }\n o = order(dsk)\n assert o[(b, 0)] < o[(b, 1)]\n assert o[(b, 0)] < o[(c, 2)]\n assert o[(c, 1)] < o[(c, 2)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_prefer_short_ancestor_test_prefer_short_ancestor.assert_o_c_1_o_a_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_prefer_short_ancestor_test_prefer_short_ancestor.assert_o_c_1_o_a_", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 451, "end_line": 508, "span_ids": ["test_prefer_short_ancestor"], "tokens": 517}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_prefer_short_ancestor(abcde):\n r\"\"\"\n From https://github.com/dask/dask-ml/issues/206#issuecomment-395869929\n\n Two cases, one where chunks of an array are independent, and one where the\n chunks of an array have a shared source. 
We handled the independent one\n \"well\" earlier.\n\n Good:\n\n c2\n / \\ \\\n / \\ \\\n c1 \\ \\\n / | \\ \\ \\\n c0 a0 b0 a1 b1\n\n Bad:\n\n c2\n / \\ \\\n / \\ \\\n c1 \\ \\\n / | \\ \\ \\\n c0 a0 b0 a1 b1\n \\ \\ / /\n \\ \\ / /\n a-b\n\n\n The difference is that all the `a` and `b` tasks now have a common\n ancestor.\n\n We would like to choose c1 *before* a1, and b1 because\n\n * we can release a0 and b0 once c1 is done\n * we don't need a1 and b1 to compute c1.\n \"\"\"\n a, b, c, _, _ = abcde\n ab = a + b\n\n dsk = {\n ab: 0,\n (a, 0): (f, ab, 0, 0),\n (b, 0): (f, ab, 0, 1),\n (c, 0): 0,\n (c, 1): (f, (c, 0), (a, 0), (b, 0)),\n (a, 1): (f, ab, 1, 0),\n (b, 1): (f, ab, 1, 1),\n (c, 2): (f, (c, 1), (a, 1), (b, 1)),\n }\n o = order(dsk)\n\n assert o[(a, 0)] < o[(a, 1)]\n assert o[(b, 0)] < o[(b, 1)]\n assert o[(b, 0)] < o[(c, 2)]\n assert o[(c, 1)] < o[(c, 2)]\n assert o[(c, 1)] < o[(a, 1)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_map_overlap_test_map_overlap.assert_o_b_1_o_e_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_map_overlap_test_map_overlap.assert_o_b_1_o_e_", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 511, "end_line": 545, "span_ids": ["test_map_overlap"], "tokens": 438}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_overlap(abcde):\n r\"\"\"\n b1 b3 b5\n |\\ / | \\ / |\n c1 c2 c3 c4 c5\n |/ | \\ | / | \\|\n d1 d2 d3 d4 d5\n | | |\n e1 e2 e5\n\n Want to finish b1 before we start on e5\n \"\"\"\n a, b, c, d, e = abcde\n dsk = {\n (e, 1): (f,),\n (d, 1): (f, (e, 1)),\n (c, 1): (f, (d, 1)),\n (b, 1): (f, (c, 1), (c, 2)),\n (d, 2): (f,),\n (c, 2): (f, (d, 1), (d, 2), (d, 3)),\n (e, 3): (f,),\n (d, 3): (f, (e, 3)),\n (c, 3): (f, (d, 3)),\n (b, 3): (f, (c, 2), (c, 3), (c, 4)),\n (d, 4): (f,),\n (c, 4): (f, (d, 3), (d, 4), (d, 5)),\n (e, 5): (f,),\n (d, 5): (f, (e, 5)),\n (c, 5): (f, (d, 5)),\n (b, 5): (f, (c, 4), (c, 5)),\n }\n\n o = order(dsk)\n\n assert o[(b, 1)] < o[(e, 5)] or o[(b, 5)] < o[(e, 1)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_use_structure_not_keys_test_use_structure_not_keys.if_Bs_0_3_.else_.assert_Bs_1_3_5_7_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_use_structure_not_keys_test_use_structure_not_keys.if_Bs_0_3_.else_.assert_Bs_1_3_5_7_", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 548, "end_line": 585, 
"span_ids": ["test_use_structure_not_keys"], "tokens": 644}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_use_structure_not_keys(abcde):\n \"\"\"See https://github.com/dask/dask/issues/5584#issuecomment-554963958\n\n We were using key names to infer structure, which could result in funny behavior.\n \"\"\"\n a, b, _, _, _ = abcde\n dsk = {\n (a, 0): (f,),\n (a, 1): (f,),\n (a, 2): (f,),\n (a, 3): (f,),\n (a, 4): (f,),\n (a, 5): (f,),\n (a, 6): (f,),\n (a, 7): (f,),\n (a, 8): (f,),\n (a, 9): (f,),\n (b, 5): (f, (a, 2)),\n (b, 7): (f, (a, 0), (a, 2)),\n (b, 9): (f, (a, 7), (a, 0), (a, 2)),\n (b, 1): (f, (a, 4), (a, 7), (a, 0)),\n (b, 2): (f, (a, 9), (a, 4), (a, 7)),\n (b, 4): (f, (a, 6), (a, 9), (a, 4)),\n (b, 3): (f, (a, 5), (a, 6), (a, 9)),\n (b, 8): (f, (a, 1), (a, 5), (a, 6)),\n (b, 6): (f, (a, 8), (a, 1), (a, 5)),\n (b, 0): (f, (a, 3), (a, 8), (a, 1)),\n }\n o = order(dsk)\n As = sorted(val for (letter, _), val in o.items() if letter == a)\n Bs = sorted(val for (letter, _), val in o.items() if letter == b)\n assert Bs[0] in {1, 3}\n if Bs[0] == 3:\n assert As == [0, 1, 2, 4, 6, 8, 10, 12, 14, 16]\n assert Bs == [3, 5, 7, 9, 11, 13, 15, 17, 18, 19]\n else:\n assert As == [0, 2, 4, 6, 8, 10, 12, 14, 16, 18]\n assert Bs == [1, 3, 5, 7, 9, 11, 13, 15, 17, 19]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_dont_run_all_dependents_too_early_test_dont_run_all_dependents_too_early.assert_expected_actual": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_dont_run_all_dependents_too_early_test_dont_run_all_dependents_too_early.assert_expected_actual", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 588, "end_line": 600, "span_ids": ["test_dont_run_all_dependents_too_early"], "tokens": 260}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dont_run_all_dependents_too_early(abcde):\n \"\"\" From https://github.com/dask/dask-ml/issues/206#issuecomment-395873372 \"\"\"\n a, b, c, d, e = abcde\n depth = 10\n dsk = {(a, 0): 0, (b, 0): 1, (c, 0): 2, (d, 0): (f, (a, 0), (b, 0), (c, 0))}\n for i in range(1, depth):\n dsk[(b, i)] = (f, (b, 0))\n dsk[(c, i)] = (f, (c, 0))\n dsk[(d, i)] = (f, (d, i - 1), (b, i), (c, i))\n o = order(dsk)\n expected = [3, 6, 9, 12, 15, 18, 21, 24, 27, 30]\n actual = sorted(v for (letter, num), v in o.items() if letter == d)\n assert expected == actual", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": 
"TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_many_branches_use_ndependencies_test_many_branches_use_ndependencies.assert_o_c_1_o_a_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_many_branches_use_ndependencies_test_many_branches_use_ndependencies.assert_o_c_1_o_a_", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 603, "end_line": 639, "span_ids": ["test_many_branches_use_ndependencies"], "tokens": 545}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_many_branches_use_ndependencies(abcde):\n \"\"\"From https://github.com/dask/dask/pull/5646#issuecomment-562700533\n\n Sometimes we need larger or wider DAGs to test behavior. This test\n ensures we choose the branch with more work twice in successtion.\n This is important, because ``order`` may search along dependencies\n and then along dependents.\n\n \"\"\"\n a, b, c, d, e = abcde\n dd = d + d\n ee = e + e\n dsk = {\n (a, 0): 0,\n (a, 1): (f, (a, 0)),\n (a, 2): (f, (a, 1)),\n (b, 1): (f, (a, 0)),\n (b, 2): (f, (b, 1)),\n (c, 1): (f, (a, 0)), # most short and thin; should go last\n (d, 1): (f, (a, 0)),\n (d, 2): (f, (d, 1)),\n (dd, 1): (f, (a, 0)),\n (dd, 2): (f, (dd, 1)),\n (dd, 3): (f, (d, 2), (dd, 2)),\n (e, 1): (f, (a, 0)),\n (e, 2): (f, (e, 1)),\n (ee, 1): (f, (a, 0)),\n (ee, 2): (f, (ee, 1)),\n (ee, 3): (f, (e, 2), (ee, 2)),\n (a, 3): (f, (a, 2), (b, 2), (c, 1), (dd, 3), (ee, 3)),\n }\n o = order(dsk)\n # run all d's and e's first\n expected = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n actual = sorted(v for (letter, _), v in o.items() if letter in {d, dd, e, ee})\n assert actual == expected\n assert o[(c, 1)] == o[(a, 3)] - 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_order_cycle_test_order_empty.assert_order_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_order_cycle_test_order_empty.assert_order_", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 642, "end_line": 658, "span_ids": ["test_order_cycle", "test_order_empty"], "tokens": 274}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_order_cycle():\n with pytest.raises(RuntimeError, match=\"Cycle detected\"):\n dask.get({\"a\": (f, \"a\")}, \"a\") # we encounter this in `get`\n with pytest.raises(RuntimeError, match=\"Cycle detected\"):\n 
order({\"a\": (f, \"a\")}) # trivial self-loop\n with pytest.raises(RuntimeError, match=\"Cycle detected\"):\n order({(\"a\", 0): (f, (\"a\", 0))}) # non-string\n with pytest.raises(RuntimeError, match=\"Cycle detected\"):\n order({\"a\": (f, \"b\"), \"b\": (f, \"c\"), \"c\": (f, \"a\")}) # non-trivial loop\n with pytest.raises(RuntimeError, match=\"Cycle detected\"):\n order({\"a\": (f, \"b\"), \"b\": (f, \"c\"), \"c\": (f, \"a\", \"d\"), \"d\": 1})\n with pytest.raises(RuntimeError, match=\"Cycle detected\"):\n order({\"a\": (f, \"b\"), \"b\": (f, \"c\"), \"c\": (f, \"a\", \"d\"), \"d\": (f, \"b\")})\n\n\ndef test_order_empty():\n assert order({}) == {}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_switching_dependents_test_switching_dependents.assert_o_a_5_o_e_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_switching_dependents_test_switching_dependents.assert_o_a_5_o_e_", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 661, "end_line": 713, "span_ids": ["test_switching_dependents"], "tokens": 564}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_switching_dependents(abcde):\n r\"\"\"\n\n a7 a8 <-- do these last\n | /\n a6 e6\n | /\n a5 c5 d5 e5\n | | / /\n a4 c4 d4 e4\n | \\ | / /\n a3 b3---/\n |\n a2\n |\n a1\n |\n a0 <-- start here\n\n Test that we are able to switch to better dependents.\n In this graph, we expect to start at a0. 
To compute a4, we need to compute b3.\n After computing b3, three \"better\" paths become available.\n Confirm that we take the better paths before continuing down `a` path.\n\n This test is pretty specific to how `order` is implemented\n and is intended to increase code coverage.\n \"\"\"\n a, b, c, d, e = abcde\n dsk = {\n (a, 0): 0,\n (a, 1): (f, (a, 0)),\n (a, 2): (f, (a, 1)),\n (a, 3): (f, (a, 2)),\n (a, 4): (f, (a, 3), (b, 3)),\n (a, 5): (f, (a, 4)),\n (a, 6): (f, (a, 5)),\n (a, 7): (f, (a, 6)),\n (a, 8): (f, (a, 6)),\n (b, 3): 1,\n (c, 4): (f, (b, 3)),\n (c, 5): (f, (c, 4)),\n (d, 4): (f, (b, 3)),\n (d, 5): (f, (d, 4)),\n (e, 4): (f, (b, 3)),\n (e, 5): (f, (e, 4)),\n (e, 6): (f, (e, 5)),\n }\n o = order(dsk)\n\n assert o[(a, 0)] == 0 # probably\n assert o[(a, 5)] > o[(c, 5)]\n assert o[(a, 5)] > o[(d, 5)]\n assert o[(a, 5)] > o[(e, 6)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_order_with_equal_dependents_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_order_with_equal_dependents_", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 716, "end_line": 794, "span_ids": ["test_order_with_equal_dependents"], "tokens": 881}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_order_with_equal_dependents(abcde):\n \"\"\"From https://github.com/dask/dask/issues/5859#issuecomment-608422198\n\n See the visualization of `(maxima, argmax)` example from the above comment.\n\n This DAG has enough structure to exercise more parts of `order`\n\n \"\"\"\n a, b, c, d, e = abcde\n dsk = {}\n abc = [a, b, c, d]\n for x in abc:\n dsk.update(\n {\n (x, 0): 0,\n (x, 1): (f, (x, 0)),\n (x, 2, 0): (f, (x, 0)),\n (x, 2, 1): (f, (x, 1)),\n }\n )\n for i, y in enumerate(abc):\n dsk.update(\n {\n (x, 3, i): (f, (x, 2, 0), (y, 2, 1)), # cross x and y\n (x, 4, i): (f, (x, 3, i)),\n (x, 5, i, 0): (f, (x, 4, i)),\n (x, 5, i, 1): (f, (x, 4, i)),\n (x, 6, i, 0): (f, (x, 5, i, 0)),\n (x, 6, i, 1): (f, (x, 5, i, 1)),\n }\n )\n o = order(dsk)\n total = 0\n for x in abc:\n for i in range(len(abc)):\n val = o[(x, 6, i, 1)] - o[(x, 6, i, 0)]\n assert val > 0 # ideally, val == 2\n total += val\n assert total <= 32 # ideally, this should be 2 * 16 = 32\n\n # Add one to the end of the nine bundles\n dsk2 = dict(dsk)\n for x in abc:\n for i in range(len(abc)):\n dsk2[(x, 7, i, 0)] = (f, (x, 6, i, 0))\n o = order(dsk2)\n total = 0\n for x in abc:\n for i in range(len(abc)):\n val = o[(x, 7, i, 0)] - o[(x, 6, i, 1)]\n assert val > 0 # ideally, val == 3\n total += val\n assert total <= 165 # ideally, this should be 3 * 16 == 48\n\n # Remove one from each of the nine bundles\n dsk3 = dict(dsk)\n for x in abc:\n for i in range(len(abc)):\n del dsk3[(x, 6, i, 1)]\n o = order(dsk3)\n total = 0\n for x in abc:\n for i in range(len(abc)):\n val = o[(x, 6, i, 0)] - o[(x, 5, i, 1)]\n 
assert val > 0 # ideally, val == 2\n total += val\n assert total <= 119 # ideally, this should be 2 * 16 == 32\n\n # Remove another one from each of the sixteen bundles\n dsk4 = dict(dsk3)\n for x in abc:\n for i in range(len(abc)):\n del dsk4[(x, 6, i, 0)]\n o = order(dsk4)\n total = 0\n for x in abc:\n for i in range(len(abc)):\n assert o[(x, 5, i, 1)] - o[(x, 5, i, 0)] == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_from_dask_rewrite_import__test_args.assert_args_1_2_3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_from_dask_rewrite_import__test_args.assert_args_1_2_3_", "embedding": null, "metadata": {"file_path": "dask/tests/test_rewrite.py", "file_name": "test_rewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 20, "span_ids": ["imports", "test_head", "test_args", "double"], "tokens": 176}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from dask.rewrite import RewriteRule, RuleSet, head, args, VAR, Traverser\nfrom dask.utils_test import inc, add\n\n\ndef double(x):\n return x * 2\n\n\ndef test_head():\n assert head((inc, 1)) == inc\n assert head((add, 1, 2)) == add\n assert head((add, (inc, 1), (inc, 1))) == add\n assert head([1, 2, 3]) == list\n\n\ndef test_args():\n assert args((inc, 1)) == (1,)\n assert args((add, 1, 2)) == (1, 2)\n assert args(1) == ()\n assert args([1, 2, 3]) == [1, 2, 3]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_test_traverser_test_traverser.assert_list_t2_add_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_test_traverser_test_traverser.assert_list_t2_add_", "embedding": null, "metadata": {"file_path": "dask/tests/test_rewrite.py", "file_name": "test_rewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 23, "end_line": 36, "span_ids": ["test_traverser"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_traverser():\n term = (add, (inc, 1), (double, (inc, 1), 2))\n t = Traverser(term)\n t2 = t.copy()\n assert t.current == add\n t.next()\n assert t.current == inc\n # Ensure copies aren't advanced when the original advances\n assert t2.current == add\n t.skip()\n assert t.current == double\n t.next()\n assert t.current == inc\n assert list(t2) == [add, inc, 1, double, inc, 1, 2]", "start_char_idx": null, "end_char_idx": 
null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_vars_rule6.RewriteRule_list_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_vars_rule6.RewriteRule_list_x_", "embedding": null, "metadata": {"file_path": "dask/tests/test_rewrite.py", "file_name": "test_rewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 39, "end_line": 61, "span_ids": ["repl_list", "impl:13", "impl"], "tokens": 298}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "vars = (\"a\", \"b\", \"c\")\n# add(a, 1) -> inc(a)\nrule1 = RewriteRule((add, \"a\", 1), (inc, \"a\"), vars)\n# add(a, a) -> double(a)\nrule2 = RewriteRule((add, \"a\", \"a\"), (double, \"a\"), vars)\n# add(inc(a), inc(a)) -> add(double(a), 2)\nrule3 = RewriteRule((add, (inc, \"a\"), (inc, \"a\")), (add, (double, \"a\"), 2), vars)\n# add(inc(b), inc(a)) -> add(add(a, b), 2)\nrule4 = RewriteRule((add, (inc, \"b\"), (inc, \"a\")), (add, (add, \"a\", \"b\"), 2), vars)\n# sum([c, b, a]) -> add(add(a, b), c)\nrule5 = RewriteRule((sum, [\"c\", \"b\", \"a\"]), (add, (add, \"a\", \"b\"), \"c\"), vars)\n# list(x) -> x if x is a list\n\n\ndef repl_list(sd):\n x = sd[\"x\"]\n if isinstance(x, list):\n return x\n else:\n return (list, x)\n\n\nrule6 = RewriteRule((list, \"x\"), repl_list, (\"x\",))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_test_RewriteRule_test_RewriteRule.assert_rule5__varlist_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_test_RewriteRule_test_RewriteRule.assert_rule5__varlist_", "embedding": null, "metadata": {"file_path": "dask/tests/test_rewrite.py", "file_name": "test_rewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 64, "end_line": 75, "span_ids": ["test_RewriteRule"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_RewriteRule():\n # Test extraneous vars are removed, varlist is correct\n assert rule1.vars == (\"a\",)\n assert rule1._varlist == [\"a\"]\n assert rule2.vars == (\"a\",)\n assert rule2._varlist == [\"a\", \"a\"]\n assert rule3.vars == (\"a\",)\n assert rule3._varlist == [\"a\", \"a\"]\n assert rule4.vars == (\"a\", \"b\")\n assert rule4._varlist == [\"b\", \"a\"]\n assert rule5.vars == (\"a\", \"b\", \"c\")\n assert rule5._varlist == [\"c\", \"b\", \"a\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_test_RewriteRuleSubs_test_RuleSet.assert_rs_rules_rules": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_test_RewriteRuleSubs_test_RuleSet.assert_rs_rules_rules", "embedding": null, "metadata": {"file_path": "dask/tests/test_rewrite.py", "file_name": "test_rewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 78, "end_line": 104, "span_ids": ["test_RuleSet", "impl:15", "test_RewriteRuleSubs"], "tokens": 227}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_RewriteRuleSubs():\n # Test both rhs substitution and callable rhs\n assert rule1.subs({\"a\": 1}) == (inc, 1)\n assert rule6.subs({\"x\": [1, 2, 3]}) == [1, 2, 3]\n\n\nrules = [rule1, rule2, rule3, rule4, rule5, rule6]\nrs = RuleSet(*rules)\n\n\ndef test_RuleSet():\n net = (\n {\n add: (\n {\n VAR: ({VAR: ({}, [1]), 1: ({}, [0])}, []),\n inc: ({VAR: ({inc: ({VAR: ({}, [2, 3])}, [])}, [])}, []),\n },\n [],\n ),\n list: ({VAR: ({}, [5])}, []),\n sum: ({list: ({VAR: ({VAR: ({VAR: ({}, [4])}, [])}, [])}, [])}, []),\n },\n [],\n )\n assert rs._net == net\n assert rs.rules == rules", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_test_matches_test_matches.assert_len_matches_0": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_test_matches_test_matches.assert_len_matches_0", "embedding": null, "metadata": {"file_path": "dask/tests/test_rewrite.py", "file_name": "test_rewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 107, "end_line": 134, "span_ids": ["test_matches"], "tokens": 342}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_matches():\n term = (add, 2, 1)\n matches = list(rs.iter_matches(term))\n assert len(matches) == 1\n assert matches[0] == (rule1, {\"a\": 2})\n # Test matches specific before general\n term = (add, 1, 1)\n matches = list(rs.iter_matches(term))\n assert len(matches) == 2\n assert matches[0] == (rule1, {\"a\": 1})\n assert matches[1] == (rule2, {\"a\": 1})\n # Test matches unhashable. What it's getting rewritten to doesn't make\n # sense, this is just to test that it works. 
:)\n term = (add, [1], [1])\n matches = list(rs.iter_matches(term))\n assert len(matches) == 1\n assert matches[0] == (rule2, {\"a\": [1]})\n # Test match at depth\n term = (add, (inc, 1), (inc, 1))\n matches = list(rs.iter_matches(term))\n assert len(matches) == 3\n assert matches[0] == (rule3, {\"a\": 1})\n assert matches[1] == (rule4, {\"a\": 1, \"b\": 1})\n assert matches[2] == (rule2, {\"a\": (inc, 1)})\n # Test non-linear pattern checking\n term = (add, 2, 3)\n matches = list(rs.iter_matches(term))\n assert len(matches) == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_test_rewrite_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_test_rewrite_", "embedding": null, "metadata": {"file_path": "dask/tests/test_rewrite.py", "file_name": "test_rewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 137, "end_line": 156, "span_ids": ["test_rewrite"], "tokens": 285}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rewrite():\n # Rewrite inside list\n term = (sum, [(add, 1, 1), (add, 1, 1), (add, 1, 1)])\n new_term = rs.rewrite(term)\n assert new_term == (add, (add, (inc, 1), (inc, 1)), (inc, 1))\n # Rules aren't applied to exhaustion, this can be further simplified\n new_term = rs.rewrite(new_term)\n assert new_term == (add, (add, (double, 1), 2), (inc, 1))\n term = (\n add,\n (add, (add, (add, 1, 2), (add, 1, 2)), (add, (add, 1, 2), (add, 1, 2))),\n 1,\n )\n assert rs.rewrite(term) == (inc, (double, (double, (add, 1, 2))))\n # Callable RewriteRule rhs\n term = (list, [1, 2, 3])\n assert rs.rewrite(term) == [1, 2, 3]\n term = (list, (map, inc, [1, 2, 3]))\n assert rs.rewrite(term) == term", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_sys_test_numpy_0_strided.assert_sizeof_x_8": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_sys_test_numpy_0_strided.assert_sizeof_x_8", "embedding": null, "metadata": {"file_path": "dask/tests/test_sizeof.py", "file_name": "test_sizeof.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 39, "span_ids": ["test_bytes_like", "imports", "test_containers", "test_numpy_0_strided", "test_base", "test_name", "test_numpy"], "tokens": 273}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import sys\nfrom array import array\n\nimport pytest\n\nfrom dask.sizeof 
import sizeof, getsizeof\nfrom dask.utils import funcname\n\n\ndef test_base():\n assert sizeof(1) == getsizeof(1)\n\n\ndef test_name():\n assert funcname(sizeof) == \"sizeof\"\n\n\ndef test_containers():\n assert sizeof([1, 2, [3]]) > (getsizeof(3) * 3 + getsizeof([]))\n\n\ndef test_bytes_like():\n assert 1000 <= sizeof(bytes(1000)) <= 2000\n assert 1000 <= sizeof(bytearray(1000)) <= 2000\n assert 1000 <= sizeof(memoryview(bytes(1000))) <= 2000\n assert 8000 <= sizeof(array(\"d\", range(1000))) <= 9000\n\n\ndef test_numpy():\n np = pytest.importorskip(\"numpy\")\n assert 8000 <= sizeof(np.empty(1000, dtype=\"f8\")) <= 9000\n dt = np.dtype(\"f8\")\n assert sizeof(dt) == sys.getsizeof(dt)\n\n\ndef test_numpy_0_strided():\n np = pytest.importorskip(\"numpy\")\n x = np.broadcast_to(1, (100, 100, 100))\n assert sizeof(x) <= 8", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_test_pandas_test_pandas.None_6": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_test_pandas_test_pandas.None_6", "embedding": null, "metadata": {"file_path": "dask/tests/test_sizeof.py", "file_name": "test_sizeof.py", "file_type": "text/x-python", "category": "test", "start_line": 42, "end_line": 55, "span_ids": ["test_pandas"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_pandas():\n pd = pytest.importorskip(\"pandas\")\n df = pd.DataFrame(\n {\"x\": [1, 2, 3], \"y\": [\"a\" * 100, \"b\" * 100, \"c\" * 100]}, index=[10, 20, 30]\n )\n\n assert sizeof(df) >= sizeof(df.x) + sizeof(df.y) - sizeof(df.index)\n assert sizeof(df.x) >= sizeof(df.index)\n assert sizeof(df.y) >= 100 * 3\n assert sizeof(df.index) >= 20\n\n assert isinstance(sizeof(df), int)\n assert isinstance(sizeof(df.x), int)\n assert isinstance(sizeof(df.index), int)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_test_pandas_multiindex_test_pandas_repeated_column.assert_sizeof_df_x_x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_test_pandas_multiindex_test_pandas_repeated_column.assert_sizeof_df_x_x", "embedding": null, "metadata": {"file_path": "dask/tests/test_sizeof.py", "file_name": "test_sizeof.py", "file_type": "text/x-python", "category": "test", "start_line": 58, "end_line": 71, "span_ids": ["test_pandas_repeated_column", "test_pandas_multiindex"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": "def test_pandas_multiindex():\n pd = pytest.importorskip(\"pandas\")\n index = pd.MultiIndex.from_product([range(5), [\"a\", \"b\", \"c\", \"d\", \"e\"]])\n actual_size = sys.getsizeof(index) + 1000 # adjust for serialization overhead\n\n assert 0.5 * actual_size < sizeof(index) < 2 * actual_size\n assert isinstance(sizeof(index), int)\n\n\ndef test_pandas_repeated_column():\n pd = pytest.importorskip(\"pandas\")\n df = pd.DataFrame({\"x\": [1, 2, 3]})\n\n assert sizeof(df[[\"x\", \"x\", \"x\"]]) > sizeof(df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_test_sparse_matrix_test_sparse_matrix.assert_sizeof_sp_tolil_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_test_sparse_matrix_test_sparse_matrix.assert_sizeof_sp_tolil_", "embedding": null, "metadata": {"file_path": "dask/tests/test_sizeof.py", "file_name": "test_sizeof.py", "file_type": "text/x-python", "category": "test", "start_line": 74, "end_line": 84, "span_ids": ["test_sparse_matrix"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_sparse_matrix():\n sparse = pytest.importorskip(\"scipy.sparse\")\n sp = sparse.eye(10)\n # These are the 32-bit Python 2.7 values.\n assert sizeof(sp.todia()) >= 152\n assert sizeof(sp.tobsr()) >= 232\n assert sizeof(sp.tocoo()) >= 240\n assert sizeof(sp.tocsc()) >= 232\n assert sizeof(sp.tocsr()) >= 232\n assert sizeof(sp.todok()) >= 192\n assert sizeof(sp.tolil()) >= 204", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_test_serires_object_dtype_test_dataframe_object_dtype.assert_sizeof_s_100000": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_test_serires_object_dtype_test_dataframe_object_dtype.assert_sizeof_s_100000", "embedding": null, "metadata": {"file_path": "dask/tests/test_sizeof.py", "file_name": "test_sizeof.py", "file_type": "text/x-python", "category": "test", "start_line": 87, "end_line": 102, "span_ids": ["test_dataframe_object_dtype", "test_serires_object_dtype"], "tokens": 175}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_serires_object_dtype():\n pd = pytest.importorskip(\"pandas\")\n s = pd.Series([\"a\"] * 1000)\n assert sizeof(\"a\") * 1000 < sizeof(s) < 2 * sizeof(\"a\") * 1000\n\n s = pd.Series([\"a\" * 1000] * 1000)\n assert sizeof(s) > 1000000\n\n\ndef 
test_dataframe_object_dtype():\n pd = pytest.importorskip(\"pandas\")\n df = pd.DataFrame({\"x\": [\"a\"] * 1000})\n assert sizeof(\"a\") * 1000 < sizeof(df) < 2 * sizeof(\"a\") * 1000\n\n s = pd.Series([\"a\" * 1000] * 1000)\n assert sizeof(s) > 1000000", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_test_empty_test_empty.assert_sizeof_empty_index": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_test_empty_test_empty.assert_sizeof_empty_index", "embedding": null, "metadata": {"file_path": "dask/tests/test_sizeof.py", "file_name": "test_sizeof.py", "file_type": "text/x-python", "category": "test", "start_line": 105, "end_line": 115, "span_ids": ["test_empty"], "tokens": 116}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_empty():\n pd = pytest.importorskip(\"pandas\")\n df = pd.DataFrame(\n {\"x\": [1, 2, 3], \"y\": [\"a\" * 100, \"b\" * 100, \"c\" * 100]}, index=[10, 20, 30]\n )\n empty = df.head(0)\n\n assert sizeof(empty) > 0\n assert sizeof(empty.x) > 0\n assert sizeof(empty.y) > 0\n assert sizeof(empty.index) > 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_test_pyarrow_table_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_test_pyarrow_table_", "embedding": null, "metadata": {"file_path": "dask/tests/test_sizeof.py", "file_name": "test_sizeof.py", "file_type": "text/x-python", "category": "test", "start_line": 118, "end_line": 150, "span_ids": ["test_dict", "test_pyarrow_table"], "tokens": 308}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_pyarrow_table():\n pd = pytest.importorskip(\"pandas\")\n pa = pytest.importorskip(\"pyarrow\")\n df = pd.DataFrame(\n {\"x\": [1, 2, 3], \"y\": [\"a\" * 100, \"b\" * 100, \"c\" * 100]}, index=[10, 20, 30]\n )\n table = pa.Table.from_pandas(df)\n\n assert sizeof(table) > sizeof(table.schema.metadata)\n assert isinstance(sizeof(table), int)\n assert isinstance(sizeof(table.columns[0]), int)\n assert isinstance(sizeof(table.columns[1]), int)\n assert isinstance(sizeof(table.columns[2]), int)\n\n empty = pa.Table.from_pandas(df.head(0))\n\n assert sizeof(empty) > sizeof(empty.schema.metadata)\n assert sizeof(empty.columns[0]) > 0\n assert sizeof(empty.columns[1]) > 0\n assert sizeof(empty.columns[2]) > 0\n\n\ndef test_dict():\n np = pytest.importorskip(\"numpy\")\n x = np.ones(10000)\n 
assert sizeof({\"x\": x}) > x.nbytes\n assert sizeof({\"x\": [x]}) > x.nbytes\n assert sizeof({\"x\": [{\"y\": x}]}) > x.nbytes\n\n d = {i: x for i in range(100)}\n assert sizeof(d) > x.nbytes * 100\n assert isinstance(sizeof(d), int)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_system.py_builtins_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_system.py_builtins_", "embedding": null, "metadata": {"file_path": "dask/tests/test_system.py", "file_name": "test_system.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 56, "span_ids": ["test_cpu_count", "imports", "test_cpu_count_cgroups"], "tokens": 334}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import builtins\nimport io\nimport os\nimport sys\n\nimport pytest\n\nfrom dask.system import cpu_count\n\npsutil = pytest.importorskip(\"psutil\")\n\n\ndef test_cpu_count():\n count = cpu_count()\n assert isinstance(count, int)\n assert count <= os.cpu_count()\n assert count >= 1\n\n\n@pytest.mark.parametrize(\"dirname\", [\"cpuacct,cpu\", \"cpu,cpuacct\", None])\ndef test_cpu_count_cgroups(dirname, monkeypatch):\n def mycpu_count():\n # Absurdly high, unlikely to match real value\n return 250\n\n monkeypatch.setattr(os, \"cpu_count\", mycpu_count)\n\n class MyProcess(object):\n def cpu_affinity(self):\n # No affinity set\n return []\n\n monkeypatch.setattr(psutil, \"Process\", MyProcess)\n\n if dirname:\n paths = {\n \"/sys/fs/cgroup/%s/cpu.cfs_quota_us\" % dirname: io.StringIO(\"2005\"),\n \"/sys/fs/cgroup/%s/cpu.cfs_period_us\" % dirname: io.StringIO(\"10\"),\n }\n builtin_open = builtins.open\n\n def myopen(path, *args, **kwargs):\n if path in paths:\n return paths.get(path)\n return builtin_open(path, *args, **kwargs)\n\n monkeypatch.setattr(builtins, \"open\", myopen)\n monkeypatch.setattr(sys, \"platform\", \"linux\")\n\n count = cpu_count()\n if dirname:\n # Rounds up\n assert count == 201\n else:\n assert count == 250", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_threaded.py_os_test_pool_kwarg.with_ThreadPool_3_as_poo.assert_get_dsk_x_pool": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_threaded.py_os_test_pool_kwarg.with_ThreadPool_3_as_poo.assert_get_dsk_x_pool", "embedding": null, "metadata": {"file_path": "dask/tests/test_threaded.py", "file_name": "test_threaded.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 74, "span_ids": ["test_reuse_pool", "imports", "test_get", "test_pool_kwarg", "test_exceptions_rise_to_top", "bad", "test_get_without_computation", "test_broken_callback", "test_nested_get"], "tokens": 535}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import os\nimport sys\nimport signal\nimport threading\nfrom multiprocessing.pool import ThreadPool\nfrom time import time, sleep\n\nimport pytest\n\nimport dask\nfrom dask.system import CPU_COUNT\nfrom dask.threaded import get\nfrom dask.utils_test import inc, add\n\n\ndef test_get():\n dsk = {\"x\": 1, \"y\": 2, \"z\": (inc, \"x\"), \"w\": (add, \"z\", \"y\")}\n assert get(dsk, \"w\") == 4\n assert get(dsk, [\"w\", \"z\"]) == (4, 2)\n\n\ndef test_nested_get():\n dsk = {\"x\": 1, \"y\": 2, \"a\": (add, \"x\", \"y\"), \"b\": (sum, [\"x\", \"y\"])}\n assert get(dsk, [\"a\", \"b\"]) == (3, 3)\n\n\ndef test_get_without_computation():\n dsk = {\"x\": 1}\n assert get(dsk, \"x\") == 1\n\n\ndef test_broken_callback():\n from dask.callbacks import Callback\n\n def _f_ok(*args, **kwargs):\n pass\n\n def _f_broken(*args, **kwargs):\n raise ValueError(\"my_exception\")\n\n dsk = {\"x\": 1}\n\n with Callback(start=_f_broken, finish=_f_ok):\n with Callback(start=_f_ok, finish=_f_ok):\n with pytest.raises(ValueError, match=\"my_exception\"):\n get(dsk, \"x\")\n\n\ndef bad(x):\n raise ValueError()\n\n\ndef test_exceptions_rise_to_top():\n dsk = {\"x\": 1, \"y\": (bad, \"x\")}\n pytest.raises(ValueError, lambda: get(dsk, \"y\"))\n\n\ndef test_reuse_pool():\n with ThreadPool() as pool:\n with dask.config.set(pool=pool):\n assert get({\"x\": (inc, 1)}, \"x\") == 2\n assert get({\"x\": (inc, 1)}, \"x\") == 2\n\n\ndef test_pool_kwarg():\n def f():\n sleep(0.01)\n return threading.get_ident()\n\n dsk = {(\"x\", i): (f,) for i in range(30)}\n dsk[\"x\"] = (len, (set, [(\"x\", i) for i in range(len(dsk))]))\n\n with ThreadPool(3) as pool:\n assert get(dsk, \"x\", pool=pool) == 3", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_threaded.py_test_threaded_within_thread_test_threaded_within_thread.while_threading_active_co.assert_time_start_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_threaded.py_test_threaded_within_thread_test_threaded_within_thread.while_threading_active_co.assert_time_start_5", "embedding": null, "metadata": {"file_path": "dask/tests/test_threaded.py", "file_name": "test_threaded.py", "file_type": "text/x-python", "category": "test", "start_line": 77, "end_line": 97, "span_ids": ["test_threaded_within_thread"], "tokens": 138}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_threaded_within_thread():\n L = []\n\n def f(i):\n result = get({\"x\": (lambda: i,)}, \"x\", num_workers=2)\n L.append(result)\n\n before = threading.active_count()\n\n for i in range(20):\n t = threading.Thread(target=f, args=(1,))\n t.daemon = True\n t.start()\n t.join()\n assert L 
== [1]\n del L[:]\n\n start = time() # wait for most threads to join\n while threading.active_count() > before + 10:\n sleep(0.01)\n assert time() < start + 5", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_threaded.py_test_dont_spawn_too_many_threads_test_thread_safety.assert_L_1_20": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_threaded.py_test_dont_spawn_too_many_threads_test_thread_safety.assert_L_1_20", "embedding": null, "metadata": {"file_path": "dask/tests/test_threaded.py", "file_name": "test_threaded.py", "file_type": "text/x-python", "category": "test", "start_line": 100, "end_line": 147, "span_ids": ["test_dont_spawn_too_many_threads_CPU_COUNT", "test_thread_safety", "test_dont_spawn_too_many_threads"], "tokens": 297}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dont_spawn_too_many_threads():\n before = threading.active_count()\n\n dsk = {(\"x\", i): (lambda: i,) for i in range(10)}\n dsk[\"x\"] = (sum, list(dsk))\n for i in range(20):\n get(dsk, \"x\", num_workers=4)\n\n after = threading.active_count()\n\n assert after <= before + 8\n\n\ndef test_dont_spawn_too_many_threads_CPU_COUNT():\n before = threading.active_count()\n\n dsk = {(\"x\", i): (lambda: i,) for i in range(10)}\n dsk[\"x\"] = (sum, list(dsk))\n for i in range(20):\n get(dsk, \"x\")\n\n after = threading.active_count()\n\n assert after <= before + CPU_COUNT * 2\n\n\ndef test_thread_safety():\n def f(x):\n return 1\n\n dsk = {\"x\": (sleep, 0.05), \"y\": (f, \"x\")}\n\n L = []\n\n def test_f():\n L.append(get(dsk, \"y\"))\n\n threads = []\n for i in range(20):\n t = threading.Thread(target=test_f)\n t.daemon = True\n t.start()\n threads.append(t)\n\n for thread in threads:\n thread.join()\n\n assert L == [1] * 20", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_threaded.py_test_interrupt_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_threaded.py_test_interrupt_", "embedding": null, "metadata": {"file_path": "dask/tests/test_threaded.py", "file_name": "test_threaded.py", "file_type": "text/x-python", "category": "test", "start_line": 150, "end_line": 184, "span_ids": ["test_interrupt"], "tokens": 252}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(\n \"xdist\" in sys.modules,\n reason=\"This test fails intermittently when using pytest-xdist (maybe)\",\n strict=False,\n)\ndef 
test_interrupt():\n # Windows implements `queue.get` using polling,\n # which means we can set an exception to interrupt the call to `get`.\n # Python 3 on other platforms requires sending SIGINT to the main thread.\n if os.name == \"nt\":\n from _thread import interrupt_main\n else:\n main_thread = threading.get_ident()\n\n def interrupt_main():\n signal.pthread_kill(main_thread, signal.SIGINT)\n\n def long_task():\n sleep(5)\n\n dsk = {(\"x\", i): (long_task,) for i in range(20)}\n dsk[\"x\"] = (len, list(dsk.keys()))\n try:\n interrupter = threading.Timer(0.5, interrupt_main)\n interrupter.start()\n start = time()\n get(dsk, \"x\")\n except KeyboardInterrupt:\n pass\n except Exception:\n assert False, \"Failed to interrupt\"\n stop = time()\n if stop - start > 4:\n assert False, \"Failed to interrupt\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_datetime_from_dask_highlevelgraph_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_datetime_from_dask_highlevelgraph_", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 34, "span_ids": ["imports"], "tokens": 141}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import datetime\nimport functools\nimport operator\nimport pickle\n\nimport numpy as np\nimport pytest\n\nfrom dask.utils import (\n getargspec,\n takes_multiple_arguments,\n Dispatch,\n random_state_data,\n memory_repr,\n methodcaller,\n M,\n skip_doctest,\n SerializableLock,\n funcname,\n ndeepmap,\n ensure_dict,\n extra_titles,\n asciitable,\n itemgetter,\n partial_by_order,\n has_keyword,\n derived_from,\n parse_timedelta,\n parse_bytes,\n is_arraylike,\n iter_chunks,\n)\nfrom dask.utils_test import inc\nfrom dask.highlevelgraph import HighLevelGraph", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_getargspec_test_getargspec.assert_getargspec_MyType_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_getargspec_test_getargspec.assert_getargspec_MyType_", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 37, "end_line": 57, "span_ids": ["test_getargspec"], "tokens": 147}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": "def test_getargspec():\n def func(x, y):\n pass\n\n assert getargspec(func).args == [\"x\", \"y\"]\n\n func2 = functools.partial(func, 2)\n # this is a bit of a lie, but maybe close enough\n assert getargspec(func2).args == [\"x\", \"y\"]\n\n def wrapper(*args, **kwargs):\n pass\n\n wrapper.__wrapped__ = func\n assert getargspec(wrapper).args == [\"x\", \"y\"]\n\n class MyType(object):\n def __init__(self, x, y):\n pass\n\n assert getargspec(MyType).args == [\"self\", \"x\", \"y\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_takes_multiple_arguments_test_takes_multiple_arguments.None_7": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_takes_multiple_arguments_test_takes_multiple_arguments.None_7", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 60, "end_line": 88, "span_ids": ["test_takes_multiple_arguments"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_takes_multiple_arguments():\n assert takes_multiple_arguments(map)\n assert not takes_multiple_arguments(sum)\n\n def multi(a, b, c):\n return a, b, c\n\n class Singular(object):\n def __init__(self, a):\n pass\n\n class Multi(object):\n def __init__(self, a, b):\n pass\n\n assert takes_multiple_arguments(multi)\n assert not takes_multiple_arguments(Singular)\n assert takes_multiple_arguments(Multi)\n\n def f():\n pass\n\n assert not takes_multiple_arguments(f)\n\n def vararg(*args):\n pass\n\n assert takes_multiple_arguments(vararg)\n assert not takes_multiple_arguments(vararg, varargs=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_dispatch_test_dispatch.assert_foo___doc___f__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_dispatch_test_dispatch.assert_foo___doc___f__", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 91, "end_line": 113, "span_ids": ["test_dispatch"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dispatch():\n foo = Dispatch()\n foo.register(int, lambda a: a + 1)\n foo.register(float, lambda a: a - 1)\n foo.register(tuple, lambda a: 
tuple(foo(i) for i in a))\n\n def f(a):\n \"\"\" My Docstring \"\"\"\n return a\n\n foo.register(object, f)\n\n class Bar(object):\n pass\n\n b = Bar()\n assert foo(1) == 2\n assert foo.dispatch(int)(1) == 2\n assert foo(1.0) == 0.0\n assert foo(b) == b\n assert foo((1, 2.0, b)) == (2, 1.0, b)\n\n assert foo.__doc__ == f.__doc__", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_dispatch_kwargs_test_dispatch_variadic_on_first_argument.assert_foo_1_0_2_0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_dispatch_kwargs_test_dispatch_variadic_on_first_argument.assert_foo_1_0_2_0_", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 116, "end_line": 129, "span_ids": ["test_dispatch_kwargs", "test_dispatch_variadic_on_first_argument"], "tokens": 112}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dispatch_kwargs():\n foo = Dispatch()\n foo.register(int, lambda a, b=10: a + b)\n\n assert foo(1, b=20) == 21\n\n\ndef test_dispatch_variadic_on_first_argument():\n foo = Dispatch()\n foo.register(int, lambda a, b: a + b)\n foo.register(float, lambda a, b: a - b)\n\n assert foo(1, 2) == 3\n assert foo(1.0, 2.0) == -1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_dispatch_lazy_test_dispatch_lazy.assert_foo_1_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_dispatch_lazy_test_dispatch_lazy.assert_foo_1_1", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 132, "end_line": 152, "span_ids": ["test_dispatch_lazy"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dispatch_lazy():\n # this tests the recursive component of dispatch\n foo = Dispatch()\n foo.register(int, lambda a: a)\n\n import decimal\n\n # keep it outside lazy dec for test\n def foo_dec(a):\n return a + 1\n\n @foo.register_lazy(\"decimal\")\n def register_decimal():\n import decimal\n\n foo.register(decimal.Decimal, foo_dec)\n\n # This test needs to be *before* any other calls\n assert foo.dispatch(decimal.Decimal) == foo_dec\n assert foo(decimal.Decimal(1)) == decimal.Decimal(2)\n assert foo(1) == 1", "start_char_idx": 
null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_random_state_data_test_random_state_data.None_1.assert_s1_s2_all_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_random_state_data_test_random_state_data.None_1.assert_s1_s2_all_", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 155, "end_line": 175, "span_ids": ["test_random_state_data"], "tokens": 164}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_random_state_data():\n seed = 37\n state = np.random.RandomState(seed)\n n = 10000\n\n # Use an integer\n states = random_state_data(n, seed)\n assert len(states) == n\n\n # Use RandomState object\n states2 = random_state_data(n, state)\n for s1, s2 in zip(states, states2):\n assert s1.shape == (624,)\n assert (s1 == s2).all()\n\n # Consistent ordering\n states = random_state_data(10, 1234)\n states2 = random_state_data(20, 1234)[:10]\n\n for s1, s2 in zip(states, states2):\n assert (s1 == s2).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_memory_repr_test_method_caller.assert_count_in_repr_me": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_memory_repr_test_method_caller.assert_count_in_repr_me", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 178, "end_line": 193, "span_ids": ["test_memory_repr", "test_method_caller"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_memory_repr():\n for power, mem_repr in enumerate([\"1.0 bytes\", \"1.0 KB\", \"1.0 MB\", \"1.0 GB\"]):\n assert memory_repr(1024 ** power) == mem_repr\n\n\ndef test_method_caller():\n a = [1, 2, 3, 3, 3]\n f = methodcaller(\"count\")\n assert f(a, 3) == a.count(3)\n assert methodcaller(\"count\") is f\n assert M.count is f\n assert pickle.loads(pickle.dumps(f)) is f\n assert \"count\" in dir(M)\n\n assert \"count\" in str(methodcaller(\"count\"))\n assert \"count\" in repr(methodcaller(\"count\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_skip_doctest_test_skip_doctest.assert_res_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_skip_doctest_test_skip_doctest.assert_res_expected", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 196, "end_line": 221, "span_ids": ["test_skip_doctest"], "tokens": 138}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_skip_doctest():\n example = \"\"\">>> xxx\n>>>\n>>> # comment\n>>> xxx\"\"\"\n\n res = skip_doctest(example)\n assert (\n res\n == \"\"\">>> xxx # doctest: +SKIP\n>>>\n>>> # comment\n>>> xxx # doctest: +SKIP\"\"\"\n )\n\n assert skip_doctest(None) == \"\"\n\n example = \"\"\"\n>>> 1 + 2 # doctest: +ELLIPSES\n3\"\"\"\n\n expected = \"\"\"\n>>> 1 + 2 # doctest: +ELLIPSES, +SKIP\n3\"\"\"\n res = skip_doctest(example)\n assert res == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_extra_titles_test_asciitable.assert_res_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_extra_titles_test_asciitable.assert_res_", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 224, "end_line": 270, "span_ids": ["test_extra_titles", "test_asciitable"], "tokens": 198}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_extra_titles():\n example = \"\"\"\n\n Notes\n -----\n hello\n\n Foo\n ---\n\n Notes\n -----\n bar\n \"\"\"\n\n expected = \"\"\"\n\n Notes\n -----\n hello\n\n Foo\n ---\n\n Extra Notes\n -----------\n bar\n \"\"\"\n\n assert extra_titles(example) == expected\n\n\ndef test_asciitable():\n res = asciitable(\n [\"fruit\", \"color\"],\n [(\"apple\", \"red\"), (\"banana\", \"yellow\"), (\"tomato\", \"red\"), (\"pear\", \"green\")],\n )\n assert res == (\n \"+--------+--------+\\n\"\n \"| fruit | color |\\n\"\n \"+--------+--------+\\n\"\n \"| apple | red |\\n\"\n \"| banana | yellow |\\n\"\n \"| tomato | red |\\n\"\n \"| pear | green |\\n\"\n \"+--------+--------+\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_SerializableLock_test_SerializableLock.None_4.for_y_in_b_b2_b3_.with_y_.with_x_.pass": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_SerializableLock_test_SerializableLock.None_4.for_y_in_b_b2_b3_.with_y_.with_x_.pass", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 273, "end_line": 305, "span_ids": ["test_SerializableLock"], "tokens": 198}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_SerializableLock():\n a = SerializableLock()\n b = SerializableLock()\n with a:\n pass\n\n with a:\n with b:\n pass\n\n with a:\n assert not a.acquire(False)\n\n a2 = pickle.loads(pickle.dumps(a))\n a3 = pickle.loads(pickle.dumps(a))\n a4 = pickle.loads(pickle.dumps(a2))\n\n for x in [a, a2, a3, a4]:\n for y in [a, a2, a3, a4]:\n with x:\n assert not y.acquire(False)\n\n b2 = pickle.loads(pickle.dumps(b))\n b3 = pickle.loads(pickle.dumps(b2))\n\n for x in [a, a2, a3, a4]:\n for y in [b, b2, b3]:\n with x:\n with y:\n pass\n with y:\n with x:\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_SerializableLock_name_collision_test_funcname_numpy_vectorize.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_SerializableLock_name_collision_test_funcname_numpy_vectorize.None_1", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 308, "end_line": 393, "span_ids": ["test_funcname_toolz", "test_funcname_long", "test_SerializableLock_locked", "test_funcname_numpy_vectorize", "test_SerializableLock_acquire_blocking", "test_funcname_multipledispatch", "test_SerializableLock_name_collision", "test_funcname"], "tokens": 569}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_SerializableLock_name_collision():\n a = SerializableLock(\"a\")\n b = SerializableLock(\"b\")\n c = SerializableLock(\"a\")\n d = SerializableLock()\n\n assert a.lock is not b.lock\n assert a.lock is c.lock\n assert d.lock not in (a.lock, b.lock, c.lock)\n\n\ndef test_SerializableLock_locked():\n a = SerializableLock(\"a\")\n assert not a.locked()\n with a:\n assert a.locked()\n assert not a.locked()\n\n\ndef test_SerializableLock_acquire_blocking():\n a = SerializableLock(\"a\")\n assert a.acquire(blocking=True)\n assert not a.acquire(blocking=False)\n a.release()\n\n\ndef test_funcname():\n 
def foo(a, b, c):\n pass\n\n assert funcname(foo) == \"foo\"\n assert funcname(functools.partial(foo, a=1)) == \"foo\"\n assert funcname(M.sum) == \"sum\"\n assert funcname(lambda: 1) == \"lambda\"\n\n class Foo(object):\n pass\n\n assert funcname(Foo) == \"Foo\"\n assert \"Foo\" in funcname(Foo())\n\n\ndef test_funcname_long():\n def a_long_function_name_11111111111111111111111111111111111111111111111():\n pass\n\n result = funcname(\n a_long_function_name_11111111111111111111111111111111111111111111111\n )\n assert \"a_long_function_name\" in result\n assert len(result) < 60\n\n\ndef test_funcname_toolz():\n toolz = pytest.importorskip(\"tlz\")\n\n @toolz.curry\n def foo(a, b, c):\n pass\n\n assert funcname(foo) == \"foo\"\n assert funcname(foo(1)) == \"foo\"\n\n\ndef test_funcname_multipledispatch():\n md = pytest.importorskip(\"multipledispatch\")\n\n @md.dispatch(int, int, int)\n def foo(a, b, c):\n pass\n\n assert funcname(foo) == \"foo\"\n assert funcname(functools.partial(foo, a=1)) == \"foo\"\n\n\ndef test_funcname_numpy_vectorize():\n np = pytest.importorskip(\"numpy\")\n\n vfunc = np.vectorize(int)\n assert funcname(vfunc) == \"vectorize_int\"\n\n # Regression test for https://github.com/pydata/xarray/issues/3303\n # Partial functions don't have a __name__ attribute\n func = functools.partial(np.add, out=None)\n vfunc = np.vectorize(func)\n assert funcname(vfunc) == \"vectorize_add\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_ndeepmap_test_ndeepmap.assert_ndeepmap_3_inc_L": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_ndeepmap_test_ndeepmap.assert_ndeepmap_3_inc_L", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 396, "end_line": 410, "span_ids": ["test_ndeepmap"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_ndeepmap():\n L = 1\n assert ndeepmap(0, inc, L) == 2\n\n L = [1]\n assert ndeepmap(0, inc, L) == 2\n\n L = [1, 2, 3]\n assert ndeepmap(1, inc, L) == [2, 3, 4]\n\n L = [[1, 2], [3, 4]]\n assert ndeepmap(2, inc, L) == [[2, 3], [4, 5]]\n\n L = [[[1, 2], [3, 4, 5]], [[6], []]]\n assert ndeepmap(3, inc, L) == [[[2, 3], [4, 5, 6]], [[7], []]]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_ensure_dict_test_has_keyword.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_ensure_dict_test_has_keyword.None_4", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", 
"category": "test", "start_line": 413, "end_line": 452, "span_ids": ["test_ensure_dict", "test_has_keyword", "test_itemgetter", "test_partial_by_order"], "tokens": 273}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_ensure_dict():\n d = {\"x\": 1}\n assert ensure_dict(d) is d\n hlg = HighLevelGraph.from_collections(\"x\", d)\n assert type(ensure_dict(hlg)) is dict\n assert ensure_dict(hlg) == d\n\n class mydict(dict):\n pass\n\n md = mydict()\n md[\"x\"] = 1\n assert type(ensure_dict(md)) is dict\n assert ensure_dict(md) == d\n\n\ndef test_itemgetter():\n data = [1, 2, 3]\n g = itemgetter(1)\n assert g(data) == 2\n g2 = pickle.loads(pickle.dumps(g))\n assert g2(data) == 2\n assert g2.index == 1\n\n\ndef test_partial_by_order():\n assert partial_by_order(5, function=operator.add, other=[(1, 20)]) == 25\n\n\ndef test_has_keyword():\n def foo(a, b, c=None):\n pass\n\n assert has_keyword(foo, \"a\")\n assert has_keyword(foo, \"b\")\n assert has_keyword(foo, \"c\")\n\n bar = functools.partial(foo, a=1)\n assert has_keyword(bar, \"b\")\n assert has_keyword(bar, \"c\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_derived_from_test_derived_from.assert_extra_docstring": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_derived_from_test_derived_from.assert_extra_docstring", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 455, "end_line": 489, "span_ids": ["test_derived_from"], "tokens": 224}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_derived_from():\n class Foo:\n def f(a, b):\n \"\"\"A super docstring\n\n An explanation\n\n Parameters\n ----------\n a: int\n an explanation of a\n b: float\n an explanation of b\n \"\"\"\n\n class Bar:\n @derived_from(Foo)\n def f(a, c):\n pass\n\n class Zap:\n @derived_from(Foo)\n def f(a, c):\n \"extra docstring\"\n pass\n\n assert Bar.f.__doc__.strip().startswith(\"A super docstring\")\n assert \"Foo.f\" in Bar.f.__doc__\n assert any(\"inconsistencies\" in line for line in Bar.f.__doc__.split(\"\\n\")[:7])\n\n [b_arg] = [line for line in Bar.f.__doc__.split(\"\\n\") if \"b:\" in line]\n assert \"not supported\" in b_arg.lower()\n assert \"dask\" in b_arg.lower()\n\n assert \" extra docstring\\n\\n\" in Zap.f.__doc__", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_derived_from_func_test_derived_from_dask_dataframe.assert_dask_in_axis_arg": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_derived_from_func_test_derived_from_dask_dataframe.assert_dask_in_axis_arg", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 492, "end_line": 514, "span_ids": ["test_derived_from_func", "test_derived_from_dask_dataframe"], "tokens": 171}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_derived_from_func():\n import builtins\n\n @derived_from(builtins)\n def sum():\n \"extra docstring\"\n pass\n\n assert \"extra docstring\\n\\n\" in sum.__doc__\n assert \"Return the sum of\" in sum.__doc__\n assert \"This docstring was copied from builtins.sum\" in sum.__doc__\n\n\ndef test_derived_from_dask_dataframe():\n dd = pytest.importorskip(\"dask.dataframe\")\n\n assert \"inconsistencies\" in dd.DataFrame.dropna.__doc__\n\n [axis_arg] = [\n line for line in dd.DataFrame.dropna.__doc__.split(\"\\n\") if \"axis :\" in line\n ]\n assert \"not supported\" in axis_arg.lower()\n assert \"dask\" in axis_arg.lower()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_parse_bytes_test_parse_bytes.assert_parse_bytes_5GB_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_parse_bytes_test_parse_bytes.assert_parse_bytes_5GB_", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 517, "end_line": 529, "span_ids": ["test_parse_bytes"], "tokens": 170}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_parse_bytes():\n assert parse_bytes(\"100\") == 100\n assert parse_bytes(\"100 MB\") == 100000000\n assert parse_bytes(\"100M\") == 100000000\n assert parse_bytes(\"5kB\") == 5000\n assert parse_bytes(\"5.4 kB\") == 5400\n assert parse_bytes(\"1kiB\") == 1024\n assert parse_bytes(\"1Mi\") == 2 ** 20\n assert parse_bytes(\"1e6\") == 1000000\n assert parse_bytes(\"1e6 kB\") == 1000000000\n assert parse_bytes(\"MB\") == 1000000\n assert parse_bytes(123) == 123\n assert parse_bytes(\".5GB\") == 500000000", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_parse_timedelta_test_parse_timedelta.assert_parse_timedelta_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_parse_timedelta_test_parse_timedelta.assert_parse_timedelta_1_", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 532, "end_line": 557, "span_ids": ["test_parse_timedelta"], "tokens": 271}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_parse_timedelta():\n for text, value in [\n (\"1s\", 1),\n (\"100ms\", 0.1),\n (\"5S\", 5),\n (\"5.5s\", 5.5),\n (\"5.5 s\", 5.5),\n (\"1 second\", 1),\n (\"3.3 seconds\", 3.3),\n (\"3.3 milliseconds\", 0.0033),\n (\"3500 us\", 0.0035),\n (\"1 ns\", 1e-9),\n (\"2m\", 120),\n (\"2 minutes\", 120),\n (None, None),\n (3, 3),\n (datetime.timedelta(seconds=2), 2),\n (datetime.timedelta(milliseconds=100), 0.1),\n ]:\n result = parse_timedelta(text)\n assert result == value or abs(result - value) < 1e-14\n\n assert parse_timedelta(\"1ms\", default=\"seconds\") == 0.001\n assert parse_timedelta(\"1\", default=\"seconds\") == 1\n assert parse_timedelta(\"1\", default=\"ms\") == 0.001\n assert parse_timedelta(1, default=\"ms\") == 0.001", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_is_arraylike_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_is_arraylike_", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 560, "end_line": 585, "span_ids": ["test_iter_chunks", "test_is_arraylike"], "tokens": 275}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_is_arraylike():\n assert is_arraylike(0) is False\n assert is_arraylike(()) is False\n assert is_arraylike(0) is False\n assert is_arraylike([]) is False\n assert is_arraylike([0]) is False\n\n assert is_arraylike(np.empty(())) is True\n assert is_arraylike(np.empty((0,))) is True\n assert is_arraylike(np.empty((0, 0))) is True\n\n\ndef test_iter_chunks():\n sizes = [14, 8, 5, 9, 7, 9, 1, 19, 8, 19]\n assert list(iter_chunks(sizes, 19)) == [\n [14],\n [8, 5],\n [9, 7],\n [9, 1],\n [19],\n [8],\n [19],\n ]\n assert list(iter_chunks(sizes, 28)) == [[14, 8, 5], [9, 7, 9, 1], [19, 8], [19]]\n assert list(iter_chunks(sizes, 67)) == [[14, 8, 5, 9, 7, 9, 1], [19, 8, 19]]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/threaded.py___pack_exception.return.e_sys_exc_info_2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/threaded.py___pack_exception.return.e_sys_exc_info_2_", "embedding": null, "metadata": {"file_path": "dask/threaded.py", "file_name": "threaded.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 30, "span_ids": ["pack_exception", "_thread_get_id", "impl", "docstring"], "tokens": 128}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"\nA threaded shared-memory scheduler\n\nSee local.py\n\"\"\"\nimport atexit\nimport sys\nfrom collections import defaultdict\nfrom multiprocessing.pool import ThreadPool\nimport threading\nfrom threading import current_thread, Lock\n\nfrom . import config\nfrom .system import CPU_COUNT\nfrom .local import get_async\nfrom .utils_test import inc, add # noqa: F401\n\n\ndef _thread_get_id():\n return current_thread().ident\n\n\nmain_thread = current_thread()\ndefault_pool = None\npools = defaultdict(dict)\npools_lock = Lock()\n\n\ndef pack_exception(e, dumps):\n return e, sys.exc_info()[2]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/threaded.py_get_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/threaded.py_get_", "embedding": null, "metadata": {"file_path": "dask/threaded.py", "file_name": "threaded.py", "file_type": "text/x-python", "category": "implementation", "start_line": 33, "end_line": 97, "span_ids": ["get"], "tokens": 439}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get(dsk, result, cache=None, num_workers=None, pool=None, **kwargs):\n \"\"\"Threaded cached implementation of dask.get\n\n Parameters\n ----------\n\n dsk: dict\n A dask dictionary specifying a workflow\n result: key or list of keys\n Keys corresponding to desired data\n num_workers: integer of thread count\n The number of threads to use in the ThreadPool that will actually execute tasks\n cache: dict-like (optional)\n Temporary storage of results\n\n Examples\n --------\n\n >>> dsk = {'x': 1, 'y': 2, 'z': (inc, 'x'), 'w': (add, 'z', 'y')}\n >>> get(dsk, 'w')\n 4\n >>> get(dsk, ['w', 'y'])\n (4, 2)\n \"\"\"\n global default_pool\n pool = pool or config.get(\"pool\", None)\n num_workers = num_workers or config.get(\"num_workers\", None)\n thread = current_thread()\n\n with pools_lock:\n if pool is None:\n if num_workers is None and thread is main_thread:\n if default_pool is None:\n default_pool = ThreadPool(CPU_COUNT)\n 
atexit.register(default_pool.close)\n pool = default_pool\n elif thread in pools and num_workers in pools[thread]:\n pool = pools[thread][num_workers]\n else:\n pool = ThreadPool(num_workers)\n atexit.register(pool.close)\n pools[thread][num_workers] = pool\n\n results = get_async(\n pool.apply_async,\n len(pool._pool),\n dsk,\n result,\n cache=cache,\n get_id=_thread_get_id,\n pack_exception=pack_exception,\n **kwargs\n )\n\n # Cleanup pools associated to dead threads\n with pools_lock:\n active_threads = set(threading.enumerate())\n if thread is not main_thread:\n for t in list(pools):\n if t not in active_threads:\n for p in pools.pop(t).values():\n p.close()\n\n return results", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_from_datetime_import_time_apply.if_kwargs_.else_.return.func_args_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_from_datetime_import_time_apply.if_kwargs_.else_.return.func_args_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 31, "span_ids": ["apply", "imports"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from datetime import timedelta\nimport functools\nimport inspect\nimport os\nimport shutil\nimport sys\nimport tempfile\nimport re\nfrom errno import ENOENT\nfrom collections.abc import Iterator\nfrom contextlib import contextmanager\nfrom importlib import import_module\nfrom numbers import Integral, Number\nfrom threading import Lock\nimport uuid\nfrom weakref import WeakValueDictionary\nfrom functools import lru_cache\n\nfrom .core import get_deps\n\n\nsystem_encoding = sys.getdefaultencoding()\nif system_encoding == \"ascii\":\n system_encoding = \"utf-8\"\n\n\ndef apply(func, args, kwargs=None):\n if kwargs:\n return func(*args, **kwargs)\n else:\n return func(*args)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_deepmap_deepmap.if_isinstance_seqs_0_l.else_.return.func_seqs_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_deepmap_deepmap.if_isinstance_seqs_0_l.else_.return.func_seqs_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 34, "end_line": 48, "span_ids": ["deepmap"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "def deepmap(func, *seqs):\n \"\"\"Apply function inside nested lists\n\n >>> inc = lambda x: x + 1\n >>> deepmap(inc, [[1, 2], [3, 4]])\n [[2, 3], [4, 5]]\n\n >>> add = lambda x, y: x + y\n >>> deepmap(add, [[1, 2], [3, 4]], [[10, 20], [30, 40]])\n [[11, 22], [33, 44]]\n \"\"\"\n if isinstance(seqs[0], (list, Iterator)):\n return [deepmap(func, *items) for items in zip(*seqs)]\n else:\n return func(*seqs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_homogeneous_deepmap_ndeepmap.if_n_1_.else_.return.func_seq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_homogeneous_deepmap_ndeepmap.if_n_1_.else_.return.func_seq_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 51, "end_line": 79, "span_ids": ["homogeneous_deepmap", "ndeepmap"], "tokens": 216}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def homogeneous_deepmap(func, seq):\n if not seq:\n return seq\n n = 0\n tmp = seq\n while isinstance(tmp, list):\n n += 1\n tmp = tmp[0]\n\n return ndeepmap(n, func, seq)\n\n\ndef ndeepmap(n, func, seq):\n \"\"\"Call a function on every element within a nested container\n\n >>> def inc(x):\n ... 
return x + 1\n >>> L = [[1, 2], [3, 4, 5]]\n >>> ndeepmap(2, inc, L)\n [[2, 3], [4, 5, 6]]\n \"\"\"\n if n == 1:\n return [func(item) for item in seq]\n elif n > 1:\n return [ndeepmap(n - 1, func, item) for item in seq]\n elif isinstance(seq, list):\n return func(seq[0])\n else:\n return func(seq)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_ignoring_IndexCallable.__getitem__.return.self_fn_key_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_ignoring_IndexCallable.__getitem__.return.self_fn_key_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 82, "end_line": 189, "span_ids": ["tmpdir", "tmpfile", "import_required", "IndexCallable", "tmp_cwd", "filetext", "IndexCallable.__init__", "IndexCallable.__getitem__", "noop_context", "changed_cwd", "ignoring"], "tokens": 481}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@contextmanager\ndef ignoring(*exceptions):\n try:\n yield\n except exceptions:\n pass\n\n\ndef import_required(mod_name, error_msg):\n \"\"\"Attempt to import a required dependency.\n\n Raises a RuntimeError if the requested module is not available.\n \"\"\"\n try:\n return import_module(mod_name)\n except ImportError as e:\n raise RuntimeError(error_msg) from e\n\n\n@contextmanager\ndef tmpfile(extension=\"\", dir=None):\n extension = \".\" + extension.lstrip(\".\")\n handle, filename = tempfile.mkstemp(extension, dir=dir)\n os.close(handle)\n os.remove(filename)\n\n try:\n yield filename\n finally:\n if os.path.exists(filename):\n if os.path.isdir(filename):\n shutil.rmtree(filename)\n else:\n with ignoring(OSError):\n os.remove(filename)\n\n\n@contextmanager\ndef tmpdir(dir=None):\n dirname = tempfile.mkdtemp(dir=dir)\n\n try:\n yield dirname\n finally:\n if os.path.exists(dirname):\n if os.path.isdir(dirname):\n with ignoring(OSError):\n shutil.rmtree(dirname)\n else:\n with ignoring(OSError):\n os.remove(dirname)\n\n\n@contextmanager\ndef filetext(text, extension=\"\", open=open, mode=\"w\"):\n with tmpfile(extension=extension) as filename:\n f = open(filename, mode=mode)\n try:\n f.write(text)\n finally:\n try:\n f.close()\n except AttributeError:\n pass\n\n yield filename\n\n\n@contextmanager\ndef changed_cwd(new_cwd):\n old_cwd = os.getcwd()\n os.chdir(new_cwd)\n try:\n yield\n finally:\n os.chdir(old_cwd)\n\n\n@contextmanager\ndef tmp_cwd(dir=None):\n with tmpdir(dir) as dirname:\n with changed_cwd(dirname):\n yield dirname\n\n\n@contextmanager\ndef noop_context():\n yield\n\n\nclass IndexCallable(object):\n \"\"\"Provide getitem syntax for functions\n\n >>> def inc(x):\n ... 
return x + 1\n\n >>> I = IndexCallable(inc)\n >>> I[3]\n 4\n \"\"\"\n\n __slots__ = (\"fn\",)\n\n def __init__(self, fn):\n self.fn = fn\n\n def __getitem__(self, key):\n return self.fn(key)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_filetexts_concrete.return.seq": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_filetexts_concrete.return.seq", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 192, "end_line": 238, "span_ids": ["filetexts", "concrete"], "tokens": 299}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@contextmanager\ndef filetexts(d, open=open, mode=\"t\", use_tmpdir=True):\n \"\"\"Dumps a number of textfiles to disk\n\n d - dict\n a mapping from filename to text like {'a.csv': '1,1\\n2,2'}\n\n Since this is meant for use in tests, this context manager will\n automatically switch to a temporary current directory, to avoid\n race conditions when running tests in parallel.\n \"\"\"\n with (tmp_cwd() if use_tmpdir else noop_context()):\n for filename, text in d.items():\n try:\n os.makedirs(os.path.dirname(filename))\n except OSError:\n pass\n f = open(filename, \"w\" + mode)\n try:\n f.write(text)\n finally:\n try:\n f.close()\n except AttributeError:\n pass\n\n yield list(d)\n\n for filename in d:\n if os.path.exists(filename):\n with ignoring(OSError):\n os.remove(filename)\n\n\ndef concrete(seq):\n \"\"\"Make nested iterators concrete lists\n\n >>> data = [[1, 2], [3, 4]]\n >>> seq = iter(map(iter, data))\n >>> concrete(seq)\n [[1, 2], [3, 4]]\n \"\"\"\n if isinstance(seq, Iterator):\n seq = list(seq)\n if isinstance(seq, (tuple, list)):\n seq = list(map(concrete, seq))\n return seq", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_pseudorandom_pseudorandom.return.out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_pseudorandom_pseudorandom.return.out", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 241, "end_line": 265, "span_ids": ["pseudorandom"], "tokens": 267}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pseudorandom(n, p, random_state=None):\n \"\"\"Pseudorandom array of integer indexes\n\n >>> pseudorandom(5, [0.5, 0.5], random_state=123)\n 
array([1, 0, 0, 1, 1], dtype=int8)\n\n >>> pseudorandom(10, [0.5, 0.2, 0.2, 0.1], random_state=5)\n array([0, 2, 0, 3, 0, 1, 2, 1, 0, 0], dtype=int8)\n \"\"\"\n import numpy as np\n\n p = list(p)\n cp = np.cumsum([0] + p)\n assert np.allclose(1, cp[-1])\n assert len(p) < 256\n\n if not isinstance(random_state, np.random.RandomState):\n random_state = np.random.RandomState(random_state)\n\n x = random_state.random_sample(n)\n out = np.empty(n, dtype=\"i1\")\n\n for i, (low, high) in enumerate(zip(cp[:-1], cp[1:])):\n out[(x >= low) & (x < high)] = i\n return out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_random_state_data_random_state_data.return.l": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_random_state_data_random_state_data.return.l", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 268, "end_line": 289, "span_ids": ["random_state_data"], "tokens": 184}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def random_state_data(n, random_state=None):\n \"\"\"Return a list of arrays that can initialize\n ``np.random.RandomState``.\n\n Parameters\n ----------\n n : int\n Number of arrays to return.\n random_state : int or np.random.RandomState, optional\n If an int, is used to seed a new ``RandomState``.\n \"\"\"\n import numpy as np\n\n if not all(\n hasattr(random_state, attr) for attr in [\"normal\", \"beta\", \"bytes\", \"uniform\"]\n ):\n random_state = np.random.RandomState(random_state)\n\n random_data = random_state.bytes(624 * n * 4) # `n * 624` 32-bit integers\n l = list(np.frombuffer(random_data, dtype=np.uint32).reshape((n, -1)))\n assert len(l) == n\n return l", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_is_integer_getargspec.if_isinstance_func_type_.else_.return.inspect_getfullargspec_fu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_is_integer_getargspec.if_isinstance_func_type_.else_.return.inspect_getfullargspec_fu", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 292, "end_line": 380, "span_ids": ["getargspec", "impl:6", "is_integer"], "tokens": 349}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def is_integer(i):\n \"\"\"\n >>> is_integer(6)\n 
True\n    >>> is_integer(42.0)\n    True\n    >>> is_integer('abc')\n    False\n    \"\"\"\n    return isinstance(i, Integral) or (isinstance(i, float) and i.is_integer())\n\n\nONE_ARITY_BUILTINS = set(\n    [\n        abs,\n        all,\n        any,\n        ascii,\n        bool,\n        bytearray,\n        bytes,\n        callable,\n        chr,\n        classmethod,\n        complex,\n        dict,\n        dir,\n        enumerate,\n        eval,\n        float,\n        format,\n        frozenset,\n        hash,\n        hex,\n        id,\n        int,\n        iter,\n        len,\n        list,\n        max,\n        min,\n        next,\n        oct,\n        open,\n        ord,\n        range,\n        repr,\n        reversed,\n        round,\n        set,\n        slice,\n        sorted,\n        staticmethod,\n        str,\n        sum,\n        tuple,\n        type,\n        vars,\n        zip,\n        memoryview,\n    ]\n)\nMULTI_ARITY_BUILTINS = set(\n    [\n        compile,\n        delattr,\n        divmod,\n        filter,\n        getattr,\n        hasattr,\n        isinstance,\n        issubclass,\n        map,\n        pow,\n        setattr,\n    ]\n)\n\n\ndef getargspec(func):\n    \"\"\"Version of inspect.getargspec that works with partial and wraps.\"\"\"\n    if isinstance(func, functools.partial):\n        return getargspec(func.func)\n\n    func = getattr(func, \"__wrapped__\", func)\n    if isinstance(func, type):\n        return inspect.getfullargspec(func.__init__)\n    else:\n        return inspect.getfullargspec(func)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_takes_multiple_arguments_takes_multiple_arguments.return.len_spec_args_ndefault": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_takes_multiple_arguments_takes_multiple_arguments.return.len_spec_args_ndefault", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 383, "end_line": 427, "span_ids": ["takes_multiple_arguments"], "tokens": 258}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def takes_multiple_arguments(func, varargs=True):\n    \"\"\"Does this function take multiple arguments?\n\n    >>> def f(x, y): pass\n    >>> takes_multiple_arguments(f)\n    True\n\n    >>> def f(x): pass\n    >>> takes_multiple_arguments(f)\n    False\n\n    >>> def f(x, y=None): pass\n    >>> takes_multiple_arguments(f)\n    False\n\n    >>> def f(*args): pass\n    >>> takes_multiple_arguments(f)\n    True\n\n    >>> class Thing(object):\n    ...     
def __init__(self, a): pass\n >>> takes_multiple_arguments(Thing)\n False\n\n \"\"\"\n if func in ONE_ARITY_BUILTINS:\n return False\n elif func in MULTI_ARITY_BUILTINS:\n return True\n\n try:\n spec = getargspec(func)\n except Exception:\n return False\n\n try:\n is_constructor = spec.args[0] == \"self\" and isinstance(func, type)\n except Exception:\n is_constructor = False\n\n if varargs and spec.varargs:\n return True\n\n ndefaults = 0 if spec.defaults is None else len(spec.defaults)\n return len(spec.args) - ndefaults - is_constructor > 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_get_named_args_Dispatch.register_lazy.return.wrapper_func_if_func_is_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_get_named_args_Dispatch.register_lazy.return.wrapper_func_if_func_is_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 430, "end_line": 472, "span_ids": ["Dispatch", "get_named_args", "Dispatch.__init__", "Dispatch.register", "Dispatch.register_lazy"], "tokens": 270}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_named_args(func):\n \"\"\"Get all non ``*args/**kwargs`` arguments for a function\"\"\"\n s = inspect.signature(func)\n return [\n n\n for n, p in s.parameters.items()\n if p.kind in [p.POSITIONAL_OR_KEYWORD, p.POSITIONAL_ONLY, p.KEYWORD_ONLY]\n ]\n\n\nclass Dispatch(object):\n \"\"\"Simple single dispatch.\"\"\"\n\n def __init__(self, name=None):\n self._lookup = {}\n self._lazy = {}\n if name:\n self.__name__ = name\n\n def register(self, type, func=None):\n \"\"\"Register dispatch of `func` on arguments of type `type`\"\"\"\n\n def wrapper(func):\n if isinstance(type, tuple):\n for t in type:\n self.register(t, func)\n else:\n self._lookup[type] = func\n return func\n\n return wrapper(func) if func is not None else wrapper\n\n def register_lazy(self, toplevel, func=None):\n \"\"\"\n Register a registration function which will be called if the\n *toplevel* module (e.g. 
'pandas') is ever loaded.\n \"\"\"\n\n def wrapper(func):\n self._lazy[toplevel] = func\n return func\n\n return wrapper(func) if func is not None else wrapper", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_Dispatch.dispatch_Dispatch.__doc__.try_.except_TypeError_.return._Single_Dispatch_for_s_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_Dispatch.dispatch_Dispatch.__doc__.try_.except_TypeError_.return._Single_Dispatch_for_s_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 474, "end_line": 513, "span_ids": ["Dispatch.__doc__", "Dispatch.dispatch", "Dispatch.__call__"], "tokens": 281}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Dispatch(object):\n\n def dispatch(self, cls):\n \"\"\"Return the function implementation for the given ``cls``\"\"\"\n # Fast path with direct lookup on cls\n lk = self._lookup\n try:\n impl = lk[cls]\n except KeyError:\n pass\n else:\n return impl\n # Is a lazy registration function present?\n toplevel, _, _ = cls.__module__.partition(\".\")\n try:\n register = self._lazy.pop(toplevel)\n except KeyError:\n pass\n else:\n register()\n return self.dispatch(cls) # recurse\n # Walk the MRO and cache the lookup result\n for cls2 in inspect.getmro(cls)[1:]:\n if cls2 in lk:\n lk[cls] = lk[cls2]\n return lk[cls2]\n raise TypeError(\"No dispatch for {0}\".format(cls))\n\n def __call__(self, arg, *args, **kwargs):\n \"\"\"\n Call the corresponding method based on type of argument.\n \"\"\"\n meth = self.dispatch(type(arg))\n return meth(arg, *args, **kwargs)\n\n @property\n def __doc__(self):\n try:\n func = self.dispatch(object)\n return func.__doc__\n except TypeError:\n return \"Single Dispatch for %s\" % self.__name__", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_ensure_not_exists_skip_doctest.return._n_join__skip_doctest_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_ensure_not_exists_skip_doctest.return._n_join__skip_doctest_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 516, "end_line": 544, "span_ids": ["ensure_not_exists", "skip_doctest", "_skip_doctest"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def 
ensure_not_exists(filename):\n \"\"\"\n Ensure that a file does not exist.\n \"\"\"\n try:\n os.unlink(filename)\n except OSError as e:\n if e.errno != ENOENT:\n raise\n\n\ndef _skip_doctest(line):\n # NumPy docstring contains cursor and comment only example\n stripped = line.strip()\n if stripped == \">>>\" or stripped.startswith(\">>> #\"):\n return line\n elif \">>>\" in stripped and \"+SKIP\" not in stripped:\n if \"# doctest:\" in line:\n return line + \", +SKIP\"\n else:\n return line + \" # doctest: +SKIP\"\n else:\n return line\n\n\ndef skip_doctest(doc):\n if doc is None:\n return \"\"\n return \"\\n\".join([_skip_doctest(line) for line in doc.split(\"\\n\")])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_extra_titles_extra_titles.return._n_join_lines_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_extra_titles_extra_titles.return._n_join_lines_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 547, "end_line": 564, "span_ids": ["extra_titles"], "tokens": 154}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def extra_titles(doc):\n lines = doc.split(\"\\n\")\n titles = {\n i: lines[i].strip()\n for i in range(len(lines) - 1)\n if lines[i + 1].strip() and all(c == \"-\" for c in lines[i + 1].strip())\n }\n\n seen = set()\n for i, title in sorted(titles.items()):\n if title in seen:\n new_title = \"Extra \" + title\n lines[i] = lines[i].replace(title, new_title)\n lines[i + 1] = lines[i + 1].replace(\"-\" * len(title), \"-\" * len(new_title))\n else:\n seen.add(title)\n\n return \"\\n\".join(lines)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_ignore_warning_ignore_warning.return.doc": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_ignore_warning_ignore_warning.return.doc", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 567, "end_line": 601, "span_ids": ["ignore_warning"], "tokens": 310}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def ignore_warning(doc, cls, name, extra=\"\", skipblocks=0):\n \"\"\"Expand docstring by adding disclaimer and extra text\"\"\"\n import inspect\n\n if inspect.isclass(cls):\n l1 = \"This docstring was copied from 
%s.%s.%s.\\n\\n\" % (\n cls.__module__,\n cls.__name__,\n name,\n )\n else:\n l1 = \"This docstring was copied from %s.%s.\\n\\n\" % (cls.__name__, name)\n l2 = \"Some inconsistencies with the Dask version may exist.\"\n\n i = doc.find(\"\\n\\n\")\n if i != -1:\n # Insert our warning\n head = doc[: i + 2]\n tail = doc[i + 2 :]\n while skipblocks > 0:\n i = tail.find(\"\\n\\n\")\n head = tail[: i + 2]\n tail = tail[i + 2 :]\n skipblocks -= 1\n # Indentation of next line\n indent = re.match(r\"\\s*\", tail).group(0)\n # Insert the warning, indented, with a blank line before and after\n if extra:\n more = [indent, extra.rstrip(\"\\n\") + \"\\n\\n\"]\n else:\n more = []\n bits = [head, indent, l1, indent, l2, \"\\n\\n\"] + more + [tail]\n doc = \"\".join(bits)\n\n return doc", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_unsupported_arguments_unsupported_arguments.return._n_join_lines_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_unsupported_arguments_unsupported_arguments.return._n_join_lines_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 604, "end_line": 616, "span_ids": ["unsupported_arguments"], "tokens": 111}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def unsupported_arguments(doc, args):\n \"\"\" Mark unsupported arguments with a disclaimer \"\"\"\n lines = doc.split(\"\\n\")\n for arg in args:\n subset = [\n (i, line)\n for i, line in enumerate(lines)\n if re.match(r\"^\\s*\" + arg + \" ?:\", line)\n ]\n if len(subset) == 1:\n [(i, line)] = subset\n lines[i] = line + \" (Not supported in Dask)\"\n return \"\\n\".join(lines)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py__derived_from__derived_from.return.doc": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py__derived_from__derived_from.return.doc", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 619, "end_line": 656, "span_ids": ["_derived_from"], "tokens": 280}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _derived_from(cls, method, ua_args=[], extra=\"\", skipblocks=0):\n \"\"\" Helper function for derived_from to ease testing \"\"\"\n # do not use wraps here, as it hides keyword arguments 
displayed\n # in the doc\n original_method = getattr(cls, method.__name__)\n\n if isinstance(original_method, property):\n # some things like SeriesGroupBy.unique are generated.\n original_method = original_method.fget\n\n doc = original_method.__doc__\n if doc is None:\n doc = \"\"\n\n # Insert disclaimer that this is a copied docstring\n if doc:\n doc = ignore_warning(\n doc, cls, method.__name__, extra=extra, skipblocks=skipblocks\n )\n elif extra:\n doc += extra.rstrip(\"\\n\") + \"\\n\\n\"\n\n # Mark unsupported arguments\n try:\n method_args = get_named_args(method)\n original_args = get_named_args(original_method)\n not_supported = [m for m in original_args if m not in method_args]\n except ValueError:\n not_supported = []\n if len(ua_args) > 0:\n not_supported.extend(ua_args)\n if len(not_supported) > 0:\n doc = unsupported_arguments(doc, not_supported)\n\n doc = skip_doctest(doc)\n doc = extra_titles(doc)\n\n return doc", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_derived_from_derived_from._Decorator_to_attach_or": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_derived_from_derived_from._Decorator_to_attach_or", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 659, "end_line": 679, "span_ids": ["derived_from"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def derived_from(original_klass, version=None, ua_args=[], skipblocks=0):\n \"\"\"Decorator to attach original class's docstring to the wrapped method.\n\n The output structure will be: top line of docstring, disclaimer about this\n being auto-derived, any extra text associated with the method being patched,\n the body of the docstring and finally, the list of keywords that exist in\n the original method but not in the dask version.\n\n Parameters\n ----------\n original_klass: type\n Original class which the method is derived from\n version : str\n Original package version which supports the wrapped method\n ua_args : list\n List of keywords which Dask doesn't support. Keywords existing in\n original but not in Dask will automatically be added.\n skipblocks : int\n How many text blocks (paragraphs) to skip from the start of the\n docstring. Useful for cases where the target has extra front-matter.\n \"\"\"\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_derived_from.wrapper_derived_from.return.wrapper": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_derived_from.wrapper_derived_from.return.wrapper", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 681, "end_line": 706, "span_ids": ["derived_from"], "tokens": 187}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def derived_from(original_klass, version=None, ua_args=[], skipblocks=0):\n\n def wrapper(method):\n try:\n extra = getattr(method, \"__doc__\", None) or \"\"\n method.__doc__ = _derived_from(\n original_klass,\n method,\n ua_args=ua_args,\n extra=extra,\n skipblocks=skipblocks,\n )\n return method\n\n except AttributeError:\n module_name = original_klass.__module__.split(\".\")[0]\n\n @functools.wraps(method)\n def wrapped(*args, **kwargs):\n msg = \"Base package doesn't support '{0}'.\".format(method.__name__)\n if version is not None:\n msg2 = \" Use {0} {1} or later to use this method.\"\n msg += msg2.format(module_name, version)\n raise NotImplementedError(msg)\n\n return wrapped\n\n return wrapper", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_funcname_funcname.try_.except_AttributeError_.return.str_func_50_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_funcname_funcname.try_.except_AttributeError_.return.str_func_50_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 709, "end_line": 738, "span_ids": ["funcname"], "tokens": 239}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def funcname(func):\n \"\"\"Get the name of a function.\"\"\"\n # functools.partial\n if isinstance(func, functools.partial):\n return funcname(func.func)\n # methodcaller\n if isinstance(func, methodcaller):\n return func.method[:50]\n\n module_name = getattr(func, \"__module__\", None) or \"\"\n type_name = getattr(type(func), \"__name__\", None) or \"\"\n\n # toolz.curry\n if \"toolz\" in module_name and \"curry\" == type_name:\n return func.func_name[:50]\n # multipledispatch objects\n if \"multipledispatch\" in module_name and \"Dispatcher\" == type_name:\n return func.name[:50]\n # numpy.vectorize objects\n if \"numpy\" in 
module_name and \"vectorize\" == type_name:\n        return (\"vectorize_\" + funcname(func.pyfunc))[:50]\n\n    # All other callables\n    try:\n        name = func.__name__\n        if name == \"<lambda>\":\n            return \"lambda\"\n        return name[:50]\n    except AttributeError:\n        return str(func)[:50]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_typename_memory_repr.for_x_in_bytes_KB_.num_1024_0": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_typename_memory_repr.for_x_in_bytes_KB_.num_1024_0", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 741, "end_line": 840, "span_ids": ["memory_repr", "ensure_bytes", "ensure_unicode", "typename", "insert", "digit", "dependency_depth"], "tokens": 587}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def typename(typ):\n    \"\"\"\n    Return the name of a type\n\n    Examples\n    --------\n    >>> typename(int)\n    'int'\n\n    >>> from dask.core import literal\n    >>> typename(literal)\n    'dask.core.literal'\n    \"\"\"\n    if not typ.__module__ or typ.__module__ == \"builtins\":\n        return typ.__name__\n    else:\n        return typ.__module__ + \".\" + typ.__name__\n\n\ndef ensure_bytes(s):\n    \"\"\"Turn string or bytes to bytes\n\n    >>> ensure_bytes('123')\n    b'123'\n    >>> ensure_bytes('123')\n    b'123'\n    >>> ensure_bytes(b'123')\n    b'123'\n    \"\"\"\n    if isinstance(s, bytes):\n        return s\n    if hasattr(s, \"encode\"):\n        return s.encode()\n    msg = \"Object %s is neither a bytes object nor has an encode method\"\n    raise TypeError(msg % s)\n\n\ndef ensure_unicode(s):\n    \"\"\"Turn string or bytes to a string\n\n    >>> ensure_unicode('123')\n    '123'\n    >>> ensure_unicode('123')\n    '123'\n    >>> ensure_unicode(b'123')\n    '123'\n    \"\"\"\n    if isinstance(s, str):\n        return s\n    if hasattr(s, \"decode\"):\n        return s.decode()\n    msg = \"Object %s is neither a str object nor has a decode method\"\n    raise TypeError(msg % s)\n\n\ndef digit(n, k, base):\n    \"\"\"\n\n    >>> digit(1234, 0, 10)\n    4\n    >>> digit(1234, 1, 10)\n    3\n    >>> digit(1234, 2, 10)\n    2\n    >>> digit(1234, 3, 10)\n    1\n    \"\"\"\n    return n // base ** k % base\n\n\ndef insert(tup, loc, val):\n    \"\"\"\n\n    >>> insert(('a', 'b', 'c'), 0, 'x')\n    ('x', 'b', 'c')\n    \"\"\"\n    L = list(tup)\n    L[loc] = val\n    return tuple(L)\n\n\ndef dependency_depth(dsk):\n    deps, _ = get_deps(dsk)\n\n    @lru_cache(maxsize=None)\n    def max_depth_by_deps(key):\n        if not deps[key]:\n            return 1\n\n        d = 1 + max(max_depth_by_deps(dep_key) for dep_key in deps[key])\n        return d\n\n    return max(max_depth_by_deps(dep_key) for dep_key in deps.keys())\n\n\ndef memory_repr(num):\n    for x in [\"bytes\", \"KB\", \"MB\", \"GB\", \"TB\"]:\n        if num < 1024.0:\n            return \"%3.1f %s\" % (num, x)\n        num /= 1024.0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_asciitable_asciitable.return._n_join_bar_header_b": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_asciitable_asciitable.return._n_join_bar_header_b", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 843, "end_line": 861, "span_ids": ["asciitable"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def asciitable(columns, rows):\n \"\"\"Formats an ascii table for given columns and rows.\n\n Parameters\n ----------\n columns : list\n The column names\n rows : list of tuples\n The rows in the table. Each tuple must be the same length as\n ``columns``.\n \"\"\"\n rows = [tuple(str(i) for i in r) for r in rows]\n columns = tuple(str(i) for i in columns)\n widths = tuple(max(max(map(len, x)), len(c)) for x, c in zip(zip(*rows), columns))\n row_template = (\"|\" + (\" %%-%ds |\" * len(columns))) % widths\n header = row_template % tuple(columns)\n bar = \"+%s+\" % \"+\".join(\"-\" * (w + 2) for w in widths)\n data = \"\\n\".join(row_template % r for r in rows)\n return \"\\n\".join([bar, header, bar, data, bar])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_put_lines_methodcaller.__repr__.__str__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_put_lines_methodcaller.__repr__.__str__", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 864, "end_line": 901, "span_ids": ["methodcaller.__reduce__", "methodcaller.__call__", "methodcaller.__str__", "impl:10", "methodcaller.__new__", "put_lines", "methodcaller", "methodcaller:7"], "tokens": 250}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def put_lines(buf, lines):\n if any(not isinstance(x, str) for x in lines):\n lines = [str(x) for x in lines]\n buf.write(\"\\n\".join(lines))\n\n\n_method_cache = {}\n\n\nclass methodcaller(object):\n \"\"\"\n Return a callable object that calls the given method on its operand.\n\n Unlike the builtin `operator.methodcaller`, instances of this class are\n serializable\n \"\"\"\n\n __slots__ = (\"method\",)\n func = property(lambda self: self.method) # For `funcname` to work\n\n def __new__(cls, method):\n if method in _method_cache:\n return _method_cache[method]\n self = object.__new__(cls)\n self.method = method\n _method_cache[method] = self\n return self\n\n def __call__(self, obj, *args, **kwargs):\n return getattr(obj, 
self.method)(*args, **kwargs)\n\n def __reduce__(self):\n return (methodcaller, (self.method,))\n\n def __str__(self):\n return \"<%s: %s>\" % (self.__class__.__name__, self.method)\n\n __repr__ = __str__", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_itemgetter_M.MethodCache_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_itemgetter_M.MethodCache_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 904, "end_line": 942, "span_ids": ["itemgetter.__eq__", "MethodCache", "impl:12", "itemgetter.__call__", "itemgetter.__init__", "itemgetter.__reduce__", "itemgetter"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class itemgetter(object):\n \"\"\"\n Return a callable object that gets an item from the operand\n\n Unlike the builtin `operator.itemgetter`, instances of this class are\n serializable\n \"\"\"\n\n __slots__ = (\"index\",)\n\n def __init__(self, index):\n self.index = index\n\n def __call__(self, x):\n return x[self.index]\n\n def __reduce__(self):\n return (itemgetter, (self.index,))\n\n def __eq__(self, other):\n return type(self) is type(other) and self.index == other.index\n\n\nclass MethodCache(object):\n \"\"\"Attribute access on this object returns a methodcaller for that\n attribute.\n\n Examples\n --------\n >>> a = [1, 3, 3]\n >>> M.count(a, 3) == a.count(3)\n True\n \"\"\"\n\n __getattr__ = staticmethod(methodcaller)\n __dir__ = lambda self: list(_method_cache)\n\n\nM = MethodCache()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_SerializableLock_SerializableLock.__repr__.__str__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_SerializableLock_SerializableLock.__repr__.__str__", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 945, "end_line": 1006, "span_ids": ["SerializableLock.__exit__", "SerializableLock.__getstate__", "SerializableLock.__setstate__", "SerializableLock.__init__", "SerializableLock.release", "SerializableLock.__str__", "SerializableLock", "SerializableLock.locked", "SerializableLock.__enter__", "SerializableLock.acquire", "SerializableLock:4"], "tokens": 414}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class 
SerializableLock(object):\n _locks = WeakValueDictionary()\n \"\"\" A Serializable per-process Lock\n\n This wraps a normal ``threading.Lock`` object and satisfies the same\n interface. However, this lock can also be serialized and sent to different\n processes. It will not block concurrent operations between processes (for\n this you should look at ``multiprocessing.Lock`` or ``locket.lock_file``)\n but will consistently deserialize into the same lock.\n\n So if we make a lock in one process::\n\n lock = SerializableLock()\n\n And then send it over to another process multiple times::\n\n bytes = pickle.dumps(lock)\n a = pickle.loads(bytes)\n b = pickle.loads(bytes)\n\n Then the deserialized objects will operate as though they were the same\n lock, and collide as appropriate.\n\n This is useful for consistently protecting resources on a per-process\n level.\n\n The creation of locks is itself not threadsafe.\n \"\"\"\n\n def __init__(self, token=None):\n self.token = token or str(uuid.uuid4())\n if self.token in SerializableLock._locks:\n self.lock = SerializableLock._locks[self.token]\n else:\n self.lock = Lock()\n SerializableLock._locks[self.token] = self.lock\n\n def acquire(self, *args, **kwargs):\n return self.lock.acquire(*args, **kwargs)\n\n def release(self, *args, **kwargs):\n return self.lock.release(*args, **kwargs)\n\n def __enter__(self):\n self.lock.__enter__()\n\n def __exit__(self, *args):\n self.lock.__exit__(*args)\n\n def locked(self):\n return self.lock.locked()\n\n def __getstate__(self):\n return self.token\n\n def __setstate__(self, token):\n self.__init__(token)\n\n def __str__(self):\n return \"<%s: %s>\" % (self.__class__.__name__, self.token)\n\n __repr__ = __str__", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_get_scheduler_lock_ensure_dict.return.dict_d_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_get_scheduler_lock_ensure_dict.return.dict_d_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1009, "end_line": 1035, "span_ids": ["get_scheduler_lock", "ensure_dict"], "tokens": 161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_scheduler_lock(collection=None, scheduler=None):\n \"\"\"Get an instance of the appropriate lock for a certain situation based on\n scheduler used.\"\"\"\n from . 
import multiprocessing\n from .base import get_scheduler\n\n actual_get = get_scheduler(collections=[collection], scheduler=scheduler)\n\n if actual_get == multiprocessing.get:\n return multiprocessing.get_context().Manager().Lock()\n\n return SerializableLock()\n\n\ndef ensure_dict(d):\n if type(d) is dict:\n return d\n elif hasattr(d, \"dicts\"):\n result = {}\n seen = set()\n for dd in d.dicts.values():\n dd_id = id(dd)\n if dd_id not in seen:\n result.update(dd)\n seen.add(dd_id)\n return result\n return dict(d)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_OperatorMethodMixin_OperatorMethodMixin._get_binary_operator.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_OperatorMethodMixin_OperatorMethodMixin._get_binary_operator.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1038, "end_line": 1073, "span_ids": ["OperatorMethodMixin._bind_operator", "OperatorMethodMixin._get_binary_operator", "OperatorMethodMixin._get_unary_operator", "OperatorMethodMixin"], "tokens": 255}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class OperatorMethodMixin(object):\n \"\"\"A mixin for dynamically implementing operators\"\"\"\n\n @classmethod\n def _bind_operator(cls, op):\n \"\"\" bind operator to this class \"\"\"\n name = op.__name__\n\n if name.endswith(\"_\"):\n # for and_ and or_\n name = name[:-1]\n elif name == \"inv\":\n name = \"invert\"\n\n meth = \"__{0}__\".format(name)\n\n if name in (\"abs\", \"invert\", \"neg\", \"pos\"):\n setattr(cls, meth, cls._get_unary_operator(op))\n else:\n setattr(cls, meth, cls._get_binary_operator(op))\n\n if name in (\"eq\", \"gt\", \"ge\", \"lt\", \"le\", \"ne\", \"getitem\"):\n return\n\n rmeth = \"__r{0}__\".format(name)\n setattr(cls, rmeth, cls._get_binary_operator(op, inv=True))\n\n @classmethod\n def _get_unary_operator(cls, op):\n \"\"\" Must return a method used by unary operator \"\"\"\n raise NotImplementedError\n\n @classmethod\n def _get_binary_operator(cls, op, inv=False):\n \"\"\" Must return a method used by binary operator \"\"\"\n raise NotImplementedError", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_partial_by_order_is_arraylike.return.bool_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_partial_by_order_is_arraylike.return.bool_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1076, "end_line": 1113, "span_ids": ["is_arraylike", 
"partial_by_order"], "tokens": 227}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def partial_by_order(*args, **kwargs):\n \"\"\"\n\n >>> from operator import add\n >>> partial_by_order(5, function=add, other=[(1, 10)])\n 15\n \"\"\"\n function = kwargs.pop(\"function\")\n other = kwargs.pop(\"other\")\n args2 = list(args)\n for i, arg in other:\n args2.insert(i, arg)\n return function(*args2, **kwargs)\n\n\ndef is_arraylike(x):\n \"\"\"Is this object a numpy array or something similar?\n\n Examples\n --------\n >>> import numpy as np\n >>> is_arraylike(np.ones(5))\n True\n >>> is_arraylike(np.ones(()))\n True\n >>> is_arraylike(5)\n False\n >>> is_arraylike('cat')\n False\n \"\"\"\n from .base import is_dask_collection\n\n return bool(\n hasattr(x, \"shape\")\n and isinstance(x.shape, tuple)\n and hasattr(x, \"dtype\")\n and not any(is_dask_collection(n) for n in x.shape)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_is_dataframe_like_is_index_like.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_is_dataframe_like_is_index_like.return._", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1116, "end_line": 1142, "span_ids": ["is_series_like", "is_index_like", "is_dataframe_like"], "tokens": 221}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def is_dataframe_like(df):\n \"\"\" Looks like a Pandas DataFrame \"\"\"\n typ = type(df)\n return (\n all(hasattr(typ, name) for name in (\"groupby\", \"head\", \"merge\", \"mean\"))\n and all(hasattr(df, name) for name in (\"dtypes\", \"columns\"))\n and not any(hasattr(typ, name) for name in (\"name\", \"dtype\"))\n )\n\n\ndef is_series_like(s):\n \"\"\" Looks like a Pandas Series \"\"\"\n typ = type(s)\n return (\n all(hasattr(typ, name) for name in (\"groupby\", \"head\", \"mean\"))\n and all(hasattr(s, name) for name in (\"dtype\", \"name\"))\n and \"index\" not in typ.__name__.lower()\n )\n\n\ndef is_index_like(s):\n \"\"\" Looks like a Pandas Index \"\"\"\n typ = type(s)\n return (\n all(hasattr(s, name) for name in (\"name\", \"dtype\"))\n and \"index\" in typ.__name__.lower()\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_natural_sort_key_factors.return.set_functools_reduce_list": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_natural_sort_key_factors.return.set_functools_reduce_list", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1145, "end_line": 1180, "span_ids": ["factors", "natural_sort_key"], "tokens": 351}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def natural_sort_key(s):\n \"\"\"\n Sorting `key` function for performing a natural sort on a collection of\n strings\n\n See https://en.wikipedia.org/wiki/Natural_sort_order\n\n Parameters\n ----------\n s : str\n A string that is an element of the collection being sorted\n\n Returns\n -------\n tuple[str or int]\n Tuple of the parts of the input string where each part is either a\n string or an integer\n\n Examples\n --------\n >>> a = ['f0', 'f1', 'f2', 'f8', 'f9', 'f10', 'f11', 'f19', 'f20', 'f21']\n >>> sorted(a)\n ['f0', 'f1', 'f10', 'f11', 'f19', 'f2', 'f20', 'f21', 'f8', 'f9']\n >>> sorted(a, key=natural_sort_key)\n ['f0', 'f1', 'f2', 'f8', 'f9', 'f10', 'f11', 'f19', 'f20', 'f21']\n \"\"\"\n return [int(part) if part.isdigit() else part for part in re.split(r\"(\\d+)\", s)]\n\n\ndef factors(n):\n \"\"\"Return the factors of an integer\n\n https://stackoverflow.com/a/6800214/616616\n \"\"\"\n seq = ([i, n // i] for i in range(1, int(pow(n, 0.5) + 1)) if n % i == 0)\n return set(functools.reduce(list.__add__, seq))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_parse_bytes_parse_bytes.return.int_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_parse_bytes_parse_bytes.return.int_result_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1183, "end_line": 1235, "span_ids": ["parse_bytes"], "tokens": 375}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def parse_bytes(s):\n \"\"\"Parse byte string to numbers\n\n >>> from dask.utils import parse_bytes\n >>> parse_bytes('100')\n 100\n >>> parse_bytes('100 MB')\n 100000000\n >>> parse_bytes('100M')\n 100000000\n >>> parse_bytes('5kB')\n 5000\n >>> parse_bytes('5.4 kB')\n 5400\n >>> parse_bytes('1kiB')\n 1024\n >>> parse_bytes('1e6')\n 1000000\n >>> parse_bytes('1e6 kB')\n 1000000000\n >>> parse_bytes('MB')\n 1000000\n >>> parse_bytes(123)\n 123\n >>> parse_bytes('5 foos') # doctest: +SKIP\n ValueError: Could not interpret 'foos' as a byte unit\n \"\"\"\n if isinstance(s, (int, float)):\n return int(s)\n s = s.replace(\" \", \"\")\n if not any(char.isdigit() for char in s):\n s = \"1\" + s\n\n for i in 
range(len(s) - 1, -1, -1):\n if not s[i].isalpha():\n break\n index = i + 1\n\n prefix = s[:index]\n suffix = s[index:]\n\n try:\n n = float(prefix)\n except ValueError as e:\n raise ValueError(\"Could not interpret '%s' as a number\" % prefix) from e\n\n try:\n multiplier = byte_sizes[suffix.lower()]\n except KeyError as e:\n raise ValueError(\"Could not interpret '%s' as a byte unit\" % suffix) from e\n\n result = n * multiplier\n return int(result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_byte_sizes_byte_sizes_update_k_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_byte_sizes_byte_sizes_update_k_1_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1237, "end_line": 1253, "span_ids": ["impl:14"], "tokens": 195}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "byte_sizes = {\n \"kB\": 10 ** 3,\n \"MB\": 10 ** 6,\n \"GB\": 10 ** 9,\n \"TB\": 10 ** 12,\n \"PB\": 10 ** 15,\n \"KiB\": 2 ** 10,\n \"MiB\": 2 ** 20,\n \"GiB\": 2 ** 30,\n \"TiB\": 2 ** 40,\n \"PiB\": 2 ** 50,\n \"B\": 1,\n \"\": 1,\n}\nbyte_sizes = {k.lower(): v for k, v in byte_sizes.items()}\nbyte_sizes.update({k[0]: v for k, v in byte_sizes.items() if k and \"i\" not in k})\nbyte_sizes.update({k[:-1]: v for k, v in byte_sizes.items() if k and \"i\" in k})", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_format_time_format_time.return._2f_us_n_1e6_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_format_time_format_time.return._2f_us_n_1e6_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1257, "end_line": 1274, "span_ids": ["format_time"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def format_time(n):\n \"\"\"format integers as time\n\n >>> from dask.utils import format_time\n >>> format_time(1)\n '1.00 s'\n >>> format_time(0.001234)\n '1.23 ms'\n >>> format_time(0.00012345)\n '123.45 us'\n >>> format_time(123.456)\n '123.46 s'\n \"\"\"\n if n >= 1:\n return \"%.2f s\" % n\n if n >= 1e-3:\n return \"%.2f ms\" % (n * 1e3)\n return \"%.2f us\" % (n * 1e6)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: 
{value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_format_bytes_format_bytes.return._d_B_n": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_format_bytes_format_bytes.return._d_B_n", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1277, "end_line": 1304, "span_ids": ["format_bytes"], "tokens": 263}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def format_bytes(n):\n \"\"\"Format bytes as text\n\n >>> from dask.utils import format_bytes\n >>> format_bytes(1)\n '1 B'\n >>> format_bytes(1234)\n '1.23 kB'\n >>> format_bytes(12345678)\n '12.35 MB'\n >>> format_bytes(1234567890)\n '1.23 GB'\n >>> format_bytes(1234567890000)\n '1.23 TB'\n >>> format_bytes(1234567890000000)\n '1.23 PB'\n \"\"\"\n if n > 1e15:\n return \"%0.2f PB\" % (n / 1e15)\n if n > 1e12:\n return \"%0.2f TB\" % (n / 1e12)\n if n > 1e9:\n return \"%0.2f GB\" % (n / 1e9)\n if n > 1e6:\n return \"%0.2f MB\" % (n / 1e6)\n if n > 1e3:\n return \"%0.2f kB\" % (n / 1000)\n return \"%d B\" % n", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_timedelta_sizes_timedelta_sizes_update_k": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_timedelta_sizes_timedelta_sizes_update_k", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1304, "end_line": 1325, "span_ids": ["impl:20"], "tokens": 194}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "timedelta_sizes = {\n \"s\": 1,\n \"ms\": 1e-3,\n \"us\": 1e-6,\n \"ns\": 1e-9,\n \"m\": 60,\n \"h\": 3600,\n \"d\": 3600 * 24,\n}\n\ntds2 = {\n \"second\": 1,\n \"minute\": 60,\n \"hour\": 60 * 60,\n \"day\": 60 * 60 * 24,\n \"millisecond\": 1e-3,\n \"microsecond\": 1e-6,\n \"nanosecond\": 1e-9,\n}\ntds2.update({k + \"s\": v for k, v in tds2.items()})\ntimedelta_sizes.update(tds2)\ntimedelta_sizes.update({k.upper(): v for k, v in timedelta_sizes.items()})", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_parse_timedelta_parse_timedelta.return.result": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_parse_timedelta_parse_timedelta.return.result", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1331, "end_line": 1373, "span_ids": ["parse_timedelta"], "tokens": 283}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def parse_timedelta(s, default=\"seconds\"):\n \"\"\"Parse timedelta string to number of seconds\n\n Examples\n --------\n >>> from datetime import timedelta\n >>> from dask.utils import parse_timedelta\n >>> parse_timedelta('3s')\n 3\n >>> parse_timedelta('3.5 seconds')\n 3.5\n >>> parse_timedelta('300ms')\n 0.3\n >>> parse_timedelta(timedelta(seconds=3)) # also supports timedeltas\n 3\n \"\"\"\n if s is None:\n return None\n if isinstance(s, timedelta):\n s = s.total_seconds()\n return int(s) if int(s) == s else s\n if isinstance(s, Number):\n s = str(s)\n s = s.replace(\" \", \"\")\n if not s[0].isdigit():\n s = \"1\" + s\n\n for i in range(len(s) - 1, -1, -1):\n if not s[i].isalpha():\n break\n index = i + 1\n\n prefix = s[:index]\n suffix = s[index:] or default\n\n n = float(prefix)\n\n multiplier = timedelta_sizes[suffix.lower()]\n\n result = n * multiplier\n if int(result) == result:\n result = int(result)\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_has_keyword_hex_pattern.re_compile_a_f_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_has_keyword_hex_pattern.re_compile_a_f_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1371, "end_line": 1415, "span_ids": ["has_keyword", "ndimlist", "impl:27", "iter_chunks"], "tokens": 254}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def has_keyword(func, keyword):\n try:\n return keyword in inspect.signature(func).parameters\n except Exception:\n return False\n\n\ndef ndimlist(seq):\n if not isinstance(seq, (list, tuple)):\n return 0\n elif not seq:\n return 1\n else:\n return 1 + ndimlist(seq[0])\n\n\ndef iter_chunks(sizes, max_size):\n \"\"\"Split sizes into chunks of total max_size each\n\n Parameters\n ----------\n sizes : iterable of numbers\n The sizes to be chunked\n max_size : number\n Maximum total size per chunk.\n It must be greater or equal than each size in sizes\n \"\"\"\n chunk, chunk_sum = [], 0\n iter_sizes = iter(sizes)\n size = next(iter_sizes, None)\n while size is not None:\n assert size <= max_size\n if chunk_sum + size <= max_size:\n chunk.append(size)\n chunk_sum += size\n size = 
next(iter_sizes, None)\n else:\n assert chunk\n yield chunk\n chunk, chunk_sum = [], 0\n if chunk:\n yield chunk\n\n\nhex_pattern = re.compile(\"[a-f]+\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_key_split_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_key_split_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1418, "end_line": 1470, "span_ids": ["key_split"], "tokens": 402}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def key_split(s):\n \"\"\"\n >>> key_split('x')\n 'x'\n >>> key_split('x-1')\n 'x'\n >>> key_split('x-1-2-3')\n 'x'\n >>> key_split(('x-2', 1))\n 'x'\n >>> key_split(\"('x-2', 1)\")\n 'x'\n >>> key_split('hello-world-1')\n 'hello-world'\n >>> key_split(b'hello-world-1')\n 'hello-world'\n >>> key_split('ae05086432ca935f6eba409a8ecd4896')\n 'data'\n >>> key_split('<module.submodule.myclass object at 0xdaf372>')\n 'myclass'\n >>> key_split(None)\n 'Other'\n >>> key_split('x-abcdefab') # ignores hex\n 'x'\n >>> key_split('_(x)') # strips unpleasant characters\n 'x'\n \"\"\"\n if type(s) is bytes:\n s = s.decode()\n if type(s) is tuple:\n s = s[0]\n try:\n words = s.split(\"-\")\n if not words[0][0].isalpha():\n result = words[0].strip(\"_'()\\\"\")\n else:\n result = words[0]\n for word in words[1:]:\n if word.isalpha() and not (\n len(word) == 8 and hex_pattern.match(word) is not None\n ):\n result += \"-\" + word\n else:\n break\n if len(result) == 32 and re.match(r\"[a-f0-9]{32}\", result):\n return \"data\"\n else:\n if result[0] == \"<\":\n result = result.strip(\"<>\").split()[0].split(\".\")[-1]\n return result\n except Exception:\n return \"Other\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils_test.py_inc_GetFunctionTestMixin.test_get_with_list.assert_self_get_d_z_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils_test.py_inc_GetFunctionTestMixin.test_get_with_list.assert_self_get_d_z_", "embedding": null, "metadata": {"file_path": "dask/utils_test.py", "file_name": "utils_test.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 68, "span_ids": ["GetFunctionTestMixin.test_get", "inc", "GetFunctionTestMixin.test_badkey", "GetFunctionTestMixin.test_data_not_in_dict_is_ok", "GetFunctionTestMixin.test_nested_badkey", "dec", "add", "GetFunctionTestMixin.test_get_with_list", "GetFunctionTestMixin"], "tokens": 555}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def inc(x):\n return x + 1\n\n\ndef dec(x):\n return x - 1\n\n\ndef add(x, y):\n return x + y\n\n\nclass GetFunctionTestMixin(object):\n \"\"\"\n The GetFunctionTestCase class can be imported and used to test foreign\n implementations of the `get` function specification. It aims to enforce all\n known expectations of `get` functions.\n\n To use the class, inherit from it and override the `get` function. For\n example:\n\n > from dask.utils_test import GetFunctionTestMixin\n > class TestCustomGet(GetFunctionTestMixin):\n get = staticmethod(myget)\n\n Note that the foreign `myget` function has to be explicitly decorated as a\n staticmethod.\n \"\"\"\n\n def test_get(self):\n d = {\":x\": 1, \":y\": (inc, \":x\"), \":z\": (add, \":x\", \":y\")}\n\n assert self.get(d, \":x\") == 1\n assert self.get(d, \":y\") == 2\n assert self.get(d, \":z\") == 3\n\n def test_badkey(self):\n d = {\":x\": 1, \":y\": (inc, \":x\"), \":z\": (add, \":x\", \":y\")}\n try:\n result = self.get(d, \"badkey\")\n except KeyError:\n pass\n else:\n msg = \"Expected `{}` with badkey to raise KeyError.\\n\"\n msg += \"Obtained '{}' instead.\".format(result)\n assert False, msg.format(self.get.__name__)\n\n def test_nested_badkey(self):\n d = {\"x\": 1, \"y\": 2, \"z\": (sum, [\"x\", \"y\"])}\n\n try:\n result = self.get(d, [[\"badkey\"], \"y\"])\n except KeyError:\n pass\n else:\n msg = \"Expected `{}` with badkey to raise KeyError.\\n\"\n msg += \"Obtained '{}' instead.\".format(result)\n assert False, msg.format(self.get.__name__)\n\n def test_data_not_in_dict_is_ok(self):\n d = {\"x\": 1, \"y\": (add, \"x\", 10)}\n assert self.get(d, \"y\") == 11\n\n def test_get_with_list(self):\n d = {\"x\": 1, \"y\": 2, \"z\": (sum, [\"x\", \"y\"])}\n\n assert self.get(d, [\"x\", \"y\"]) == (1, 2)\n assert self.get(d, \"z\") == 3", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils_test.py_GetFunctionTestMixin.test_get_with_list_top_level_GetFunctionTestMixin.test_get_with_list_top_level.assert_self_get_d_f_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils_test.py_GetFunctionTestMixin.test_get_with_list_top_level_GetFunctionTestMixin.test_get_with_list_top_level.assert_self_get_d_f_", "embedding": null, "metadata": {"file_path": "dask/utils_test.py", "file_name": "utils_test.py", "file_type": "text/x-python", "category": "test", "start_line": 70, "end_line": 84, "span_ids": ["GetFunctionTestMixin.test_get_with_list_top_level"], "tokens": 221}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class GetFunctionTestMixin(object):\n\n def test_get_with_list_top_level(self):\n d = {\n \"a\": [1, 2, 3],\n \"b\": \"a\",\n \"c\": [1, (inc, 1)],\n \"d\": [(sum, \"a\")],\n \"e\": [\"a\", \"b\"],\n \"f\": [[[(sum, \"a\"), \"c\"], (sum, \"b\")], 2],\n }\n assert self.get(d, \"a\") == [1, 2, 3]\n assert self.get(d, \"b\") == [1, 2, 3]\n assert self.get(d, \"c\") == [1, 2]\n assert 
self.get(d, \"d\") == [6]\n assert self.get(d, \"e\") == [[1, 2, 3], [1, 2, 3]]\n assert self.get(d, \"f\") == [[[6, [1, 2]], 6], 2]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils_test.py_GetFunctionTestMixin.test_get_with_nested_list_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils_test.py_GetFunctionTestMixin.test_get_with_nested_list_", "embedding": null, "metadata": {"file_path": "dask/utils_test.py", "file_name": "utils_test.py", "file_type": "text/x-python", "category": "test", "start_line": 86, "end_line": 115, "span_ids": ["GetFunctionTestMixin.test_get_with_nested_list", "GetFunctionTestMixin.test_with_HighLevelGraph", "GetFunctionTestMixin.test_get_works_with_unhashables_in_values", "GetFunctionTestMixin.test_nested_tasks", "GetFunctionTestMixin.test_get_stack_limit"], "tokens": 356}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class GetFunctionTestMixin(object):\n\n def test_get_with_nested_list(self):\n d = {\"x\": 1, \"y\": 2, \"z\": (sum, [\"x\", \"y\"])}\n\n assert self.get(d, [[\"x\"], \"y\"]) == ((1,), 2)\n assert self.get(d, \"z\") == 3\n\n def test_get_works_with_unhashables_in_values(self):\n f = lambda x, y: x + len(y)\n d = {\"x\": 1, \"y\": (f, \"x\", set([1]))}\n\n assert self.get(d, \"y\") == 2\n\n def test_nested_tasks(self):\n d = {\"x\": 1, \"y\": (inc, \"x\"), \"z\": (add, (inc, \"x\"), \"y\")}\n\n assert self.get(d, \"z\") == 4\n\n def test_get_stack_limit(self):\n d = {\"x%d\" % (i + 1): (inc, \"x%d\" % i) for i in range(10000)}\n d[\"x0\"] = 0\n assert self.get(d, \"x10000\") == 10000\n\n def test_with_HighLevelGraph(self):\n from .highlevelgraph import HighLevelGraph\n\n layers = {\"a\": {\"x\": 1, \"y\": (inc, \"x\")}, \"b\": {\"z\": (add, (inc, \"x\"), \"y\")}}\n dependencies = {\"a\": (), \"b\": {\"a\"}}\n graph = HighLevelGraph(layers, dependencies)\n assert self.get(graph, \"z\") == 4", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/conf.py__coding_utf_8___html_show_sphinx_True": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/conf.py__coding_utf_8___html_show_sphinx_True", "embedding": null, "metadata": {"file_path": "docs/source/conf.py", "file_name": "conf.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 161, "span_ids": ["docstring:71", "docstring:33", "docstring"], "tokens": 1186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": 
{}, "text": "# -*- coding: utf-8 -*-\n#\n# dask documentation build configuration file, created by\n# sphinx-quickstart on Sun Jan 4 08:58:22 2015.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport os\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nimport sys\n\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath(\"../../\"))\n\nsource_dir = os.path.dirname(__file__)\nsys.path.insert(0, os.path.join(source_dir, \"ext\"))\n\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.extlinks\",\n \"numpydoc\",\n \"sphinx_click.ext\",\n \"dask_config_sphinx_ext\",\n]\n\nnumpydoc_show_class_members = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix of source filenames.\nsource_suffix = \".rst\"\n\n# The encoding of source files.\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"Dask\"\ncopyright = \"2014-2018, Anaconda, Inc. and contributors\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n# language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n# today = ''\n# Else, today_fmt is used as the format for a strftime call.\n# today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = []\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n# add_module_names = True\npygments_style = \"default\"\n\n# A list of ignored prefixes for module index sorting.\n# modindex_common_prefix = []\n\n\n# -- Options for HTML output ---------------------------------------------------\n\nhtml_theme = \"dask_sphinx_theme\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\"logo_only\": True}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \" v documentation\".\n# html_title = None\n\n# A shorter title for the navigation bar. 
Default is the same as html_title.\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n# html_logo = \"images/dask_horizontal_white_no_pad.svg\"\n\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n# html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n# html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n# html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n# html_additional_pages = {}\n\n# If false, no module index is generated.\n# html_domain_indices = True\n\n# If false, no index is generated.\n# html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n# html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n# html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n# html_show_sphinx = True", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/conf.py_htmlhelp_basename__Options_for_sphinx_e": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/conf.py_htmlhelp_basename__Options_for_sphinx_e", "embedding": null, "metadata": {"file_path": "docs/source/conf.py", "file_name": "conf.py", "file_type": "text/x-python", "category": "implementation", "start_line": 175, "end_line": 300, "span_ids": ["docstring:145", "docstring:105", "docstring:71"], "tokens": 820}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "htmlhelp_basename = \"daskdoc\"\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. 
List of tuples\nlatex_documents = [\n (master_doc, \"dask.tex\", \"dask Documentation\", \"Dask Development Team\", \"manual\")\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n# latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n# latex_use_parts = False\n\n# If true, show page references after internal links.\n# latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n# latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n# latex_appendices = []\n\n# If false, no module index is generated.\n# latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\nman_pages = [(master_doc, \"dask\", \"dask Documentation\", [\"Dask Development Team\"], 1)]\n\n# If true, show URL addresses after external links.\n# man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\ntexinfo_documents = [\n (\n master_doc,\n \"Dask\",\n \"dask Documentation\",\n \"Dask Development Team\",\n \"Dask\",\n \"One line description of project.\",\n \"Miscellaneous\",\n )\n]\n\n# Documents to append as an appendix to all manuals.\n# texinfo_appendices = []\n\n# If false, no module index is generated.\n# texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n# texinfo_show_urls = 'footnote'\n\n\n# -- Options for Epub output ---------------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = \"Dask\"\nepub_author = \"Dask Development Team\"\nepub_publisher = \"Anaconda Inc\"\nepub_copyright = \"2014-2018, Anaconda, Inc. and contributors\"\n\n# The language of the text. It defaults to the language option\n# or en if the language is not set.\n# epub_language = ''\n\n# The scheme of the identifier. Typical schemes are ISBN or URL.\n# epub_scheme = ''\n\n# The unique identifier of the text. 
This can be a ISBN number\n# or the project homepage.\n# epub_identifier = ''\n\n# A unique identification for the text.\n# epub_uid = ''\n\n# A tuple containing the cover image and cover page html template filenames.\n# epub_cover = ()\n\n# HTML files that should be inserted before the pages created by sphinx.\n# The format is a list of tuples containing the path and title.\n# epub_pre_files = []\n\n# HTML files that should be inserted after the pages created by sphinx.\n# The format is a list of tuples containing the path and title.\n# epub_post_files = []\n\n# A list of files that should not be packed into the epub file.\n# epub_exclude_files = []\n\n# The depth of the table of contents in toc.ncx.\n# epub_tocdepth = 3\n\n# Allow duplicate toc entries.\n# epub_tocdup = True\n\nextlinks = {\n \"issue\": (\"https://github.com/dask/dask/issues/%s\", \"GH#\"),\n \"pr\": (\"https://github.com/dask/dask/pull/%s\", \"GH#\"),\n}\n\n# --Options for sphinx extensions -----------------------------------------------", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/conf.py_intersphinx_mapping_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/conf.py_intersphinx_mapping_", "embedding": null, "metadata": {"file_path": "docs/source/conf.py", "file_name": "conf.py", "file_type": "text/x-python", "category": "implementation", "start_line": 302, "end_line": 387, "span_ids": ["impl:53", "copy_legacy_redirects", "setup", "docstring:145"], "tokens": 763}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "intersphinx_mapping = {\n \"pandas\": (\n \"https://pandas.pydata.org/pandas-docs/stable/\",\n \"https://pandas.pydata.org/pandas-docs/stable/objects.inv\",\n ),\n \"numpy\": (\n \"https://docs.scipy.org/doc/numpy/\",\n \"https://docs.scipy.org/doc/numpy/objects.inv\",\n ),\n \"asyncssh\": (\n \"https://asyncssh.readthedocs.io/en/latest/\",\n \"https://asyncssh.readthedocs.io/en/latest/objects.inv\",\n ),\n}\n\n# Redirects\n# https://tech.signavio.com/2017/managing-sphinx-redirects\nredirect_files = [\n # old html, new html\n (\"bytes.html\", \"remote-data-services.html\"),\n (\"array-overview.html\", \"array.html\"),\n (\"array-ghost.html\", \"array-overlap.html\"),\n (\"dataframe-overview.html\", \"dataframe.html\"),\n (\"dataframe-performance.html\", \"dataframe-best-practices.html\"),\n (\"delayed-overview.html\", \"delayed.html\"),\n (\"scheduler-choice.html\", \"setup.html\"),\n (\"diagnostics.html\", \"diagnostics-local.html\"),\n (\"inspect.html\", \"graphviz.html\"),\n (\"faq.html\", \"https://stackoverflow.com/questions/tagged/dask?sort=frequent\"),\n (\"examples-tutorials.html\", \"https://examples.dask.org\"),\n (\"examples/array-extend.html\", \"https://examples.dask.org\"),\n (\"examples/array-hdf5.html\", \"https://examples.dask.org\"),\n (\"examples/array-numpy.html\", \"https://examples.dask.org\"),\n (\"examples/array-random.html\", \"https://examples.dask.org\"),\n (\"examples/bag-json.html\", 
\"https://examples.dask.org\"),\n (\"examples/bag-word-count-hdfs.html\", \"https://examples.dask.org\"),\n (\"examples/dataframe-csv.html\", \"https://examples.dask.org\"),\n (\"examples/dataframe-hdf5.html\", \"https://examples.dask.org\"),\n (\"examples/delayed-array.html\", \"https://examples.dask.org\"),\n (\"examples/delayed-custom.html\", \"https://examples.dask.org\"),\n (\"docs.html\", \"index.html\"),\n (\"use-cases.html\", \"https://stories.dask.org\"),\n (\"bag-overview.html\", \"bag.html\"),\n (\"distributed.html\", \"https://distributed.dask.org\"),\n]\n\n\nredirect_template = \"\"\"\\\n\n \n \n \n \n\n\"\"\"\n\nhtml_context = {\n \"css_files\": [\"_static/theme_overrides.css\"] # override wide tables in RTD theme\n}\n\n# Rate limiting issue for github: https://github.com/sphinx-doc/sphinx/issues/7388\nlinkcheck_ignore = [\n r\"^https?:\\/\\/(?:www\\.)?github.com\\/\",\n r\"^https?:\\/\\/localhost(?:[:\\/].+)?$\",\n]\n\ndoctest_global_setup = \"\"\"\nimport numpy as np\n\"\"\"\n\n\ndef copy_legacy_redirects(app, docname):\n if app.builder.name == \"html\":\n for html_src_path, new in redirect_files:\n page = redirect_template.format(new=new)\n target_path = app.outdir + \"/\" + html_src_path\n os.makedirs(os.path.dirname(target_path), exist_ok=True)\n with open(target_path, \"w\") as f:\n f.write(page)\n\n\ndef setup(app):\n app.connect(\"build-finished\", copy_legacy_redirects)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/ext/dask_config_sphinx_ext.py_requests_setup.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/ext/dask_config_sphinx_ext.py_requests_setup.return._", "embedding": null, "metadata": {"file_path": "docs/source/ext/dask_config_sphinx_ext.py", "file_name": "dask_config_sphinx_ext.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 49, "span_ids": ["DaskConfigDirective.run", "imports", "DaskConfigDirective", "setup", "get_remote_yaml"], "tokens": 272}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import requests\nimport yaml\n\nfrom docutils import nodes\nfrom docutils.parsers.rst import Directive, directives\n\n\ndef get_remote_yaml(url):\n r = requests.get(url)\n return yaml.safe_load(r.text)\n\n\nclass DaskConfigDirective(Directive):\n\n option_spec = {\n \"location\": directives.unchanged,\n \"schema\": directives.uri,\n \"config\": directives.uri,\n }\n\n def run(self):\n location = self.options[\"location\"]\n config = self.options[\"config\"]\n schema = self.options[\"schema\"]\n\n config = get_remote_yaml(config)\n schema = get_remote_yaml(schema)\n\n for k in location.split(\".\"):\n # dask config does not have a top level key\n # we need to pass full schema and config\n if k == \"dask\":\n schema = schema\n config = config\n else:\n config = config[k]\n schema = schema[\"properties\"].get(k, {})\n html = generate_html(config, schema, location)\n return [nodes.raw(\"\", html, format=\"html\")]\n\n\ndef 
setup(app):\n app.add_directive(\"dask-config-block\", DaskConfigDirective)\n\n return {\n \"version\": \"0.1\",\n \"parallel_read_safe\": True,\n \"parallel_write_safe\": True,\n }", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/ext/dask_config_sphinx_ext.py_dask_config_to_html_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/ext/dask_config_sphinx_ext.py_dask_config_to_html_", "embedding": null, "metadata": {"file_path": "docs/source/ext/dask_config_sphinx_ext.py", "file_name": "dask_config_sphinx_ext.py", "file_type": "text/x-python", "category": "implementation", "start_line": 52, "end_line": 95, "span_ids": ["generate_html", "dask_config_to_html"], "tokens": 275}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def dask_config_to_html(key, value, schema, prefix=\"\"):\n if isinstance(value, dict):\n return sum(\n [\n dask_config_to_html(\n k,\n v,\n schema.get(\"properties\", {}).get(k, {\"properties\": {}}),\n prefix=prefix + key + \".\",\n )\n for k, v in value.items()\n ],\n [],\n )\n\n else:\n\n try:\n description = schema[\"description\"]\n description = description.strip()\n except KeyError:\n description = \"No Comment\"\n\n key = prefix + key\n value = str(value)\n node = f\"\"\"
<dl class=\"py data\">\n<dt id=\"{key}\">\n <code class=\"sig-name descname\">{key}</code>\n <em class=\"property\">&nbsp;&nbsp;{value}</em>\n <a class=\"headerlink\" href=\"#{key}\" title=\"Permalink to this definition\">¶</a>\n</dt>\n<dd><p>{description}</p>\n</dd></dl>
\n\n \"\"\"\n return [node]\n\n\ndef generate_html(config, schema, location):\n nested_html = dask_config_to_html(\n key=\"\", value=config, schema=schema, prefix=location\n )\n return \"\".join(nested_html)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/scripts/scheduling.py_from_time_import_time_trivial.return.d_x_height_1_i_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/scripts/scheduling.py_from_time_import_time_trivial.return.d_x_height_1_i_", "embedding": null, "metadata": {"file_path": "docs/source/scripts/scheduling.py", "file_name": "scheduling.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 20, "span_ids": ["imports", "noop", "trivial", "impl"], "tokens": 139}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from time import time\nimport dask\nfrom dask import threaded, multiprocessing, local\nfrom random import randint\nimport matplotlib.pyplot as plt\n\n\ndef noop(x):\n pass\n\n\nnrepetitions = 1\n\n\ndef trivial(width, height):\n \"\"\" Embarrassingly parallel dask \"\"\"\n d = {(\"x\", 0, i): i for i in range(width)}\n for j in range(1, height):\n d.update({(\"x\", j, i): (noop, (\"x\", j - 1, i)) for i in range(width)})\n return d, [(\"x\", height - 1, i) for i in range(width)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/scripts/scheduling.py_crosstalk_crosstalk.return.d_x_height_1_i_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/scripts/scheduling.py_crosstalk_crosstalk.return.d_x_height_1_i_", "embedding": null, "metadata": {"file_path": "docs/source/scripts/scheduling.py", "file_name": "scheduling.py", "file_type": "text/x-python", "category": "implementation", "start_line": 23, "end_line": 36, "span_ids": ["crosstalk"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def crosstalk(width, height, connections):\n \"\"\" Natural looking dask with some inter-connections \"\"\"\n d = {(\"x\", 0, i): i for i in range(width)}\n for j in range(1, height):\n d.update(\n {\n (\"x\", j, i): (\n noop,\n [(\"x\", j - 1, randint(0, width)) for _ in range(connections)],\n )\n for i in range(width)\n }\n )\n return d, [(\"x\", height - 1, i) for i in range(width)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", 
"class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/setup.py__usr_bin_env_python_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/setup.py__usr_bin_env_python_", "embedding": null, "metadata": {"file_path": "setup.py", "file_name": "setup.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 78, "span_ids": ["impl:16", "docstring"], "tokens": 629}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "#!/usr/bin/env python\n\nimport sys\nfrom os.path import exists\nfrom setuptools import setup\nimport versioneer\n\n# NOTE: These are tested in `continuous_integration/travis/test_imports.sh` If\n# you modify these, make sure to change the corresponding line there.\nextras_require = {\n \"array\": [\"numpy >= 1.13.0\", \"toolz >= 0.8.2\"],\n \"bag\": [\n \"cloudpickle >= 0.2.2\",\n \"fsspec >= 0.6.0\",\n \"toolz >= 0.8.2\",\n \"partd >= 0.3.10\",\n ],\n \"dataframe\": [\n \"numpy >= 1.13.0\",\n \"pandas >= 0.23.0\",\n \"toolz >= 0.8.2\",\n \"partd >= 0.3.10\",\n \"fsspec >= 0.6.0\",\n ],\n \"distributed\": [\"distributed >= 2.0\"],\n \"diagnostics\": [\"bokeh >= 1.0.0, != 2.0.0\"],\n \"delayed\": [\"cloudpickle >= 0.2.2\", \"toolz >= 0.8.2\"],\n}\nextras_require[\"complete\"] = sorted({v for req in extras_require.values() for v in req})\n\ninstall_requires = [\"pyyaml\"]\n\npackages = [\n \"dask\",\n \"dask.array\",\n \"dask.bag\",\n \"dask.bytes\",\n \"dask.dataframe\",\n \"dask.dataframe.io\",\n \"dask.dataframe.tseries\",\n \"dask.diagnostics\",\n]\n\ntests = [p + \".tests\" for p in packages]\n\n# Only include pytest-runner in setup_requires if we're invoking tests\nif {\"pytest\", \"test\", \"ptr\"}.intersection(sys.argv):\n setup_requires = [\"pytest-runner\"]\nelse:\n setup_requires = []\n\nsetup(\n name=\"dask\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n description=\"Parallel PyData with Task Scheduling\",\n url=\"https://github.com/dask/dask/\",\n maintainer=\"Matthew Rocklin\",\n maintainer_email=\"mrocklin@gmail.com\",\n license=\"BSD\",\n keywords=\"task-scheduling parallel numpy pandas pydata\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n packages=packages + tests,\n long_description=open(\"README.rst\").read() if exists(\"README.rst\") else \"\",\n python_requires=\">=3.6\",\n install_requires=install_requires,\n setup_requires=setup_requires,\n tests_require=[\"pytest\"],\n extras_require=extras_require,\n include_package_data=True,\n zip_safe=False,\n)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py__Version_0_16_get_root.return.root": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py__Version_0_16_get_root.return.root", "embedding": 
null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2, "end_line": 402, "span_ids": ["VersioneerConfig", "imports", "get_root", "docstring"], "tokens": 496}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# Version: 0.16\n\nfrom __future__ import print_function\ntry:\n import configparser\nexcept ImportError:\n import ConfigParser as configparser\nimport errno\nimport json\nimport os\nimport re\nimport subprocess\nimport sys\n\n\nclass VersioneerConfig:\n \"\"\"Container for Versioneer configuration parameters.\"\"\"\n\n\ndef get_root():\n \"\"\"Get the project root directory.\n\n We require that all commands are run from the project root, i.e. the\n directory that contains setup.py, setup.cfg, and versioneer.py .\n \"\"\"\n root = os.path.realpath(os.path.abspath(os.getcwd()))\n setup_py = os.path.join(root, \"setup.py\")\n versioneer_py = os.path.join(root, \"versioneer.py\")\n if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):\n # allow 'python path/to/setup.py COMMAND'\n root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))\n setup_py = os.path.join(root, \"setup.py\")\n versioneer_py = os.path.join(root, \"versioneer.py\")\n if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):\n err = (\"Versioneer was unable to run the project root directory. \"\n \"Versioneer requires setup.py to be executed from \"\n \"its immediate directory (like 'python setup.py COMMAND'), \"\n \"or in a way that lets it use sys.argv[0] to find the root \"\n \"(like 'python path/to/setup.py COMMAND').\")\n raise VersioneerBadRootError(err)\n try:\n # Certain runtime workflows (setup.py install/develop in a setuptools\n # tree) execute all dependencies in a single python process, so\n # \"versioneer\" may be imported multiple times, and python's shared\n # module-import table will cache the first one. 
So we can't use\n # os.path.dirname(__file__), as that will find whichever\n # versioneer.py was first imported, even in later projects.\n me = os.path.realpath(os.path.abspath(__file__))\n if os.path.splitext(me)[0] != os.path.splitext(versioneer_py)[0]:\n print(\"Warning: build in %s is using versioneer.py from %s\"\n % (os.path.dirname(me), versioneer_py))\n except NameError:\n pass\n return root", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_config_from_root_get_config_from_root.return.cfg": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_config_from_root_get_config_from_root.return.cfg", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 405, "end_line": 431, "span_ids": ["get_config_from_root"], "tokens": 294}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_config_from_root(root):\n \"\"\"Read the project setup.cfg file to determine Versioneer config.\"\"\"\n # This might raise EnvironmentError (if setup.cfg is missing), or\n # configparser.NoSectionError (if it lacks a [versioneer] section), or\n # configparser.NoOptionError (if it lacks \"VCS=\"). 
See the docstring at\n # the top of versioneer.py for instructions on writing your setup.cfg .\n setup_cfg = os.path.join(root, \"setup.cfg\")\n parser = configparser.SafeConfigParser()\n with open(setup_cfg, \"r\") as f:\n parser.readfp(f)\n VCS = parser.get(\"versioneer\", \"VCS\") # mandatory\n\n def get(parser, name):\n if parser.has_option(\"versioneer\", name):\n return parser.get(\"versioneer\", name)\n return None\n cfg = VersioneerConfig()\n cfg.VCS = VCS\n cfg.style = get(parser, \"style\") or \"\"\n cfg.versionfile_source = get(parser, \"versionfile_source\")\n cfg.versionfile_build = get(parser, \"versionfile_build\")\n cfg.tag_prefix = get(parser, \"tag_prefix\")\n if cfg.tag_prefix in (\"''\", '\"\"'):\n cfg.tag_prefix = \"\"\n cfg.parentdir_prefix = get(parser, \"parentdir_prefix\")\n cfg.verbose = get(parser, \"verbose\")\n return cfg", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_NotThisMethod_register_vcs_handler.return.decorate": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_NotThisMethod_register_vcs_handler.return.decorate", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 434, "end_line": 450, "span_ids": ["impl:3", "NotThisMethod", "register_vcs_handler"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class NotThisMethod(Exception):\n \"\"\"Exception raised if a method is not valid for the current scenario.\"\"\"\n\n# these dictionaries contain VCS-specific tools\nLONG_VERSION_PY = {}\nHANDLERS = {}\n\n\ndef register_vcs_handler(vcs, method): # decorator\n \"\"\"Decorator to mark a method as the handler for a particular VCS.\"\"\"\n def decorate(f):\n \"\"\"Store f in HANDLERS[vcs][method].\"\"\"\n if vcs not in HANDLERS:\n HANDLERS[vcs] = {}\n HANDLERS[vcs][method] = f\n return f\n return decorate", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_run_command_run_command.return.stdout": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_run_command_run_command.return.stdout", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 453, "end_line": 484, "span_ids": ["run_command"], "tokens": 258}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, 
"text": "def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):\n \"\"\"Call the given command(s).\"\"\"\n assert isinstance(commands, list)\n p = None\n for c in commands:\n try:\n dispcmd = str([c] + args)\n # remember shell=False, so use git.cmd on windows, not just git\n p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,\n stderr=(subprocess.PIPE if hide_stderr\n else None))\n break\n except EnvironmentError:\n e = sys.exc_info()[1]\n if e.errno == errno.ENOENT:\n continue\n if verbose:\n print(\"unable to run %s\" % dispcmd)\n print(e)\n return None\n else:\n if verbose:\n print(\"unable to find command, tried %s\" % (commands,))\n return None\n stdout = p.communicate()[0].strip()\n if sys.version_info[0] >= 3:\n stdout = stdout.decode()\n if p.returncode != 0:\n if verbose:\n print(\"unable to run %s (error)\" % dispcmd)\n return None\n return stdout", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_git_get_keywords_git_get_keywords.return.keywords": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_git_get_keywords_git_get_keywords.return.keywords", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 972, "end_line": 994, "span_ids": ["git_get_keywords"], "tokens": 214}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@register_vcs_handler(\"git\", \"get_keywords\")\ndef git_get_keywords(versionfile_abs):\n \"\"\"Extract version information from the given file.\"\"\"\n # the code embedded in _version.py can just fetch the value of these\n # keywords. When used from setup.py, we don't want to import _version.py,\n # so we do it with a regexp instead. 
This function is not used from\n # _version.py.\n keywords = {}\n try:\n f = open(versionfile_abs, \"r\")\n for line in f.readlines():\n if line.strip().startswith(\"git_refnames =\"):\n mo = re.search(r'=\\s*\"(.*)\"', line)\n if mo:\n keywords[\"refnames\"] = mo.group(1)\n if line.strip().startswith(\"git_full =\"):\n mo = re.search(r'=\\s*\"(.*)\"', line)\n if mo:\n keywords[\"full\"] = mo.group(1)\n f.close()\n except EnvironmentError:\n pass\n return keywords", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_git_versions_from_keywords_git_versions_from_keywords.return._version_0_unknown_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_git_versions_from_keywords_git_versions_from_keywords.return._version_0_unknown_", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 997, "end_line": 1040, "span_ids": ["git_versions_from_keywords"], "tokens": 558}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@register_vcs_handler(\"git\", \"keywords\")\ndef git_versions_from_keywords(keywords, tag_prefix, verbose):\n \"\"\"Get version information from git keywords.\"\"\"\n if not keywords:\n raise NotThisMethod(\"no keywords at all, weird\")\n refnames = keywords[\"refnames\"].strip()\n if refnames.startswith(\"$Format\"):\n if verbose:\n print(\"keywords are unexpanded, not using\")\n raise NotThisMethod(\"unexpanded keywords, not a git-archive tarball\")\n refs = set([r.strip() for r in refnames.strip(\"()\").split(\",\")])\n # starting in git-1.8.3, tags are listed as \"tag: foo-1.0\" instead of\n # just \"foo-1.0\". If we see a \"tag: \" prefix, prefer those.\n TAG = \"tag: \"\n tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])\n if not tags:\n # Either we're using git < 1.8.3, or there really are no tags. We use\n # a heuristic: assume all version tags have a digit. The old git %d\n # expansion behaves like git log --decorate=short and strips out the\n # refs/heads/ and refs/tags/ prefixes that would let us distinguish\n # between branches and tags. By ignoring refnames without digits, we\n # filter out many common branch names like \"release\" and\n # \"stabilization\", as well as \"HEAD\" and \"master\".\n tags = set([r for r in refs if re.search(r'\\d', r)])\n if verbose:\n print(\"discarding '%s', no digits\" % \",\".join(refs-tags))\n if verbose:\n print(\"likely tags: %s\" % \",\".join(sorted(tags)))\n for ref in sorted(tags):\n # sorting will prefer e.g. 
\"2.0\" over \"2.0rc1\"\n if ref.startswith(tag_prefix):\n r = ref[len(tag_prefix):]\n if verbose:\n print(\"picking %s\" % r)\n return {\"version\": r,\n \"full-revisionid\": keywords[\"full\"].strip(),\n \"dirty\": False, \"error\": None\n }\n # no suitable tags, so version is \"0+unknown\", but full hex is still there\n if verbose:\n print(\"no suitable tags, using unknown + full revision id\")\n return {\"version\": \"0+unknown\",\n \"full-revisionid\": keywords[\"full\"].strip(),\n \"dirty\": False, \"error\": \"no suitable tags\"}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_git_pieces_from_vcs_git_pieces_from_vcs.return.pieces": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_git_pieces_from_vcs_git_pieces_from_vcs.return.pieces", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1043, "end_line": 1124, "span_ids": ["git_pieces_from_vcs"], "tokens": 772}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@register_vcs_handler(\"git\", \"pieces_from_vcs\")\ndef git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):\n \"\"\"Get version from 'git describe' in the root of the source tree.\n\n This only gets called if the git-archive 'subst' keywords were *not*\n expanded, and _version.py hasn't already been rewritten with a short\n version string, meaning we're inside a checked out source tree.\n \"\"\"\n if not os.path.exists(os.path.join(root, \".git\")):\n if verbose:\n print(\"no .git in %s\" % root)\n raise NotThisMethod(\"no .git directory\")\n\n GITS = [\"git\"]\n if sys.platform == \"win32\":\n GITS = [\"git.cmd\", \"git.exe\"]\n # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]\n # if there isn't one, this yields HEX[-dirty] (no NUM)\n describe_out = run_command(GITS, [\"describe\", \"--tags\", \"--dirty\",\n \"--always\", \"--long\",\n \"--match\", \"%s*\" % tag_prefix],\n cwd=root)\n # --long was added in git-1.5.5\n if describe_out is None:\n raise NotThisMethod(\"'git describe' failed\")\n describe_out = describe_out.strip()\n full_out = run_command(GITS, [\"rev-parse\", \"HEAD\"], cwd=root)\n if full_out is None:\n raise NotThisMethod(\"'git rev-parse' failed\")\n full_out = full_out.strip()\n\n pieces = {}\n pieces[\"long\"] = full_out\n pieces[\"short\"] = full_out[:7] # maybe improved later\n pieces[\"error\"] = None\n\n # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]\n # TAG might have hyphens.\n git_describe = describe_out\n\n # look for -dirty suffix\n dirty = git_describe.endswith(\"-dirty\")\n pieces[\"dirty\"] = dirty\n if dirty:\n git_describe = git_describe[:git_describe.rindex(\"-dirty\")]\n\n # now we have TAG-NUM-gHEX or HEX\n\n if \"-\" in git_describe:\n # TAG-NUM-gHEX\n mo = re.search(r'^(.+)-(\\d+)-g([0-9a-f]+)$', git_describe)\n if not mo:\n # unparseable. 
Maybe git-describe is misbehaving?\n pieces[\"error\"] = (\"unable to parse git-describe output: '%s'\"\n % describe_out)\n return pieces\n\n # tag\n full_tag = mo.group(1)\n if not full_tag.startswith(tag_prefix):\n if verbose:\n fmt = \"tag '%s' doesn't start with prefix '%s'\"\n print(fmt % (full_tag, tag_prefix))\n pieces[\"error\"] = (\"tag '%s' doesn't start with prefix '%s'\"\n % (full_tag, tag_prefix))\n return pieces\n pieces[\"closest-tag\"] = full_tag[len(tag_prefix):]\n\n # distance: number of commits since tag\n pieces[\"distance\"] = int(mo.group(2))\n\n # commit: short hex revision ID\n pieces[\"short\"] = mo.group(3)\n\n else:\n # HEX: no tags\n pieces[\"closest-tag\"] = None\n count_out = run_command(GITS, [\"rev-list\", \"HEAD\", \"--count\"],\n cwd=root)\n pieces[\"distance\"] = int(count_out) # total number of commits\n\n return pieces", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_do_vcs_install_do_vcs_install.run_command_GITS_add_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_do_vcs_install_do_vcs_install.run_command_GITS_add_", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1127, "end_line": 1162, "span_ids": ["do_vcs_install"], "tokens": 299}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def do_vcs_install(manifest_in, versionfile_source, ipy):\n \"\"\"Git-specific installation logic for Versioneer.\n\n For Git, this means creating/changing .gitattributes to mark _version.py\n for export-time keyword substitution.\n \"\"\"\n GITS = [\"git\"]\n if sys.platform == \"win32\":\n GITS = [\"git.cmd\", \"git.exe\"]\n files = [manifest_in, versionfile_source]\n if ipy:\n files.append(ipy)\n try:\n me = __file__\n if me.endswith(\".pyc\") or me.endswith(\".pyo\"):\n me = os.path.splitext(me)[0] + \".py\"\n versioneer_file = os.path.relpath(me)\n except NameError:\n versioneer_file = \"versioneer.py\"\n files.append(versioneer_file)\n present = False\n try:\n f = open(\".gitattributes\", \"r\")\n for line in f.readlines():\n if line.strip().startswith(versionfile_source):\n if \"export-subst\" in line.strip().split()[1:]:\n present = True\n f.close()\n except EnvironmentError:\n pass\n if not present:\n f = open(\".gitattributes\", \"a+\")\n f.write(\"%s export-subst\\n\" % versionfile_source)\n f.close()\n files.append(\".gitattributes\")\n run_command(GITS, [\"add\", \"--\"] + files)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_versions_from_parentdir_versions_from_parentdir.return._version_dirname_len_p": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_versions_from_parentdir_versions_from_parentdir.return._version_dirname_len_p", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1165, "end_line": 1179, "span_ids": ["versions_from_parentdir"], "tokens": 156}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def versions_from_parentdir(parentdir_prefix, root, verbose):\n \"\"\"Try to determine the version from the parent directory name.\n\n Source tarballs conventionally unpack into a directory that includes\n both the project name and a version string.\n \"\"\"\n dirname = os.path.basename(root)\n if not dirname.startswith(parentdir_prefix):\n if verbose:\n print(\"guessing rootdir is '%s', but '%s' doesn't start with \"\n \"prefix '%s'\" % (root, dirname, parentdir_prefix))\n raise NotThisMethod(\"rootdir doesn't start with parentdir_prefix\")\n return {\"version\": dirname[len(parentdir_prefix):],\n \"full-revisionid\": None,\n \"dirty\": False, \"error\": None}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_SHORT_VERSION_PY_versions_from_file.return.json_loads_mo_group_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_SHORT_VERSION_PY_versions_from_file.return.json_loads_mo_group_1_", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1181, "end_line": 1211, "span_ids": ["versions_from_file", "impl:8"], "tokens": 204}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "SHORT_VERSION_PY = \"\"\"\n# This file was generated by 'versioneer.py' (0.16) from\n# revision-control system data, or from the parent directory name of an\n# unpacked source archive. 
Distribution tarballs contain a pre-generated copy\n# of this file.\n\nimport json\nimport sys\n\nversion_json = '''\n%s\n''' # END VERSION_JSON\n\n\ndef get_versions():\n return json.loads(version_json)\n\"\"\"\n\n\ndef versions_from_file(filename):\n \"\"\"Try to determine the version from _version.py if present.\"\"\"\n try:\n with open(filename) as f:\n contents = f.read()\n except EnvironmentError:\n raise NotThisMethod(\"unable to read _version.py\")\n mo = re.search(r\"version_json = '''\\n(.*)''' # END VERSION_JSON\",\n contents, re.M | re.S)\n if not mo:\n raise NotThisMethod(\"no version_json in _version.py\")\n return json.loads(mo.group(1))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_write_to_version_file_plus_or_dot.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_write_to_version_file_plus_or_dot.return._", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1214, "end_line": 1229, "span_ids": ["plus_or_dot", "write_to_version_file"], "tokens": 135}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def write_to_version_file(filename, versions):\n \"\"\"Write the given version number to the given _version.py file.\"\"\"\n os.unlink(filename)\n contents = json.dumps(versions, sort_keys=True,\n indent=1, separators=(\",\", \": \"))\n with open(filename, \"w\") as f:\n f.write(SHORT_VERSION_PY % contents)\n\n print(\"set %s to '%s'\" % (filename, versions[\"version\"]))\n\n\ndef plus_or_dot(pieces):\n \"\"\"Return a + if we don't already have one, else return a .\"\"\"\n if \"+\" in pieces.get(\"closest-tag\", \"\"):\n return \".\"\n return \"+\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_render_pep440_render_pep440_pre.return.rendered": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_render_pep440_render_pep440_pre.return.rendered", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1232, "end_line": 1270, "span_ids": ["render_pep440_pre", "render_pep440"], "tokens": 317}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def render_pep440(pieces):\n \"\"\"Build up version string, with post-release \"local version identifier\".\n\n Our goal: 
TAG[+DISTANCE.gHEX[.dirty]] . Note that if you\n get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty\n\n Exceptions:\n 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"] or pieces[\"dirty\"]:\n rendered += plus_or_dot(pieces)\n rendered += \"%d.g%s\" % (pieces[\"distance\"], pieces[\"short\"])\n if pieces[\"dirty\"]:\n rendered += \".dirty\"\n else:\n # exception #1\n rendered = \"0+untagged.%d.g%s\" % (pieces[\"distance\"],\n pieces[\"short\"])\n if pieces[\"dirty\"]:\n rendered += \".dirty\"\n return rendered\n\n\ndef render_pep440_pre(pieces):\n \"\"\"TAG[.post.devDISTANCE] -- No -dirty.\n\n Exceptions:\n 1: no tags. 0.post.devDISTANCE\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"]:\n rendered += \".post.dev%d\" % pieces[\"distance\"]\n else:\n # exception #1\n rendered = \"0.post.dev%d\" % pieces[\"distance\"]\n return rendered", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_render_pep440_post_render_pep440_post.return.rendered": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_render_pep440_post_render_pep440_post.return.rendered", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1273, "end_line": 1297, "span_ids": ["render_pep440_post"], "tokens": 217}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def render_pep440_post(pieces):\n \"\"\"TAG[.postDISTANCE[.dev0]+gHEX] .\n\n The \".dev0\" means dirty. Note that .dev0 sorts backwards\n (a dirty tree will appear \"older\" than the corresponding clean one),\n but you shouldn't be releasing software with -dirty anyways.\n\n Exceptions:\n 1: no tags. 
0.postDISTANCE[.dev0]\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"] or pieces[\"dirty\"]:\n rendered += \".post%d\" % pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n rendered += plus_or_dot(pieces)\n rendered += \"g%s\" % pieces[\"short\"]\n else:\n # exception #1\n rendered = \"0.post%d\" % pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n rendered += \"+g%s\" % pieces[\"short\"]\n return rendered", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_render_pep440_old_render_pep440_old.return.rendered": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_render_pep440_old_render_pep440_old.return.rendered", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1300, "end_line": 1319, "span_ids": ["render_pep440_old"], "tokens": 144}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def render_pep440_old(pieces):\n \"\"\"TAG[.postDISTANCE[.dev0]] .\n\n The \".dev0\" means dirty.\n\n Exceptions:\n 1: no tags. 0.postDISTANCE[.dev0]\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"] or pieces[\"dirty\"]:\n rendered += \".post%d\" % pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n else:\n # exception #1\n rendered = \"0.post%d\" % pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n return rendered", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_render_git_describe_render_git_describe.return.rendered": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_render_git_describe_render_git_describe.return.rendered", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1322, "end_line": 1339, "span_ids": ["render_git_describe"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def render_git_describe(pieces):\n \"\"\"TAG[-DISTANCE-gHEX][-dirty].\n\n Like 'git describe --tags --dirty --always'.\n\n Exceptions:\n 1: no tags. 
HEX[-dirty] (note: no 'g' prefix)\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"]:\n rendered += \"-%d-g%s\" % (pieces[\"distance\"], pieces[\"short\"])\n else:\n # exception #1\n rendered = pieces[\"short\"]\n if pieces[\"dirty\"]:\n rendered += \"-dirty\"\n return rendered", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_render_git_describe_long_render_git_describe_long.return.rendered": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_render_git_describe_long_render_git_describe_long.return.rendered", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1342, "end_line": 1359, "span_ids": ["render_git_describe_long"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def render_git_describe_long(pieces):\n \"\"\"TAG-DISTANCE-gHEX[-dirty].\n\n Like 'git describe --tags --dirty --always --long'.\n The distance/hash is unconditional.\n\n Exceptions:\n 1: no tags. HEX[-dirty] (note: no 'g' prefix)\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n rendered += \"-%d-g%s\" % (pieces[\"distance\"], pieces[\"short\"])\n else:\n # exception #1\n rendered = pieces[\"short\"]\n if pieces[\"dirty\"]:\n rendered += \"-dirty\"\n return rendered", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_render_VersioneerBadRootError._The_project_root_direc": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_render_VersioneerBadRootError._The_project_root_direc", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1362, "end_line": 1393, "span_ids": ["VersioneerBadRootError", "render"], "tokens": 271}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def render(pieces, style):\n \"\"\"Render the given version pieces into the requested style.\"\"\"\n if pieces[\"error\"]:\n return {\"version\": \"unknown\",\n \"full-revisionid\": pieces.get(\"long\"),\n \"dirty\": None,\n \"error\": pieces[\"error\"]}\n\n if not style or style == \"default\":\n style = \"pep440\" # the default\n\n if style == \"pep440\":\n rendered = render_pep440(pieces)\n elif style == \"pep440-pre\":\n rendered = render_pep440_pre(pieces)\n 
elif style == \"pep440-post\":\n rendered = render_pep440_post(pieces)\n elif style == \"pep440-old\":\n rendered = render_pep440_old(pieces)\n elif style == \"git-describe\":\n rendered = render_git_describe(pieces)\n elif style == \"git-describe-long\":\n rendered = render_git_describe_long(pieces)\n else:\n raise ValueError(\"unknown style '%s'\" % style)\n\n return {\"version\": rendered, \"full-revisionid\": pieces[\"long\"],\n \"dirty\": pieces[\"dirty\"], \"error\": None}\n\n\nclass VersioneerBadRootError(Exception):\n \"\"\"The project root directory is unknown or missing key files.\"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_versions_get_versions.return._version_0_unknown_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_versions_get_versions.return._version_0_unknown_", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1396, "end_line": 1468, "span_ids": ["get_versions"], "tokens": 609}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_versions(verbose=False):\n \"\"\"Get the project version from whatever source is available.\n\n Returns dict with two keys: 'version' and 'full'.\n \"\"\"\n if \"versioneer\" in sys.modules:\n # see the discussion in cmdclass.py:get_cmdclass()\n del sys.modules[\"versioneer\"]\n\n root = get_root()\n cfg = get_config_from_root(root)\n\n assert cfg.VCS is not None, \"please set [versioneer]VCS= in setup.cfg\"\n handlers = HANDLERS.get(cfg.VCS)\n assert handlers, \"unrecognized VCS '%s'\" % cfg.VCS\n verbose = verbose or cfg.verbose\n assert cfg.versionfile_source is not None, \\\n \"please set versioneer.versionfile_source\"\n assert cfg.tag_prefix is not None, \"please set versioneer.tag_prefix\"\n\n versionfile_abs = os.path.join(root, cfg.versionfile_source)\n\n # extract version from first of: _version.py, VCS command (e.g. 'git\n # describe'), parentdir. 
This is meant to work for developers using a\n # source checkout, for users of a tarball created by 'setup.py sdist',\n # and for users of a tarball/zipball created by 'git archive' or github's\n # download-from-tag feature or the equivalent in other VCSes.\n\n get_keywords_f = handlers.get(\"get_keywords\")\n from_keywords_f = handlers.get(\"keywords\")\n if get_keywords_f and from_keywords_f:\n try:\n keywords = get_keywords_f(versionfile_abs)\n ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)\n if verbose:\n print(\"got version from expanded keyword %s\" % ver)\n return ver\n except NotThisMethod:\n pass\n\n try:\n ver = versions_from_file(versionfile_abs)\n if verbose:\n print(\"got version from file %s %s\" % (versionfile_abs, ver))\n return ver\n except NotThisMethod:\n pass\n\n from_vcs_f = handlers.get(\"pieces_from_vcs\")\n if from_vcs_f:\n try:\n pieces = from_vcs_f(cfg.tag_prefix, root, verbose)\n ver = render(pieces, cfg.style)\n if verbose:\n print(\"got version from VCS %s\" % ver)\n return ver\n except NotThisMethod:\n pass\n\n try:\n if cfg.parentdir_prefix:\n ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)\n if verbose:\n print(\"got version from parentdir %s\" % ver)\n return ver\n except NotThisMethod:\n pass\n\n if verbose:\n print(\"unable to compute version\")\n\n return {\"version\": \"0+unknown\", \"full-revisionid\": None,\n \"dirty\": None, \"error\": \"unable to compute version\"}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_version_get_cmdclass.from_distutils_core_impor": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_version_get_cmdclass.from_distutils_core_impor", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1471, "end_line": 1496, "span_ids": ["get_cmdclass", "get_version"], "tokens": 302}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_version():\n \"\"\"Get the short version string for this project.\"\"\"\n return get_versions()[\"version\"]\n\n\ndef get_cmdclass():\n \"\"\"Get the custom setuptools/distutils subclasses used by Versioneer.\"\"\"\n if \"versioneer\" in sys.modules:\n del sys.modules[\"versioneer\"]\n # this fixes the \"python setup.py develop\" case (also 'install' and\n # 'easy_install .'), in which subdependencies of the main project are\n # built (using setup.py bdist_egg) in the same python process. Assume\n # a main project A and a dependency B, which use different versions\n # of Versioneer. A's setup.py imports A's Versioneer, leaving it in\n # sys.modules by the time B's setup.py is executed, causing B to run\n # with the wrong versioneer. Setuptools wraps the sub-dep builds in a\n # sandbox that restores sys.modules to its pre-build state, so the\n # parent is protected against the child's \"import versioneer\". 
By\n # removing ourselves from sys.modules here, before the child build\n # happens, we protect the child from the parent's versioneer too.\n # Also see https://github.com/warner/python-versioneer/issues/52\n\n cmds = {}\n\n # we add \"version\" to both distutils and setuptools\n from distutils.core import Command\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_cmdclass.cmd_version_get_cmdclass.cmd_version.run.if_vers_error_.print_error_s_vers": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_cmdclass.cmd_version_get_cmdclass.cmd_version.run.if_vers_error_.print_error_s_vers", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1498, "end_line": 1515, "span_ids": ["get_cmdclass"], "tokens": 138}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_cmdclass():\n # ... other code\n\n class cmd_version(Command):\n description = \"report generated version string\"\n user_options = []\n boolean_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n vers = get_versions(verbose=True)\n print(\"Version: %s\" % vers[\"version\"])\n print(\" full-revisionid: %s\" % vers.get(\"full-revisionid\"))\n print(\" dirty: %s\" % vers.get(\"dirty\"))\n if vers[\"error\"]:\n print(\" error: %s\" % vers[\"error\"])\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_cmdclass.cmds_version_cmd_ver_get_cmdclass.if_setuptools_in_sys_mo.else_.from_distutils_command_bu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_cmdclass.cmds_version_cmd_ver_get_cmdclass.if_setuptools_in_sys_mo.else_.from_distutils_command_bu", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1516, "end_line": 1532, "span_ids": ["get_cmdclass"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_cmdclass():\n # ... 
other code\n cmds[\"version\"] = cmd_version\n\n # we override \"build_py\" in both distutils and setuptools\n #\n # most invocation pathways end up running build_py:\n # distutils/build -> build_py\n # distutils/install -> distutils/build ->..\n # setuptools/bdist_wheel -> distutils/install ->..\n # setuptools/bdist_egg -> distutils/install_lib -> build_py\n # setuptools/install -> bdist_egg ->..\n # setuptools/develop -> ?\n\n # we override different \"build_py\" commands for both environments\n if \"setuptools\" in sys.modules:\n from setuptools.command.build_py import build_py as _build_py\n else:\n from distutils.command.build_py import build_py as _build_py\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_cmdclass.cmd_build_py_get_cmdclass.cmd_build_py.run.if_cfg_versionfile_build_.write_to_version_file_tar": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_cmdclass.cmd_build_py_get_cmdclass.cmd_build_py.run.if_cfg_versionfile_build_.write_to_version_file_tar", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1534, "end_line": 1546, "span_ids": ["get_cmdclass"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_cmdclass():\n # ... other code\n\n class cmd_build_py(_build_py):\n def run(self):\n root = get_root()\n cfg = get_config_from_root(root)\n versions = get_versions()\n _build_py.run(self)\n # now locate _version.py in the new build/ directory and replace\n # it with an updated value\n if cfg.versionfile_build:\n target_versionfile = os.path.join(self.build_lib,\n cfg.versionfile_build)\n print(\"UPDATING %s\" % target_versionfile)\n write_to_version_file(target_versionfile, versions)\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_cmdclass.cmds_build_py_cmd_bu_get_cmdclass.None_3.else_.from_distutils_command_sd": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_cmdclass.cmds_build_py_cmd_bu_get_cmdclass.None_3.else_.from_distutils_command_sd", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1547, "end_line": 1579, "span_ids": ["get_cmdclass"], "tokens": 297}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_cmdclass():\n # ... other code\n cmds[\"build_py\"] = cmd_build_py\n\n if \"cx_Freeze\" in sys.modules: # cx_freeze enabled?\n from cx_Freeze.dist import build_exe as _build_exe\n\n class cmd_build_exe(_build_exe):\n def run(self):\n root = get_root()\n cfg = get_config_from_root(root)\n versions = get_versions()\n target_versionfile = cfg.versionfile_source\n print(\"UPDATING %s\" % target_versionfile)\n write_to_version_file(target_versionfile, versions)\n\n _build_exe.run(self)\n os.unlink(target_versionfile)\n with open(cfg.versionfile_source, \"w\") as f:\n LONG = LONG_VERSION_PY[cfg.VCS]\n f.write(LONG %\n {\"DOLLAR\": \"$\",\n \"STYLE\": cfg.style,\n \"TAG_PREFIX\": cfg.tag_prefix,\n \"PARENTDIR_PREFIX\": cfg.parentdir_prefix,\n \"VERSIONFILE_SOURCE\": cfg.versionfile_source,\n })\n cmds[\"build_exe\"] = cmd_build_exe\n del cmds[\"build_py\"]\n\n # we override different \"sdist\" commands for both environments\n if \"setuptools\" in sys.modules:\n from setuptools.command.sdist import sdist as _sdist\n else:\n from distutils.command.sdist import sdist as _sdist\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_cmdclass.cmd_sdist_get_cmdclass.return.cmds": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_cmdclass.cmd_sdist_get_cmdclass.return.cmds", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1581, "end_line": 1603, "span_ids": ["get_cmdclass"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_cmdclass():\n # ... 
other code\n\n class cmd_sdist(_sdist):\n def run(self):\n versions = get_versions()\n self._versioneer_generated_versions = versions\n # unless we update this, the command will keep using the old\n # version\n self.distribution.metadata.version = versions[\"version\"]\n return _sdist.run(self)\n\n def make_release_tree(self, base_dir, files):\n root = get_root()\n cfg = get_config_from_root(root)\n _sdist.make_release_tree(self, base_dir, files)\n # now locate _version.py in the new base_dir directory\n # (remembering that it may be a hardlink) and replace it with an\n # updated value\n target_versionfile = os.path.join(base_dir, cfg.versionfile_source)\n print(\"UPDATING %s\" % target_versionfile)\n write_to_version_file(target_versionfile,\n self._versioneer_generated_versions)\n cmds[\"sdist\"] = cmd_sdist\n\n return cmds", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_CONFIG_ERROR_INIT_PY_SNIPPET._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_CONFIG_ERROR_INIT_PY_SNIPPET._", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1606, "end_line": 1647, "span_ids": ["impl:10"], "tokens": 243}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "CONFIG_ERROR = \"\"\"\nsetup.cfg is missing the necessary Versioneer configuration. You need\na section like:\n\n [versioneer]\n VCS = git\n style = pep440\n versionfile_source = src/myproject/_version.py\n versionfile_build = myproject/_version.py\n tag_prefix =\n parentdir_prefix = myproject-\n\nYou will also need to edit your setup.py to use the results:\n\n import versioneer\n setup(version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(), ...)\n\nPlease read the docstring in ./versioneer.py for configuration instructions,\nedit setup.cfg, and re-run the installer or 'python versioneer.py setup'.\n\"\"\"\n\nSAMPLE_CONFIG = \"\"\"\n# See the docstring in versioneer.py for instructions. 
Note that you must\n# re-run 'versioneer.py setup' after changing this section, and commit the\n# resulting files.\n\n[versioneer]\n#VCS = git\n#style = pep440\n#versionfile_source =\n#versionfile_build =\n#tag_prefix =\n#parentdir_prefix =\n\n\"\"\"\n\nINIT_PY_SNIPPET = \"\"\"\nfrom ._version import get_versions\n__version__ = get_versions()['version']\ndel get_versions\n\"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_do_setup_do_setup.return.0": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_do_setup_do_setup.return.0", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1650, "end_line": 1729, "span_ids": ["do_setup"], "tokens": 758}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def do_setup():\n \"\"\"Main VCS-independent setup function for installing Versioneer.\"\"\"\n root = get_root()\n try:\n cfg = get_config_from_root(root)\n except (EnvironmentError, configparser.NoSectionError,\n configparser.NoOptionError) as e:\n if isinstance(e, (EnvironmentError, configparser.NoSectionError)):\n print(\"Adding sample versioneer config to setup.cfg\",\n file=sys.stderr)\n with open(os.path.join(root, \"setup.cfg\"), \"a\") as f:\n f.write(SAMPLE_CONFIG)\n print(CONFIG_ERROR, file=sys.stderr)\n return 1\n\n print(\" creating %s\" % cfg.versionfile_source)\n with open(cfg.versionfile_source, \"w\") as f:\n LONG = LONG_VERSION_PY[cfg.VCS]\n f.write(LONG % {\"DOLLAR\": \"$\",\n \"STYLE\": cfg.style,\n \"TAG_PREFIX\": cfg.tag_prefix,\n \"PARENTDIR_PREFIX\": cfg.parentdir_prefix,\n \"VERSIONFILE_SOURCE\": cfg.versionfile_source,\n })\n\n ipy = os.path.join(os.path.dirname(cfg.versionfile_source),\n \"__init__.py\")\n if os.path.exists(ipy):\n try:\n with open(ipy, \"r\") as f:\n old = f.read()\n except EnvironmentError:\n old = \"\"\n if INIT_PY_SNIPPET not in old:\n print(\" appending to %s\" % ipy)\n with open(ipy, \"a\") as f:\n f.write(INIT_PY_SNIPPET)\n else:\n print(\" %s unmodified\" % ipy)\n else:\n print(\" %s doesn't exist, ok\" % ipy)\n ipy = None\n\n # Make sure both the top-level \"versioneer.py\" and versionfile_source\n # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so\n # they'll be copied into source distributions. Pip won't be able to\n # install the package without this.\n manifest_in = os.path.join(root, \"MANIFEST.in\")\n simple_includes = set()\n try:\n with open(manifest_in, \"r\") as f:\n for line in f:\n if line.startswith(\"include \"):\n for include in line.split()[1:]:\n simple_includes.add(include)\n except EnvironmentError:\n pass\n # That doesn't cover everything MANIFEST.in can do\n # (https://docs.python.org/2/distutils/sourcedist.html#commands), so\n # it might give some false negatives. 
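The MANIFEST.in scan being described at this point in `do_setup` only recognizes plain `include` lines, which is where the possible false negatives come from. A standalone sketch of that scan, with the path made a parameter:

```python
def manifest_simple_includes(manifest_path="MANIFEST.in"):
    """Collect the targets of plain 'include' lines from MANIFEST.in.

    Directives such as 'recursive-include' or 'graft' are ignored, so the
    result can miss files that are in fact included (false negatives).
    """
    found = set()
    try:
        with open(manifest_path, "r") as f:
            for line in f:
                if line.startswith("include "):
                    found.update(line.split()[1:])
    except OSError:
        pass  # no MANIFEST.in yet; the caller appends to create it
    return found
```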
Appending redundant 'include'\n # lines is safe, though.\n if \"versioneer.py\" not in simple_includes:\n print(\" appending 'versioneer.py' to MANIFEST.in\")\n with open(manifest_in, \"a\") as f:\n f.write(\"include versioneer.py\\n\")\n else:\n print(\" 'versioneer.py' already in MANIFEST.in\")\n if cfg.versionfile_source not in simple_includes:\n print(\" appending versionfile_source ('%s') to MANIFEST.in\" %\n cfg.versionfile_source)\n with open(manifest_in, \"a\") as f:\n f.write(\"include %s\\n\" % cfg.versionfile_source)\n else:\n print(\" versionfile_source already in MANIFEST.in\")\n\n # Make VCS-specific changes. For git, this means creating/changing\n # .gitattributes to mark _version.py for export-time keyword\n # substitution.\n do_vcs_install(manifest_in, cfg.versionfile_source, ipy)\n return 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_scan_setup_py_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_scan_setup_py_", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1732, "end_line": 1775, "span_ids": ["scan_setup_py", "impl:16"], "tokens": 351}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def scan_setup_py():\n \"\"\"Validate the contents of setup.py against Versioneer's expectations.\"\"\"\n found = set()\n setters = False\n errors = 0\n with open(\"setup.py\", \"r\") as f:\n for line in f.readlines():\n if \"import versioneer\" in line:\n found.add(\"import\")\n if \"versioneer.get_cmdclass()\" in line:\n found.add(\"cmdclass\")\n if \"versioneer.get_version()\" in line:\n found.add(\"get_version\")\n if \"versioneer.VCS\" in line:\n setters = True\n if \"versioneer.versionfile_source\" in line:\n setters = True\n if len(found) != 3:\n print(\"\")\n print(\"Your setup.py appears to be missing some important items\")\n print(\"(but I might be wrong). Please make sure it has something\")\n print(\"roughly like the following:\")\n print(\"\")\n print(\" import versioneer\")\n print(\" setup( version=versioneer.get_version(),\")\n print(\" cmdclass=versioneer.get_cmdclass(), ...)\")\n print(\"\")\n errors += 1\n if setters:\n print(\"You should remove lines like 'versioneer.VCS = ' and\")\n print(\"'versioneer.versionfile_source = ' . 
This configuration\")\n print(\"now lives in setup.cfg, and should be removed from setup.py\")\n print(\"\")\n errors += 1\n return errors\n\nif __name__ == \"__main__\":\n cmd = sys.argv[1]\n if cmd == \"setup\":\n errors = do_setup()\n errors += scan_setup_py()\n if errors:\n sys.exit(1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk_types.py_np_register_chunk_type._HANDLED_CHUNK_TYPES_appe": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk_types.py_np_register_chunk_type._HANDLED_CHUNK_TYPES_appe", "embedding": null, "metadata": {"file_path": "dask/array/chunk_types.py", "file_name": "chunk_types.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 105, "span_ids": ["imports", "register_chunk_type"], "tokens": 1019}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\n\n\n# Start list of valid chunk types, to be added to with guarded imports\n_HANDLED_CHUNK_TYPES = [np.ndarray, np.ma.MaskedArray]\n\n\ndef register_chunk_type(type):\n \"\"\"Register the given type as a valid chunk and downcast array type\n\n Parameters\n ----------\n type : type\n Duck array type to be registered as a type Dask can safely wrap as a chunk and\n to which Dask does not defer in arithmetic operations and NumPy\n functions/ufuncs.\n\n Notes\n -----\n A :py:class:`dask.array.Array` can contain any sufficiently \"NumPy-like\" array in\n its chunks. These are also referred to as \"duck arrays\" since they match the most\n important parts of NumPy's array API, and so, behave the same way when relying on\n duck typing.\n\n However, for multiple duck array types to interoperate properly, they need to\n properly defer to each other in arithmetic operations and NumPy functions/ufuncs\n according to a well-defined type casting hierarchy (\n `see NEP 13`_\n ). In an effort to maintain this hierarchy, Dask defers to all other duck array\n types except those in its internal registry. By default, this registry contains\n\n * :py:class:`numpy.ndarray`\n * :py:class:`numpy.ma.MaskedArray`\n * :py:class:`cupy.ndarray`\n * :py:class:`sparse.SparseArray`\n * :py:class:`scipy.sparse.spmatrix`\n\n This function exists to append any other types to this registry. If a type is not\n in this registry, and yet is a downcast type (it comes below\n :py:class:`dask.array.Array` in the type casting hierarchy), a ``TypeError`` will\n be raised due to all operand types returning ``NotImplemented``.\n\n Examples\n --------\n Using a mock ``FlaggedArray`` class as an example chunk type unknown to Dask with\n minimal duck array API:\n\n >>> import numpy.lib.mixins\n >>> class FlaggedArray(numpy.lib.mixins.NDArrayOperatorsMixin):\n ... def __init__(self, a, flag=False):\n ... self.a = a\n ... self.flag = flag\n ... def __repr__(self):\n ... return f\"Flag: {self.flag}, Array: \" + repr(self.a)\n ... def __array__(self):\n ... return np.asarray(self.a)\n ... 
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n ... if method == '__call__':\n ... downcast_inputs = []\n ... flag = False\n ... for input in inputs:\n ... if isinstance(input, self.__class__):\n ... flag = flag or input.flag\n ... downcast_inputs.append(input.a)\n ... elif isinstance(input, np.ndarray):\n ... downcast_inputs.append(input)\n ... else:\n ... return NotImplemented\n ... return self.__class__(ufunc(*downcast_inputs, **kwargs), flag)\n ... else:\n ... return NotImplemented\n ... @property\n ... def shape(self):\n ... return self.a.shape\n ... @property\n ... def ndim(self):\n ... return self.a.ndim\n ... @property\n ... def dtype(self):\n ... return self.a.dtype\n ... def __getitem__(self, key):\n ... return type(self)(self.a[key], self.flag)\n ... def __setitem__(self, key, value):\n ... self.a[key] = value\n\n Before registering ``FlaggedArray``, both types will attempt to defer to the\n other:\n\n >>> import dask.array as da\n >>> da.ones(5) - FlaggedArray(np.ones(5), True)\n Traceback (most recent call last):\n ...\n TypeError: operand type(s) all returned NotImplemented ...\n\n However, once registered, Dask will be able to handle operations with this new\n type:\n\n >>> da.register_chunk_type(FlaggedArray)\n >>> x = da.ones(5) - FlaggedArray(np.ones(5), True)\n >>> x # doctest: +SKIP\n dask.array\n >>> x.compute()\n Flag: True, Array: array([0., 0., 0., 0., 0.])\n \"\"\"\n _HANDLED_CHUNK_TYPES.append(type)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__should_delegate_check_if_handled_given_other.return.wrapper": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__should_delegate_check_if_handled_given_other.return.wrapper", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 173, "end_line": 203, "span_ids": ["check_if_handled_given_other", "_should_delegate"], "tokens": 231}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _should_delegate(other) -> bool:\n \"\"\"Check whether Dask should delegate to the other.\n This implementation follows NEP-13:\n https://numpy.org/neps/nep-0013-ufunc-overrides.html#behavior-in-combination-with-python-s-binary-operations\n \"\"\"\n if hasattr(other, \"__array_ufunc__\") and other.__array_ufunc__ is None:\n return True\n elif (\n hasattr(other, \"__array_ufunc__\")\n and not is_valid_array_chunk(other)\n and type(other).__array_ufunc__ is not Array.__array_ufunc__\n ):\n return True\n return False\n\n\ndef check_if_handled_given_other(f):\n \"\"\"Check if method is handled by Dask given type of other\n\n Ensures proper deferral to upcast types in dunder operations without\n assuming unknown types are automatically downcast types.\n \"\"\"\n\n @wraps(f)\n def wrapper(self, other):\n if _should_delegate(other):\n return NotImplemented\n else:\n return f(self, other)\n\n return wrapper", 
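To see the two NEP-13 delegation rules in `_should_delegate` without any dask machinery, here is a toy re-implementation checked against three kinds of operands. `REGISTERED` stands in for the registry behind `is_valid_array_chunk`, and `owner_ufunc` for `Array.__array_ufunc__`; this is a sketch of the rules, not the real function:

```python
import numpy as np

REGISTERED = (np.ndarray,)  # stand-in for Dask's chunk-type registry

class OptedOut:
    __array_ufunc__ = None  # NEP-13 opt-out: everyone must defer to it

class DuckArray:
    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        return NotImplemented  # has its own override

def should_delegate(other, owner_ufunc=object()):
    # Rule 1: the other operand explicitly opted out of ufuncs.
    if hasattr(other, "__array_ufunc__") and other.__array_ufunc__ is None:
        return True
    # Rule 2: an unregistered duck array with its own __array_ufunc__.
    return (
        hasattr(other, "__array_ufunc__")
        and not isinstance(other, REGISTERED)
        and type(other).__array_ufunc__ is not owner_ufunc
    )

print(should_delegate(OptedOut()))   # True: explicit opt-out
print(should_delegate(DuckArray()))  # True: unknown duck array
print(should_delegate(np.ones(3)))   # False: registered chunk type
print(should_delegate(42))           # False: no __array_ufunc__ at all
```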
"start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_normalize_arg_normalize_arg.if_is_dask_collection_x_.else_.return.x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_normalize_arg_normalize_arg.if_is_dask_collection_x_.else_.return.x", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 409, "end_line": 428, "span_ids": ["normalize_arg"], "tokens": 161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def normalize_arg(x):\n \"\"\"Normalize user provided arguments to blockwise or map_blocks\n\n We do a few things:\n\n 1. If they are string literals that might collide with blockwise_token then we\n quote them\n 2. IF they are large (as defined by sizeof) then we put them into the\n graph on their own by using dask.delayed\n \"\"\"\n if is_dask_collection(x):\n return x\n elif isinstance(x, str) and re.match(r\"_\\d+\", x):\n return delayed(x)\n elif isinstance(x, list) and len(x) >= 10:\n return delayed(x)\n elif sizeof(x) > 1e6:\n return delayed(x)\n else:\n return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__pass_extra_kwargs_map_blocks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__pass_extra_kwargs_map_blocks", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 431, "end_line": 834, "span_ids": ["map_blocks", "_pass_extra_kwargs"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _pass_extra_kwargs(func, keys, *args, **kwargs):\n \"\"\"Helper for :func:`map_blocks` to pass `block_info` or `block_id`.\n\n For each element of `keys`, a corresponding element of args is changed\n to a keyword argument with that key, before all arguments re passed on\n to `func`.\n \"\"\"\n kwargs.update(zip(keys, args))\n return func(*args[len(keys) :], **kwargs)\n\n\ndef map_blocks(\n func,\n *args,\n name=None,\n token=None,\n dtype=None,\n chunks=None,\n drop_axis=[],\n new_axis=None,\n meta=None,\n **kwargs,\n):\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_map_blocks._Map_a_function_across__map_blocks._Map_a_function_across_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_map_blocks._Map_a_function_across__map_blocks._Map_a_function_across_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 454, "end_line": 631, "span_ids": ["map_blocks"], "tokens": 1961}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def map_blocks(\n func,\n *args,\n name=None,\n token=None,\n dtype=None,\n chunks=None,\n drop_axis=[],\n new_axis=None,\n meta=None,\n **kwargs,\n):\n \"\"\"Map a function across all blocks of a dask array.\n\n Note that ``map_blocks`` will attempt to automatically determine the output\n array type by calling ``func`` on 0-d versions of the inputs. Please refer to\n the ``meta`` keyword argument below if you expect that the function will not\n succeed when operating on 0-d arrays.\n\n Parameters\n ----------\n func : callable\n Function to apply to every block in the array.\n args : dask arrays or other objects\n dtype : np.dtype, optional\n The ``dtype`` of the output array. It is recommended to provide this.\n If not provided, will be inferred by applying the function to a small\n set of fake data.\n chunks : tuple, optional\n Chunk shape of resulting blocks if the function does not preserve\n shape. If not provided, the resulting array is assumed to have the same\n block structure as the first input array.\n drop_axis : number or iterable, optional\n Dimensions lost by the function.\n new_axis : number or iterable, optional\n New dimensions created by the function. Note that these are applied\n after ``drop_axis`` (if present).\n token : string, optional\n The key prefix to use for the output array. If not provided, will be\n determined from the function name.\n name : string, optional\n The key name to use for the output array. Note that this fully\n specifies the output key name, and must be unique. If not provided,\n will be determined by a hash of the arguments.\n meta : array-like, optional\n The ``meta`` of the output array, when specified is expected to be an\n array of the same type and dtype of that returned when calling ``.compute()``\n on the array returned by this function. When not provided, ``meta`` will be\n inferred by applying the function to a small set of fake data, usually a\n 0-d array. It's important to ensure that ``func`` can successfully complete\n computation without raising exceptions when 0-d is passed to it, providing\n ``meta`` will be required otherwise. If the output type is known beforehand\n (e.g., ``np.ndarray``, ``cupy.ndarray``), an empty array of such type dtype\n can be passed, for example: ``meta=np.array((), dtype=np.int32)``.\n **kwargs :\n Other keyword arguments to pass to function. 
Values must be constants\n (not dask.arrays)\n\n See Also\n --------\n dask.array.blockwise : Generalized operation with control over block alignment.\n\n Examples\n --------\n >>> import dask.array as da\n >>> x = da.arange(6, chunks=3)\n\n >>> x.map_blocks(lambda x: x * 2).compute()\n array([ 0, 2, 4, 6, 8, 10])\n\n The ``da.map_blocks`` function can also accept multiple arrays.\n\n >>> d = da.arange(5, chunks=2)\n >>> e = da.arange(5, chunks=2)\n\n >>> f = da.map_blocks(lambda a, b: a + b**2, d, e)\n >>> f.compute()\n array([ 0, 2, 6, 12, 20])\n\n If the function changes shape of the blocks then you must provide chunks\n explicitly.\n\n >>> y = x.map_blocks(lambda x: x[::2], chunks=((2, 2),))\n\n You have a bit of freedom in specifying chunks. If all of the output chunk\n sizes are the same, you can provide just that chunk size as a single tuple.\n\n >>> a = da.arange(18, chunks=(6,))\n >>> b = a.map_blocks(lambda x: x[:3], chunks=(3,))\n\n If the function changes the dimension of the blocks you must specify the\n created or destroyed dimensions.\n\n >>> b = a.map_blocks(lambda x: x[None, :, None], chunks=(1, 6, 1),\n ... new_axis=[0, 2])\n\n If ``chunks`` is specified but ``new_axis`` is not, then it is inferred to\n add the necessary number of axes on the left.\n\n Map_blocks aligns blocks by block positions without regard to shape. In the\n following example we have two arrays with the same number of blocks but\n with different shape and chunk sizes.\n\n >>> x = da.arange(1000, chunks=(100,))\n >>> y = da.arange(100, chunks=(10,))\n\n The relevant attribute to match is numblocks.\n\n >>> x.numblocks\n (10,)\n >>> y.numblocks\n (10,)\n\n If these match (up to broadcasting rules) then we can map arbitrary\n functions across blocks\n\n >>> def func(a, b):\n ... return np.array([a.max(), b.max()])\n\n >>> da.map_blocks(func, x, y, chunks=(2,), dtype='i8')\n dask.array\n\n >>> _.compute()\n array([ 99, 9, 199, 19, 299, 29, 399, 39, 499, 49, 599, 59, 699,\n 69, 799, 79, 899, 89, 999, 99])\n\n Your block function get information about where it is in the array by\n accepting a special ``block_info`` keyword argument.\n\n >>> def func(block, block_info=None):\n ... pass\n\n This will receive the following information:\n\n >>> block_info # doctest: +SKIP\n {0: {'shape': (1000,),\n 'num-chunks': (10,),\n 'chunk-location': (4,),\n 'array-location': [(400, 500)]},\n None: {'shape': (1000,),\n 'num-chunks': (10,),\n 'chunk-location': (4,),\n 'array-location': [(400, 500)],\n 'chunk-shape': (100,),\n 'dtype': dtype('float64')}}\n\n For each argument and keyword arguments that are dask arrays (the positions\n of which are the first index), you will receive the shape of the full\n array, the number of chunks of the full array in each dimension, the chunk\n location (for example the fourth chunk over in the first dimension), and\n the array location (for example the slice corresponding to ``40:50``). The\n same information is provided for the output, with the key ``None``, plus\n the shape and dtype that should be returned.\n\n These features can be combined to synthesize an array from scratch, for\n example:\n\n >>> def func(block_info=None):\n ... loc = block_info[None]['array-location'][0]\n ... 
return np.arange(loc[0], loc[1])\n\n >>> da.map_blocks(func, chunks=((4, 4),), dtype=np.float_)\n dask.array\n\n >>> _.compute()\n array([0, 1, 2, 3, 4, 5, 6, 7])\n\n You may specify the key name prefix of the resulting task in the graph with\n the optional ``token`` keyword argument.\n\n >>> x.map_blocks(lambda x: x + 1, name='increment') # doctest: +SKIP\n dask.array\n\n For functions that may not handle 0-d arrays, it's also possible to specify\n ``meta`` with an empty array matching the type of the expected result. In\n the example below, ``func`` will result in an ``IndexError`` when computing\n ``meta``:\n\n >>> da.map_blocks(lambda x: x[2], da.random.random(5), meta=np.array(()))\n dask.array\n\n Similarly, it's possible to specify a non-NumPy array to ``meta``, and provide\n a ``dtype``:\n\n >>> import cupy # doctest: +SKIP\n >>> rs = da.random.RandomState(RandomState=cupy.random.RandomState) # doctest: +SKIP\n >>> dt = np.float32\n >>> da.map_blocks(lambda x: x[2], rs.random(5, dtype=dt), meta=cupy.array((), dtype=dt)) # doctest: +SKIP\n dask.array\n \"\"\"\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_map_blocks.if_not_callable_func__map_blocks._prepare_to_inject_it_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_map_blocks.if_not_callable_func__map_blocks._prepare_to_inject_it_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 632, "end_line": 717, "span_ids": ["map_blocks"], "tokens": 713}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def map_blocks(\n func,\n *args,\n name=None,\n token=None,\n dtype=None,\n chunks=None,\n drop_axis=[],\n new_axis=None,\n meta=None,\n **kwargs,\n):\n if not callable(func):\n msg = (\n \"First argument must be callable function, not %s\\n\"\n \"Usage: da.map_blocks(function, x)\\n\"\n \" or: da.map_blocks(function, x, y, z)\"\n )\n raise TypeError(msg % type(func).__name__)\n if token:\n warnings.warn(\"The token= keyword to map_blocks has been moved to name=\")\n name = token\n\n name = \"%s-%s\" % (name or funcname(func), tokenize(func, *args, **kwargs))\n new_axes = {}\n\n if isinstance(drop_axis, Number):\n drop_axis = [drop_axis]\n if isinstance(new_axis, Number):\n new_axis = [new_axis] # TODO: handle new_axis\n\n arrs = [a for a in args if isinstance(a, Array)]\n\n argpairs = [\n (a, tuple(range(a.ndim))[::-1]) if isinstance(a, Array) else (a, None)\n for a in args\n ]\n if arrs:\n out_ind = tuple(range(max(a.ndim for a in arrs)))[::-1]\n else:\n out_ind = ()\n\n original_kwargs = kwargs\n\n if dtype is None and meta is None:\n try:\n meta = compute_meta(func, dtype, *args, **kwargs)\n except Exception:\n pass\n\n dtype = apply_infer_dtype(func, args, original_kwargs, \"map_blocks\")\n\n if drop_axis:\n out_ind = tuple(x for i, x in enumerate(out_ind) if i not in drop_axis)\n if new_axis is 
None and chunks is not None and len(out_ind) < len(chunks):\n new_axis = range(len(chunks) - len(out_ind))\n if new_axis:\n # new_axis = [x + len(drop_axis) for x in new_axis]\n out_ind = list(out_ind)\n for ax in sorted(new_axis):\n n = len(out_ind) + len(drop_axis)\n out_ind.insert(ax, n)\n if chunks is not None:\n new_axes[n] = chunks[ax]\n else:\n new_axes[n] = 1\n out_ind = tuple(out_ind)\n if max(new_axis) > max(out_ind):\n raise ValueError(\"New_axis values do not fill in all dimensions\")\n\n if chunks is not None:\n if len(chunks) != len(out_ind):\n raise ValueError(\n \"Provided chunks have {0} dims, expected {1} \"\n \"dims.\".format(len(chunks), len(out_ind))\n )\n adjust_chunks = dict(zip(out_ind, chunks))\n else:\n adjust_chunks = None\n\n out = blockwise(\n func,\n out_ind,\n *concat(argpairs),\n name=name,\n new_axes=new_axes,\n dtype=dtype,\n concatenate=True,\n align_arrays=False,\n adjust_chunks=adjust_chunks,\n meta=meta,\n **kwargs,\n )\n\n extra_argpairs = []\n extra_names = []\n # If func has block_id as an argument, construct an array of block IDs and\n # prepare to inject it.\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_map_blocks.if_has_keyword_func_blo_map_blocks._objects_and_prepare_to_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_map_blocks.if_has_keyword_func_blo_map_blocks._objects_and_prepare_to_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 718, "end_line": 734, "span_ids": ["map_blocks"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def map_blocks(\n func,\n *args,\n name=None,\n token=None,\n dtype=None,\n chunks=None,\n drop_axis=[],\n new_axis=None,\n meta=None,\n **kwargs,\n):\n # ... other code\n if has_keyword(func, \"block_id\"):\n block_id_name = \"block-id-\" + out.name\n block_id_dsk = {\n (block_id_name,) + block_id: block_id\n for block_id in product(*(range(len(c)) for c in out.chunks))\n }\n block_id_array = Array(\n block_id_dsk,\n block_id_name,\n chunks=tuple((1,) * len(c) for c in out.chunks),\n dtype=np.object_,\n )\n extra_argpairs.append((block_id_array, out_ind))\n extra_names.append(\"block_id\")\n\n # If func has block_info as an argument, construct an array of block info\n # objects and prepare to inject it.\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_map_blocks.None_11_map_blocks.None_11.extra_names_append_block": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_map_blocks.None_11_map_blocks.None_11.extra_names_append_block", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 735, "end_line": 809, "span_ids": ["map_blocks"], "tokens": 696}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def map_blocks(\n func,\n *args,\n name=None,\n token=None,\n dtype=None,\n chunks=None,\n drop_axis=[],\n new_axis=None,\n meta=None,\n **kwargs,\n):\n # ... other code\n if has_keyword(func, \"block_info\"):\n starts = {}\n num_chunks = {}\n shapes = {}\n\n for i, (arg, in_ind) in enumerate(argpairs):\n if in_ind is not None:\n shapes[i] = arg.shape\n if drop_axis:\n # We concatenate along dropped axes, so we need to treat them\n # as if there is only a single chunk.\n starts[i] = [\n (\n cached_cumsum(arg.chunks[j], initial_zero=True)\n if ind in out_ind\n else [0, arg.shape[j]]\n )\n for j, ind in enumerate(in_ind)\n ]\n num_chunks[i] = tuple(len(s) - 1 for s in starts[i])\n else:\n starts[i] = [\n cached_cumsum(c, initial_zero=True) for c in arg.chunks\n ]\n num_chunks[i] = arg.numblocks\n out_starts = [cached_cumsum(c, initial_zero=True) for c in out.chunks]\n\n block_info_name = \"block-info-\" + out.name\n block_info_dsk = {}\n for block_id in product(*(range(len(c)) for c in out.chunks)):\n # Get position of chunk, indexed by axis labels\n location = {out_ind[i]: loc for i, loc in enumerate(block_id)}\n info = {}\n for i, shape in shapes.items():\n # Compute chunk key in the array, taking broadcasting into\n # account. We don't directly know which dimensions are\n # broadcast, but any dimension with only one chunk can be\n # treated as broadcast.\n arr_k = tuple(\n location.get(ind, 0) if num_chunks[i][j] > 1 else 0\n for j, ind in enumerate(argpairs[i][1])\n )\n info[i] = {\n \"shape\": shape,\n \"num-chunks\": num_chunks[i],\n \"array-location\": [\n (starts[i][ij][j], starts[i][ij][j + 1])\n for ij, j in enumerate(arr_k)\n ],\n \"chunk-location\": arr_k,\n }\n\n info[None] = {\n \"shape\": out.shape,\n \"num-chunks\": out.numblocks,\n \"array-location\": [\n (out_starts[ij][j], out_starts[ij][j + 1])\n for ij, j in enumerate(block_id)\n ],\n \"chunk-location\": block_id,\n \"chunk-shape\": tuple(\n out.chunks[ij][j] for ij, j in enumerate(block_id)\n ),\n \"dtype\": dtype,\n }\n block_info_dsk[(block_info_name,) + block_id] = info\n\n block_info = Array(\n block_info_dsk,\n block_info_name,\n chunks=tuple((1,) * len(c) for c in out.chunks),\n dtype=np.object_,\n )\n extra_argpairs.append((block_info, out_ind))\n extra_names.append(\"block_info\")\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.cumsum_Array.cumsum.return.cumsum_self_axis_dtype_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.cumsum_Array.cumsum.return.cumsum_self_axis_dtype_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2332, "end_line": 2347, "span_ids": ["Array.cumsum"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n @derived_from(np.ndarray)\n def cumsum(self, axis, dtype=None, out=None, *, method=\"sequential\"):\n \"\"\"Dask added an additional keyword-only argument ``method``.\n\n method : {'sequential', 'blelloch'}, optional\n Choose which method to use to perform the cumsum. Default is 'sequential'.\n\n * 'sequential' performs the cumsum of each prior block before the current block.\n * 'blelloch' is a work-efficient parallel cumsum. It exposes parallelism by\n first taking the sum of each block and combines the sums via a binary tree.\n This method may be faster or more memory efficient depending on workload,\n scheduler, and hardware. More benchmarking is necessary.\n \"\"\"\n from .reductions import cumsum\n\n return cumsum(self, axis, dtype, out=out, method=method)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.cumprod_Array.cumprod.return.cumprod_self_axis_dtype": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.cumprod_Array.cumprod.return.cumprod_self_axis_dtype", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2349, "end_line": 2364, "span_ids": ["Array.cumprod"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n @derived_from(np.ndarray)\n def cumprod(self, axis, dtype=None, out=None, *, method=\"sequential\"):\n \"\"\"Dask added an additional keyword-only argument ``method``.\n\n method : {'sequential', 'blelloch'}, optional\n Choose which method to use to perform the cumprod. Default is 'sequential'.\n\n * 'sequential' performs the cumprod of each prior block before the current block.\n * 'blelloch' is a work-efficient parallel cumprod. 
It exposes parallelism by first\n taking the product of each block and combines the products via a binary tree.\n This method may be faster or more memory efficient depending on workload,\n scheduler, and hardware. More benchmarking is necessary.\n \"\"\"\n from .reductions import cumprod\n\n return cumprod(self, axis, dtype, out=out, method=method)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.squeeze_Array.clip.return.clip_self_min_max_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.squeeze_Array.clip.return.clip_self_min_max_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2366, "end_line": 2401, "span_ids": ["Array.conj", "Array.squeeze", "Array.real", "Array.rechunk", "Array.clip", "Array.imag"], "tokens": 197}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n @derived_from(np.ndarray)\n def squeeze(self, axis=None):\n from .routines import squeeze\n\n return squeeze(self, axis)\n\n def rechunk(\n self, chunks=\"auto\", threshold=None, block_size_limit=None, balance=False\n ):\n \"\"\" See da.rechunk for docstring \"\"\"\n from . 
import rechunk # avoid circular import\n\n return rechunk(self, chunks, threshold, block_size_limit, balance)\n\n @property\n def real(self):\n from .ufunc import real\n\n return real(self)\n\n @property\n def imag(self):\n from .ufunc import imag\n\n return imag(self)\n\n def conj(self):\n from .ufunc import conj\n\n return conj(self)\n\n @derived_from(np.ndarray)\n def clip(self, min=None, max=None):\n from .ufunc import clip\n\n return clip(self, min, max)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from_npy_stack_from_npy_stack.return.Array_dsk_name_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from_npy_stack_from_npy_stack.return.Array_dsk_name_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4981, "end_line": 5008, "span_ids": ["from_npy_stack"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def from_npy_stack(dirname, mmap_mode=\"r\"):\n \"\"\"Load dask array from stack of npy files\n\n See ``da.to_npy_stack`` for docstring\n\n Parameters\n ----------\n dirname: string\n Directory of .npy files\n mmap_mode: (None or 'r')\n Read data in memory map mode\n \"\"\"\n with open(os.path.join(dirname, \"info\"), \"rb\") as f:\n info = pickle.load(f)\n\n dtype = info[\"dtype\"]\n chunks = info[\"chunks\"]\n axis = info[\"axis\"]\n\n name = \"from-npy-stack-%s\" % dirname\n keys = list(product([name], *[range(len(c)) for c in chunks]))\n values = [\n (np.load, os.path.join(dirname, \"%d.npy\" % i), mmap_mode)\n for i in range(len(chunks[axis]))\n ]\n dsk = dict(zip(keys, values))\n\n return Array(dsk, name, chunks, dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_new_da_object_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_new_da_object_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5011, "end_line": 5027, "span_ids": ["new_da_object", "impl:19"], "tokens": 163}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def new_da_object(dsk, name, chunks, meta=None, dtype=None):\n \"\"\"Generic constructor for dask.array or dask.dataframe objects.\n\n Decides the appropriate output class based on the type 
of `meta` provided.\n \"\"\"\n if is_dataframe_like(meta) or is_series_like(meta) or is_index_like(meta):\n from ..dataframe.core import new_dd_object\n\n assert all(len(c) == 1 for c in chunks[1:])\n divisions = [None] * (len(chunks[0]) + 1)\n return new_dd_object(dsk, name, meta, divisions)\n else:\n return Array(dsk, name=name, chunks=chunks, meta=meta, dtype=dtype)\n\n\nfrom .utils import meta_from_array, compute_meta", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_empty_like_empty_like.return.empty_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_empty_like_empty_like.return.empty_", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 30, "end_line": 83, "span_ids": ["empty_like"], "tokens": 432}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def empty_like(a, dtype=None, order=\"C\", chunks=None, name=None, shape=None):\n \"\"\"\n Return a new array with the same shape and type as a given array.\n\n Parameters\n ----------\n a : array_like\n The shape and data-type of `a` define these same attributes of the\n returned array.\n dtype : data-type, optional\n Overrides the data type of the result.\n order : {'C', 'F'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory.\n chunks : sequence of ints\n The number of samples on each block. Note that the last block will have\n fewer samples if ``len(array) % chunks != 0``.\n name : str, optional\n An optional keyname for the array. Defaults to hashing the input\n keyword arguments.\n shape : int or sequence of ints, optional.\n Overrides the shape of the result.\n\n Returns\n -------\n out : ndarray\n Array of uninitialized (arbitrary) data with the same\n shape and type as `a`.\n\n See Also\n --------\n ones_like : Return an array of ones with shape and type of input.\n zeros_like : Return an array of zeros with shape and type of input.\n empty : Return a new uninitialized array.\n ones : Return a new array setting values to one.\n zeros : Return a new array setting values to zero.\n\n Notes\n -----\n This function does *not* initialize the returned array; to do that use\n `zeros_like` or `ones_like` instead. 
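Looping back to `from_npy_stack` above: it is the inverse of `da.to_npy_stack`, which writes the pickled `info` file plus one `.npy` file per block along the split axis. A minimal round trip (writes a local `stack_dir` directory):

```python
import dask.array as da

x = da.arange(12, chunks=4)               # three blocks along axis 0
da.to_npy_stack("stack_dir", x, axis=0)   # writes info plus 0.npy, 1.npy, 2.npy
y = da.from_npy_stack("stack_dir", mmap_mode=None)
print(y.chunks)      # ((4, 4, 4),)
print(y.compute())   # values 0 through 11, unchanged
```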
It may be marginally faster than\n the functions that do set the array values.\n \"\"\"\n\n a = asarray(a, name=False)\n shape, chunks = _get_like_function_shapes_chunks(a, chunks, shape)\n return empty(\n shape,\n dtype=(dtype or a.dtype),\n order=order,\n chunks=chunks,\n name=name,\n meta=a._meta,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_ones_like_ones_like.return.ones_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_ones_like_ones_like.return.ones_", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 86, "end_line": 132, "span_ids": ["ones_like"], "tokens": 372}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def ones_like(a, dtype=None, order=\"C\", chunks=None, name=None, shape=None):\n \"\"\"\n Return an array of ones with the same shape and type as a given array.\n\n Parameters\n ----------\n a : array_like\n The shape and data-type of `a` define these same attributes of\n the returned array.\n dtype : data-type, optional\n Overrides the data type of the result.\n order : {'C', 'F'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory.\n chunks : sequence of ints\n The number of samples on each block. Note that the last block will have\n fewer samples if ``len(array) % chunks != 0``.\n name : str, optional\n An optional keyname for the array. 
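The `*_like` creators accept `chunks` and `shape` overrides on top of the template array, which is their main difference from the NumPy counterparts. A short sketch using only the signatures documented above:

```python
import dask.array as da

template = da.ones((6, 6), chunks=(3, 3), dtype="i4")
e = da.empty_like(template)                               # same shape, chunks, dtype
z = da.zeros_like(template, dtype="f8")                   # override dtype only
o = da.ones_like(template, shape=(4, 4), chunks=(2, 2))   # new geometry entirely
print(e.chunks, z.dtype, o.shape)
```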
Defaults to hashing the input\n keyword arguments.\n shape : int or sequence of ints, optional.\n Overrides the shape of the result.\n\n Returns\n -------\n out : ndarray\n Array of ones with the same shape and type as `a`.\n\n See Also\n --------\n zeros_like : Return an array of zeros with shape and type of input.\n empty_like : Return an empty array with shape and type of input.\n zeros : Return a new array setting values to zero.\n ones : Return a new array setting values to one.\n empty : Return a new uninitialized array.\n \"\"\"\n\n a = asarray(a, name=False)\n shape, chunks = _get_like_function_shapes_chunks(a, chunks, shape)\n return ones(\n shape,\n dtype=(dtype or a.dtype),\n order=order,\n chunks=chunks,\n name=name,\n meta=a._meta,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_zeros_like_zeros_like.return.zeros_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_zeros_like_zeros_like.return.zeros_", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 135, "end_line": 181, "span_ids": ["zeros_like"], "tokens": 372}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def zeros_like(a, dtype=None, order=\"C\", chunks=None, name=None, shape=None):\n \"\"\"\n Return an array of zeros with the same shape and type as a given array.\n\n Parameters\n ----------\n a : array_like\n The shape and data-type of `a` define these same attributes of\n the returned array.\n dtype : data-type, optional\n Overrides the data type of the result.\n order : {'C', 'F'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory.\n chunks : sequence of ints\n The number of samples on each block. Note that the last block will have\n fewer samples if ``len(array) % chunks != 0``.\n name : str, optional\n An optional keyname for the array. 
Defaults to hashing the input\n keyword arguments.\n shape : int or sequence of ints, optional.\n Overrides the shape of the result.\n\n Returns\n -------\n out : ndarray\n Array of zeros with the same shape and type as `a`.\n\n See Also\n --------\n ones_like : Return an array of ones with shape and type of input.\n empty_like : Return an empty array with shape and type of input.\n zeros : Return a new array setting values to zero.\n ones : Return a new array setting values to one.\n empty : Return a new uninitialized array.\n \"\"\"\n\n a = asarray(a, name=False)\n shape, chunks = _get_like_function_shapes_chunks(a, chunks, shape)\n return zeros(\n shape,\n dtype=(dtype or a.dtype),\n order=order,\n chunks=chunks,\n name=name,\n meta=a._meta,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_optimize_optimize.return.optimize_slices_dsk_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_optimize_optimize.return.optimize_slices_dsk_", "embedding": null, "metadata": {"file_path": "dask/array/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 24, "end_line": 76, "span_ids": ["optimize"], "tokens": 339}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def optimize(\n dsk,\n keys,\n fuse_keys=None,\n fast_functions=None,\n inline_functions_fast_functions=(getter_inline,),\n rename_fused_keys=True,\n **kwargs\n):\n \"\"\"Optimize dask for array computation\n\n 1. Cull tasks not necessary to evaluate keys\n 2. Remove full slicing, e.g. x[:]\n 3. 
Inline fast functions like getitem and np.transpose\n \"\"\"\n if not isinstance(keys, (list, set)):\n keys = [keys]\n keys = list(flatten(keys))\n\n if not isinstance(dsk, HighLevelGraph):\n dsk = HighLevelGraph.from_collections(id(dsk), dsk, dependencies=())\n\n dsk = optimize_blockwise(dsk, keys=keys)\n dsk = fuse_roots(dsk, keys=keys)\n dsk = dsk.cull(set(keys))\n dependencies = dsk.get_all_dependencies()\n\n if not config.get(\"optimization.fuse.active\"):\n return dsk\n\n dsk = ensure_dict(dsk)\n\n # Low level task optimizations\n if fast_functions is not None:\n inline_functions_fast_functions = fast_functions\n\n hold = hold_keys(dsk, dependencies)\n\n dsk, dependencies = fuse(\n dsk,\n hold + keys + (fuse_keys or []),\n dependencies,\n rename_keys=rename_fused_keys,\n )\n if inline_functions_fast_functions:\n dsk = inline_functions(\n dsk,\n keys,\n dependencies=dependencies,\n fast_functions=inline_functions_fast_functions,\n )\n\n return optimize_slices(dsk)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_warnings_concrete": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_warnings_concrete", "embedding": null, "metadata": {"file_path": "dask/array/overlap.py", "file_name": "overlap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 21, "span_ids": ["imports"], "tokens": 112}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import warnings\nfrom operator import getitem\nfrom itertools import product\nfrom numbers import Integral\nfrom tlz import merge, pipe, concat, partial, get\nfrom tlz.curried import map\n\nfrom . 
import chunk\nfrom .core import (\n Array,\n map_blocks,\n concatenate,\n concatenate3,\n reshapelist,\n unify_chunks,\n)\nfrom .creation import empty_like, full_like\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..base import tokenize\nfrom ..core import flatten\nfrom ..utils import concrete", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_fractional_slice_fractional_slice.if_all_ind_slice_None_.else_.return._getitem_rounded_index_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_fractional_slice_fractional_slice.if_all_ind_slice_None_.else_.return._getitem_rounded_index_", "embedding": null, "metadata": {"file_path": "dask/array/overlap.py", "file_name": "overlap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 24, "end_line": 61, "span_ids": ["fractional_slice"], "tokens": 384}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def fractional_slice(task, axes):\n \"\"\"\n\n >>> fractional_slice(('x', 5.1), {0: 2}) # doctest: +SKIP\n (getitem, ('x', 6), (slice(0, 2),))\n\n >>> fractional_slice(('x', 3, 5.1), {0: 2, 1: 3}) # doctest: +SKIP\n (getitem, ('x', 3, 5), (slice(None, None, None), slice(-3, None)))\n\n >>> fractional_slice(('x', 2.9, 5.1), {0: 2, 1: 3}) # doctest: +SKIP\n (getitem, ('x', 3, 5), (slice(0, 2), slice(-3, None)))\n \"\"\"\n rounded = (task[0],) + tuple(int(round(i)) for i in task[1:])\n\n index = []\n for i, (t, r) in enumerate(zip(task[1:], rounded[1:])):\n depth = axes.get(i, 0)\n if isinstance(depth, tuple):\n left_depth = depth[0]\n right_depth = depth[1]\n else:\n left_depth = depth\n right_depth = depth\n\n if t == r:\n index.append(slice(None, None, None))\n elif t < r and right_depth:\n index.append(slice(0, right_depth))\n elif t > r and left_depth:\n index.append(slice(-left_depth, None))\n else:\n index.append(slice(0, 0))\n index = tuple(index)\n\n if all(ind == slice(None, None, None) for ind in index):\n return task\n else:\n return (getitem, rounded, index)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_add_dummy_padding_map_overlap": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_add_dummy_padding_map_overlap", "embedding": null, "metadata": {"file_path": "dask/array/overlap.py", "file_name": "overlap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 497, "end_line": 756, "span_ids": ["map_overlap", "add_dummy_padding"], "tokens": 325}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": 
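The doctests on `fractional_slice` above are marked `+SKIP`, so here is a live probe; the outputs are whatever the installed dask produces and are not asserted. A fractional chunk index like 5.1 or 2.9 means "this chunk plus a sliver from the neighbor", and the helper rounds to the nearest chunk key and builds the matching `getitem` slice for the overlap depth along each axis:

```python
from dask.array.overlap import fractional_slice  # module per the index above

for task in [("x", 5.1), ("x", 2.9), ("x", 3, 5.1)]:
    depth = {0: 2} if len(task) == 2 else {0: 2, 1: 3}
    print(task, "->", fractional_slice(task, depth))
```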
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def add_dummy_padding(x, depth, boundary):\n \"\"\"\n Pads an array which has 'none' as the boundary type.\n Used to simplify trimming arrays which use 'none'.\n\n >>> import dask.array as da\n >>> x = da.arange(6, chunks=3)\n >>> add_dummy_padding(x, {0: 1}, {0: 'none'}).compute() # doctest: +NORMALIZE_WHITESPACE\n array([..., 0, 1, 2, 3, 4, 5, ...])\n \"\"\"\n for k, v in boundary.items():\n d = depth.get(k, 0)\n if v == \"none\" and d > 0:\n empty_shape = list(x.shape)\n empty_shape[k] = d\n\n empty_chunks = list(x.chunks)\n empty_chunks[k] = (d,)\n\n empty = empty_like(\n getattr(x, \"_meta\", x),\n shape=empty_shape,\n chunks=empty_chunks,\n dtype=x.dtype,\n )\n\n out_chunks = list(x.chunks)\n ax_chunks = list(out_chunks[k])\n ax_chunks[0] += d\n ax_chunks[-1] += d\n out_chunks[k] = tuple(ax_chunks)\n\n x = concatenate([empty, x, empty], axis=k)\n x = x.rechunk(out_chunks)\n return x\n\n\ndef map_overlap(\n func, *args, depth=None, boundary=None, trim=True, align_arrays=True, **kwargs\n):\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_map_overlap._Map_a_function_over_bl_map_overlap._Map_a_function_over_bl": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_map_overlap._Map_a_function_over_bl_map_overlap._Map_a_function_over_bl", "embedding": null, "metadata": {"file_path": "dask/array/overlap.py", "file_name": "overlap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 537, "end_line": 684, "span_ids": ["map_overlap"], "tokens": 1801}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def map_overlap(\n func, *args, depth=None, boundary=None, trim=True, align_arrays=True, **kwargs\n):\n \"\"\"Map a function over blocks of arrays with some overlap\n\n We share neighboring zones between blocks of the array, map a\n function, and then trim away the neighboring strips.\n\n Note that this function will attempt to automatically determine the output\n array type before computing it, please refer to the ``meta`` keyword argument\n in ``map_blocks`` if you expect that the function will not succeed when\n operating on 0-d arrays.\n\n Parameters\n ----------\n func: function\n The function to apply to each extended block.\n If multiple arrays are provided, then the function should expect to\n receive chunks of each array in the same order.\n args : dask arrays\n depth: int, tuple, dict or list\n The number of elements that each block should share with its neighbors\n If a tuple or dict then this can be different per axis.\n If a list then each element of that list must be an int, tuple or dict\n defining depth for the corresponding array in `args`.\n Asymmetric depths may be specified using a dict value of (-/+) tuples.\n Note that asymmetric depths are currently only supported when\n ``boundary`` is 'none'.\n 
The default value is 0.\n boundary: str, tuple, dict or list\n How to handle the boundaries.\n Values include 'reflect', 'periodic', 'nearest', 'none',\n or any constant value like 0 or np.nan.\n If a list then each element must be a str, tuple or dict defining the\n boundary for the corresponding array in `args`.\n The default value is 'reflect'.\n trim: bool\n Whether or not to trim ``depth`` elements from each block after\n calling the map function.\n Set this to False if your mapping function already does this for you\n align_arrays: bool\n Whether or not to align chunks along equally sized dimensions when\n multiple arrays are provided. This allows for larger chunks in some\n arrays to be broken into smaller ones that match chunk sizes in other\n arrays such that they are compatible for block function mapping. If\n this is false, then an error will be thrown if arrays do not already\n have the same number of blocks in each dimension.\n **kwargs:\n Other keyword arguments valid in ``map_blocks``\n\n Examples\n --------\n >>> import numpy as np\n >>> import dask.array as da\n\n >>> x = np.array([1, 1, 2, 3, 3, 3, 2, 1, 1])\n >>> x = da.from_array(x, chunks=5)\n >>> def derivative(x):\n ... return x - np.roll(x, 1)\n\n >>> y = x.map_overlap(derivative, depth=1, boundary=0)\n >>> y.compute()\n array([ 1, 0, 1, 1, 0, 0, -1, -1, 0])\n\n >>> x = np.arange(16).reshape((4, 4))\n >>> d = da.from_array(x, chunks=(2, 2))\n >>> d.map_overlap(lambda x: x + x.size, depth=1).compute()\n array([[16, 17, 18, 19],\n [20, 21, 22, 23],\n [24, 25, 26, 27],\n [28, 29, 30, 31]])\n\n >>> func = lambda x: x + x.size\n >>> depth = {0: 1, 1: 1}\n >>> boundary = {0: 'reflect', 1: 'none'}\n >>> d.map_overlap(func, depth, boundary).compute() # doctest: +NORMALIZE_WHITESPACE\n array([[12, 13, 14, 15],\n [16, 17, 18, 19],\n [20, 21, 22, 23],\n [24, 25, 26, 27]])\n\n The ``da.map_overlap`` function can also accept multiple arrays.\n\n >>> func = lambda x, y: x + y\n >>> x = da.arange(8).reshape(2, 4).rechunk((1, 2))\n >>> y = da.arange(4).rechunk(2)\n >>> da.map_overlap(func, x, y, depth=1).compute() # doctest: +NORMALIZE_WHITESPACE\n array([[ 0, 2, 4, 6],\n [ 4, 6, 8, 10]])\n\n When multiple arrays are given, they do not need to have the\n same number of dimensions but they must broadcast together.\n Arrays are aligned block by block (just as in ``da.map_blocks``)\n so the blocks must have a common chunk size. This common chunking\n is determined automatically as long as ``align_arrays`` is True.\n\n >>> x = da.arange(8, chunks=4)\n >>> y = da.arange(8, chunks=2)\n >>> r = da.map_overlap(func, x, y, depth=1, align_arrays=True)\n >>> len(r.to_delayed())\n 4\n\n >>> da.map_overlap(func, x, y, depth=1, align_arrays=False).compute()\n Traceback (most recent call last):\n ...\n ValueError: Shapes do not align {'.0': {2, 4}}\n\n Note also that this function is equivalent to ``map_blocks``\n by default. A non-zero ``depth`` must be defined for any\n overlap to appear in the arrays provided to ``func``.\n\n >>> func = lambda x: x.sum()\n >>> x = da.ones(10, dtype='int')\n >>> block_args = dict(chunks=(), drop_axis=0)\n >>> da.map_blocks(func, x, **block_args).compute()\n 10\n >>> da.map_overlap(func, x, **block_args).compute()\n 10\n >>> da.map_overlap(func, x, **block_args, depth=1).compute()\n 12\n\n For functions that may not handle 0-d arrays, it's also possible to specify\n ``meta`` with an empty array matching the type of the expected result. 
In\n the example below, ``func`` will result in an ``IndexError`` when computing\n ``meta``:\n\n >>> x = np.arange(16).reshape((4, 4))\n >>> d = da.from_array(x, chunks=(2, 2))\n >>> y = d.map_overlap(lambda x: x + x[2], depth=1, meta=np.array(()))\n >>> y\n dask.array<_trim, shape=(4, 4), dtype=float64, chunksize=(2, 2), chunktype=numpy.ndarray>\n >>> y.compute()\n array([[ 4, 6, 8, 10],\n [ 8, 10, 12, 14],\n [20, 22, 24, 26],\n [24, 26, 28, 30]])\n\n Similarly, it's possible to specify a non-NumPy array to ``meta``:\n\n >>> import cupy # doctest: +SKIP\n >>> x = cupy.arange(16).reshape((4, 4)) # doctest: +SKIP\n >>> d = da.from_array(x, chunks=(2, 2)) # doctest: +SKIP\n >>> y = d.map_overlap(lambda x: x + x[2], depth=1, meta=cupy.array(())) # doctest: +SKIP\n >>> y # doctest: +SKIP\n dask.array<_trim, shape=(4, 4), dtype=float64, chunksize=(2, 2), chunktype=cupy.ndarray>\n >>> y.compute() # doctest: +SKIP\n array([[ 4, 6, 8, 10],\n [ 8, 10, 12, 14],\n [20, 22, 24, 26],\n [24, 26, 28, 30]])\n \"\"\"\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_format_chunks__get_chunks.return.tuple_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_format_chunks__get_chunks.return.tuple_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/rechunk.py", "file_name": "rechunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 674, "end_line": 698, "span_ids": ["format_plan", "format_chunks", "_get_chunks"], "tokens": 184}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def format_chunks(chunks):\n \"\"\"\n >>> format_chunks((10 * (3,), 3 * (10,)))\n (10*[3], 3*[10])\n \"\"\"\n assert isinstance(chunks, tuple)\n return tuple(format_blocks(c) for c in chunks)\n\n\ndef format_plan(plan):\n \"\"\"\n >>> format_plan([((10, 10, 10), (15, 15)), ((30,), (10, 10, 10))])\n [(3*[10], 2*[15]), ([30], 3*[10])]\n \"\"\"\n return [format_chunks(c) for c in plan]\n\n\ndef _get_chunks(n, chunksize):\n leftover = n % chunksize\n n_chunks = n // chunksize\n\n chunks = [chunksize] * n_chunks\n if leftover:\n chunks.append(leftover)\n return tuple(chunks)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__balance_chunksizes_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__balance_chunksizes_", "embedding": null, "metadata": {"file_path": "dask/array/rechunk.py", "file_name": "rechunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 701, "end_line": 736, "span_ids": ["_balance_chunksizes"], "tokens": 258}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _balance_chunksizes(chunks: Tuple[int, ...]) -> Tuple[int, ...]:\n \"\"\"\n Balance the chunk sizes\n\n Parameters\n ----------\n chunks : Tuple[int, ...]\n Chunk sizes for Dask array.\n\n Returns\n -------\n new_chunks : Tuple[int, ...]\n New chunks for Dask array with balanced sizes.\n \"\"\"\n median_len = np.median(chunks).astype(int)\n n_chunks = len(chunks)\n eps = median_len // 2\n if min(chunks) <= 0.5 * max(chunks):\n n_chunks -= 1\n\n new_chunks = [\n _get_chunks(sum(chunks), chunk_len)\n for chunk_len in range(median_len - eps, median_len + eps + 1)\n ]\n possible_chunks = [c for c in new_chunks if len(c) == n_chunks]\n if not len(possible_chunks):\n warn(\n \"chunk size balancing not possible with given chunks. \"\n \"Try increasing the chunk size.\"\n )\n return chunks\n\n diffs = [max(c) - min(c) for c in possible_chunks]\n best_chunk_size = np.argmin(diffs)\n return possible_chunks[best_chunk_size]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__prefixscan_combine__prefixscan_first.return.func_x_axis_axis_dtype_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__prefixscan_combine__prefixscan_first.return.func_x_axis_axis_dtype_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1121, "end_line": 1164, "span_ids": ["_prefixscan_combine", "_prefixscan_first"], "tokens": 300}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _prefixscan_combine(func, binop, pre, x, axis, dtype):\n \"\"\"Combine results of a parallel prefix scan such as cumsum\n\n Parameters\n ----------\n func : callable\n Cumulative function (e.g. ``np.cumsum``)\n binop : callable\n Associative function (e.g. ``add``)\n pre : np.array\n The value calculated in parallel from ``preop``.\n For example, the sum of all the previous blocks.\n x : np.array\n Current block\n axis : int\n dtype : dtype\n\n Returns\n -------\n np.array\n \"\"\"\n # We could compute this in two tasks.\n # This would allow us to do useful work (i.e., func), while waiting on `pre`.\n # Using one task may guide the scheduler to do better and reduce scheduling overhead.\n return binop(pre, func(x, axis=axis, dtype=dtype))\n\n\ndef _prefixscan_first(func, x, axis, dtype):\n \"\"\"Compute the prefix scan (e.g., cumsum) on the first block\n\n Parameters\n ----------\n func : callable\n Cumulative function (e.g. 
``np.cumsum``)\n x : np.array\n Current block\n axis : int\n dtype : dtype\n\n Returns\n -------\n np.array\n \"\"\"\n return func(x, axis=axis, dtype=dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_prefixscan_blelloch_prefixscan_blelloch.level.0": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_prefixscan_blelloch_prefixscan_blelloch.level.0", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1167, "end_line": 1220, "span_ids": ["prefixscan_blelloch"], "tokens": 517}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def prefixscan_blelloch(func, preop, binop, x, axis=None, dtype=None, out=None):\n \"\"\"Generic function to perform parallel cumulative scan (a.k.a prefix scan)\n\n The Blelloch prefix scan is work-efficient and exposes parallelism.\n A parallel cumsum works by first taking the sum of each block, then do a binary tree\n merge followed by a fan-out (i.e., the Brent-Kung pattern). We then take the cumsum\n of each block and add the sum of the previous blocks.\n\n When performing a cumsum across N chunks, this method has 2 * lg(N) levels of dependencies.\n In contrast, the sequential method has N levels of dependencies.\n\n Floating point operations should be more accurate with this method compared to sequential.\n\n Parameters\n ----------\n func : callable\n Cumulative function (e.g. ``np.cumsum``)\n preop : callable\n Function to get the final value of a cumulative function (e.g., ``np.sum``)\n binop : callable\n Associative function (e.g. ``add``)\n x : dask array\n axis : int\n dtype : dtype\n\n Returns\n -------\n dask array\n \"\"\"\n if axis is None:\n x = x.flatten()\n axis = 0\n if dtype is None:\n dtype = getattr(func(np.empty((0,), dtype=x.dtype)), \"dtype\", object)\n assert isinstance(axis, Integral)\n axis = validate_axis(axis, x.ndim)\n name = \"{0}-{1}\".format(func.__name__, tokenize(func, axis, preop, binop, x, dtype))\n base_key = (name,)\n\n # Right now, the metadata for batches is incorrect, but this should be okay\n batches = x.map_blocks(preop, axis=axis, keepdims=True, dtype=dtype)\n # We don't need the last index until the end\n *indices, last_index = full_indices = [\n list(\n product(\n *[range(nb) if j != axis else [i] for j, nb in enumerate(x.numblocks)]\n )\n )\n for i in range(x.numblocks[axis])\n ]\n prefix_vals = [[(batches.name,) + index for index in vals] for vals in indices]\n dsk = {}\n n_vals = len(prefix_vals)\n level = 0\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_prefixscan_blelloch.if_n_vals_2__prefixscan_blelloch.return.handle_out_out_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_prefixscan_blelloch.if_n_vals_2__prefixscan_blelloch.return.handle_out_out_result_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1221, "end_line": 1284, "span_ids": ["prefixscan_blelloch"], "tokens": 577}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def prefixscan_blelloch(func, preop, binop, x, axis=None, dtype=None, out=None):\n # ... other code\n if n_vals >= 2:\n # Upsweep\n stride = 1\n stride2 = 2\n while stride2 <= n_vals:\n for i in range(stride2 - 1, n_vals, stride2):\n new_vals = []\n for index, left_val, right_val in zip(\n indices[i], prefix_vals[i - stride], prefix_vals[i]\n ):\n key = base_key + index + (level, i)\n dsk[key] = (binop, left_val, right_val)\n new_vals.append(key)\n prefix_vals[i] = new_vals\n stride = stride2\n stride2 *= 2\n level += 1\n\n # Downsweep\n # With `n_vals == 3`, we would have `stride = 1` and `stride = 0`, but we need\n # to do a downsweep iteration, so make sure stride2 is at least 2.\n stride2 = builtins.max(2, 2 ** ceil(log2(n_vals // 2)))\n stride = stride2 // 2\n while stride > 0:\n for i in range(stride2 + stride - 1, n_vals, stride2):\n new_vals = []\n for index, left_val, right_val in zip(\n indices[i], prefix_vals[i - stride], prefix_vals[i]\n ):\n key = base_key + index + (level, i)\n dsk[key] = (binop, left_val, right_val)\n new_vals.append(key)\n prefix_vals[i] = new_vals\n stride2 = stride\n stride //= 2\n level += 1\n\n if full_indices:\n for index in full_indices[0]:\n dsk[base_key + index] = (\n _prefixscan_first,\n func,\n (x.name,) + index,\n axis,\n dtype,\n )\n for indexes, vals in zip(drop(1, full_indices), prefix_vals):\n for index, val in zip(indexes, vals):\n dsk[base_key + index] = (\n _prefixscan_combine,\n func,\n binop,\n val,\n (x.name,) + index,\n axis,\n dtype,\n )\n if len(full_indices) < 2:\n deps = [x]\n else:\n deps = [x, batches]\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=deps)\n result = Array(graph, name, x.chunks, batches.dtype)\n return handle_out(out, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_cumreduction_cumreduction.for_ind_in_indices_.dsk_name_ind_m_n": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_cumreduction_cumreduction.for_ind_in_indices_.dsk_name_ind_m_n", "embedding": null, 
"metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1287, "end_line": 1364, "span_ids": ["cumreduction"], "tokens": 702}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def cumreduction(\n func,\n binop,\n ident,\n x,\n axis=None,\n dtype=None,\n out=None,\n method=\"sequential\",\n preop=None,\n):\n \"\"\"Generic function for cumulative reduction\n\n Parameters\n ----------\n func: callable\n Cumulative function like np.cumsum or np.cumprod\n binop: callable\n Associated binary operator like ``np.cumsum->add`` or ``np.cumprod->mul``\n ident: Number\n Associated identity like ``np.cumsum->0`` or ``np.cumprod->1``\n x: dask Array\n axis: int\n dtype: dtype\n method : {'sequential', 'blelloch'}, optional\n Choose which method to use to perform the cumsum. Default is 'sequential'.\n\n * 'sequential' performs the scan of each prior block before the current block.\n * 'blelloch' is a work-efficient parallel scan. It exposes parallelism by first\n calling ``preop`` on each block and combines the values via a binary tree.\n This method may be faster or more memory efficient depending on workload,\n scheduler, and hardware. More benchmarking is necessary.\n preop: callable, optional\n Function used by 'blelloch' method like `np.cumsum->np.sum`` or ``np.cumprod->np.prod``\n\n Returns\n -------\n dask array\n\n See also\n --------\n cumsum\n cumprod\n \"\"\"\n if method == \"blelloch\":\n if preop is None:\n raise TypeError(\n 'cumreduction with \"blelloch\" method required `preop=` argument'\n )\n return prefixscan_blelloch(func, preop, binop, x, axis, dtype, out=out)\n elif method != \"sequential\":\n raise ValueError(\n f'Invalid method for cumreduction. Expected \"sequential\" or \"blelloch\". Got: {method!r}'\n )\n\n if axis is None:\n x = x.flatten()\n axis = 0\n if dtype is None:\n dtype = getattr(func(np.empty((0,), dtype=x.dtype)), \"dtype\", object)\n assert isinstance(axis, Integral)\n axis = validate_axis(axis, x.ndim)\n\n m = x.map_blocks(func, axis=axis, dtype=dtype)\n\n name = \"{0}-{1}\".format(func.__name__, tokenize(func, axis, binop, ident, x, dtype))\n n = x.numblocks[axis]\n full = slice(None, None, None)\n slc = (full,) * axis + (slice(-1, None),) + (full,) * (x.ndim - axis - 1)\n\n indices = list(\n product(*[range(nb) if i != axis else [0] for i, nb in enumerate(x.numblocks)])\n )\n dsk = dict()\n for ind in indices:\n shape = tuple(x.chunks[i][ii] if i != axis else 1 for i, ii in enumerate(ind))\n dsk[(name, \"extra\") + ind] = (np.full, shape, ident, m.dtype)\n dsk[(name,) + ind] = (m.name,) + ind\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_cumreduction.for_i_in_range_1_n__cumreduction.return.handle_out_out_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_cumreduction.for_i_in_range_1_n__cumreduction.return.handle_out_out_result_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1366, "end_line": 1384, "span_ids": ["cumreduction"], "tokens": 228}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def cumreduction(\n func,\n binop,\n ident,\n x,\n axis=None,\n dtype=None,\n out=None,\n method=\"sequential\",\n preop=None,\n):\n # ... other code\n\n for i in range(1, n):\n last_indices = indices\n indices = list(\n product(\n *[range(nb) if ii != axis else [i] for ii, nb in enumerate(x.numblocks)]\n )\n )\n for old, ind in zip(last_indices, indices):\n this_slice = (name, \"extra\") + ind\n dsk[this_slice] = (\n binop,\n (name, \"extra\") + old,\n (operator.getitem, (m.name,) + old, slc),\n )\n dsk[(name,) + ind] = (binop, this_slice, (m.name,) + ind)\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[m])\n result = Array(graph, name, x.chunks, m.dtype)\n return handle_out(out, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__cumsum_merge__cumprod_merge.return.a_b": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__cumsum_merge__cumprod_merge.return.a_b", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1387, "end_line": 1398, "span_ids": ["_cumsum_merge", "_cumprod_merge"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _cumsum_merge(a, b):\n if isinstance(a, np.ma.masked_array) or isinstance(b, np.ma.masked_array):\n values = np.ma.getdata(a) + np.ma.getdata(b)\n return np.ma.masked_array(values, mask=np.ma.getmaskarray(b))\n return a + b\n\n\ndef _cumprod_merge(a, b):\n if isinstance(a, np.ma.masked_array) or isinstance(b, np.ma.masked_array):\n values = np.ma.getdata(a) * np.ma.getdata(b)\n return np.ma.masked_array(values, mask=np.ma.getmaskarray(b))\n return a * b", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_cumsum_cumsum.return.cumreduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_cumsum_cumsum.return.cumreduction_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1401, "end_line": 1424, "span_ids": ["cumsum"], "tokens": 203}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef cumsum(x, axis=None, dtype=None, out=None, method=\"sequential\"):\n \"\"\"Dask added an additional keyword-only argument ``method``.\n\n method : {'sequential', 'blelloch'}, optional\n Choose which method to use to perform the cumsum. Default is 'sequential'.\n\n * 'sequential' performs the cumsum of each prior block before the current block.\n * 'blelloch' is a work-efficient parallel cumsum. It exposes parallelism by\n first taking the sum of each block and combines the sums via a binary tree.\n This method may be faster or more memory efficient depending on workload,\n scheduler, and hardware. More benchmarking is necessary.\n \"\"\"\n return cumreduction(\n np.cumsum,\n _cumsum_merge,\n 0,\n x,\n axis,\n dtype,\n out=out,\n method=method,\n preop=np.sum,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_cumprod_cumprod.return.cumreduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_cumprod_cumprod.return.cumreduction_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1427, "end_line": 1450, "span_ids": ["cumprod"], "tokens": 203}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef cumprod(x, axis=None, dtype=None, out=None, method=\"sequential\"):\n \"\"\"Dask added an additional keyword-only argument ``method``.\n\n method : {'sequential', 'blelloch'}, optional\n Choose which method to use to perform the cumprod. Default is 'sequential'.\n\n * 'sequential' performs the cumprod of each prior block before the current block.\n * 'blelloch' is a work-efficient parallel cumprod. 
It exposes parallelism by first\n taking the product of each block and combines the products via a binary tree.\n This method may be faster or more memory efficient depending on workload,\n scheduler, and hardware. More benchmarking is necessary.\n \"\"\"\n return cumreduction(\n np.cumprod,\n _cumprod_merge,\n 1,\n x,\n axis,\n dtype,\n out=out,\n method=method,\n preop=np.prod,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_take_take.warned.split_is_not_None": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_take_take.warned.split_is_not_None", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 561, "end_line": 626, "span_ids": ["take"], "tokens": 737}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def take(outname, inname, chunks, index, itemsize, axis=0):\n \"\"\"Index array with an iterable of index\n\n Handles a single index by a single list\n\n Mimics ``np.take``\n\n >>> chunks, dsk = take('y', 'x', [(20, 20, 20, 20)], [5, 1, 47, 3], 8, axis=0)\n >>> chunks\n ((2, 1, 1),)\n >>> dsk # doctest: +SKIP\n {('y', 0): (getitem, (np.concatenate, [(getitem, ('x', 0), ([1, 3, 5],)),\n (getitem, ('x', 2), ([7],))],\n 0),\n (2, 0, 4, 1))}\n\n When list is sorted we retain original block structure\n\n >>> chunks, dsk = take('y', 'x', [(20, 20, 20, 20)], [1, 3, 5, 47], 8, axis=0)\n >>> chunks\n ((3, 1),)\n >>> dsk # doctest: +SKIP\n {('y', 0): (getitem, ('x', 0), ([1, 3, 5],)),\n ('y', 2): (getitem, ('x', 2), ([7],))}\n\n When any indexed blocks would otherwise grow larger than\n dask.config.array.chunk-size, we might split them,\n depending on the value of ``dask.config.slicing.split-large-chunks``.\n\n >>> import dask\n >>> with dask.config.set({\"array.slicing.split-large-chunks\": True}):\n ... chunks, dsk = take('y', 'x', [(1, 1, 1), (1000, 1000), (1000, 1000)],\n ... 
[0] + [1] * 6 + [2], axis=0, itemsize=8)\n >>> chunks\n ((1, 3, 3, 1), (1000, 1000), (1000, 1000))\n \"\"\"\n from .core import PerformanceWarning\n\n plan = slicing_plan(chunks[axis], index)\n if len(plan) >= len(chunks[axis]) * 10:\n factor = math.ceil(len(plan) / len(chunks[axis]))\n\n warnings.warn(\n \"Slicing with an out-of-order index is generating %d \"\n \"times more chunks\" % factor,\n PerformanceWarning,\n stacklevel=6,\n )\n index = np.asarray(index)\n\n # Check for chunks from the plan that would violate the user's\n # configured chunk size.\n nbytes = utils.parse_bytes(config.get(\"array.chunk-size\"))\n other_chunks = [chunks[i] for i in range(len(chunks)) if i != axis]\n other_numel = np.prod([sum(x) for x in other_chunks])\n\n if math.isnan(other_numel):\n warnsize = maxsize = math.inf\n else:\n maxsize = math.ceil(nbytes / (other_numel * itemsize))\n warnsize = maxsize * 5\n\n split = config.get(\"array.slicing.split-large-chunks\", None)\n\n # Warn only when the default is not specified.\n warned = split is not None\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_take.for___index_list_in_plan_take.return.tuple_chunks2_dsk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_take.for___index_list_in_plan_take.return.tuple_chunks2_dsk", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 628, "end_line": 673, "span_ids": ["take"], "tokens": 458}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def take(outname, inname, chunks, index, itemsize, axis=0):\n # ... other code\n\n for _, index_list in plan:\n if not warned and len(index_list) > warnsize:\n msg = (\n \"Slicing is producing a large chunk. To accept the large\\n\"\n \"chunk and silence this warning, set the option\\n\"\n \" >>> with dask.config.set(**{'array.slicing.split_large_chunks': False}):\\n\"\n \" ... array[indexer]\\n\\n\"\n \"To avoid creating the large chunks, set the option\\n\"\n \" >>> with dask.config.set(**{'array.slicing.split_large_chunks': True}):\\n\"\n \" ... 
array[indexer]\"\n )\n warnings.warn(msg, PerformanceWarning, stacklevel=6)\n warned = True\n\n where_index = []\n index_lists = []\n for where_idx, index_list in plan:\n index_length = len(index_list)\n if split and index_length > maxsize:\n index_sublist = np.array_split(\n index_list, math.ceil(index_length / maxsize)\n )\n index_lists.extend(index_sublist)\n where_index.extend([where_idx] * len(index_sublist))\n else:\n index_lists.append(np.array(index_list))\n where_index.append(where_idx)\n\n dims = [range(len(bd)) for bd in chunks]\n\n indims = list(dims)\n indims[axis] = list(range(len(where_index)))\n keys = list(product([outname], *indims))\n\n outdims = list(dims)\n outdims[axis] = where_index\n slices = [[colon] * len(bd) for bd in chunks]\n slices[axis] = index_lists\n slices = list(product(*slices))\n inkeys = list(product([inname], *outdims))\n values = [(getitem, inkey, slc) for inkey, slc in zip(inkeys, slices)]\n\n chunks2 = list(chunks)\n chunks2[axis] = tuple(map(len, index_lists))\n dsk = dict(zip(keys, values))\n return tuple(chunks2), dsk", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_dtype_inference_test_map_blocks_dtype_inference.assert_dtype_in_msg": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_dtype_inference_test_map_blocks_dtype_inference.assert_dtype_in_msg", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1488, "end_line": 1511, "span_ids": ["test_map_blocks_dtype_inference"], "tokens": 229}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_blocks_dtype_inference():\n x = np.arange(50).reshape((5, 10))\n y = np.arange(10)\n dx = da.from_array(x, chunks=5)\n dy = da.from_array(y, chunks=5)\n\n def foo(x, *args, **kwargs):\n cast = kwargs.pop(\"cast\", \"i8\")\n return (x + sum(args)).astype(cast)\n\n assert_eq(dx.map_blocks(foo, dy, 1), foo(dx, dy, 1))\n assert_eq(dx.map_blocks(foo, dy, 1, cast=\"f8\"), foo(dx, dy, 1, cast=\"f8\"))\n assert_eq(\n dx.map_blocks(foo, dy, 1, cast=\"f8\", dtype=\"f8\"),\n foo(dx, dy, 1, cast=\"f8\", dtype=\"f8\"),\n )\n\n def foo(x):\n raise RuntimeError(\"Woops\")\n\n with pytest.raises(ValueError) as e:\n dx.map_blocks(foo)\n msg = str(e.value)\n assert \"dtype\" in msg", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_utils.py_test_meta_from_array_literal_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_utils.py_test_meta_from_array_literal_", "embedding": null, 
"metadata": {"file_path": "dask/array/tests/test_array_utils.py", "file_name": "test_array_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 52, "end_line": 80, "span_ids": ["test_meta_from_array_literal", "test_meta_from_array_type_inputs"], "tokens": 246}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"meta\", [\"\", \"str\", u\"\", u\"str\", b\"\", b\"str\"])\n@pytest.mark.parametrize(\"dtype\", [None, \"bool\", \"int\", \"float\"])\ndef test_meta_from_array_literal(meta, dtype):\n if dtype is None:\n assert meta_from_array(meta, dtype=dtype).dtype.kind in \"SU\"\n else:\n assert (\n meta_from_array(meta, dtype=dtype).dtype == np.array([], dtype=dtype).dtype\n )\n\n\ndef test_meta_from_array_type_inputs():\n x = meta_from_array(np.ndarray, ndim=2, dtype=np.float32)\n assert isinstance(x, np.ndarray)\n assert x.ndim == 2\n assert x.dtype == np.float32\n\n x = da.Array(\n {(\"x\", 0, 0): (np.ones, (5, 5))},\n name=\"x\",\n chunks=(5, 5),\n shape=(5, 5),\n meta=np.ndarray,\n dtype=float,\n )\n assert_eq(x, x)\n\n assert da.from_array(np.ones(5).astype(np.int32), meta=np.ndarray).dtype == np.int32", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_np_functions": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy.py_np_functions", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy.py", "file_name": "test_cupy.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 194, "span_ids": ["imports"], "tokens": 85}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\nimport pytest\n\nimport dask.array as da\nfrom dask.array.utils import assert_eq, same_keys, AxisError, IS_NEP18_ACTIVE\nfrom dask.array.gufunc import apply_gufunc\nfrom dask.sizeof import sizeof\n\ncupy = pytest.importorskip(\"cupy\")\ncupyx = pytest.importorskip(\"cupyx\")\n\n\nfunctions =\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_test_is_valid_chunk_type_test_direct_deferral_wrapping_override.assert_eq_res_2_np_ara": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_test_is_valid_chunk_type_test_direct_deferral_wrapping_override.assert_eq_res_2_np_ara", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_dispatch.py", "file_name": "test_dispatch.py", "file_type": "text/x-python", "category": "test", "start_line": 208, "end_line": 234, "span_ids": ["test_is_valid_chunk_type", "test_direct_deferral_wrapping_override"], "tokens": 228}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"arr_type, result\",\n [\n (WrappedArray, False),\n (da.Array, False),\n (EncapsulateNDArray, True),\n (np.ma.MaskedArray, True),\n (np.ndarray, True),\n (float, False),\n (int, False),\n ],\n)\ndef test_is_valid_chunk_type(arr_type, result):\n \"\"\" Test is_valid_chunk_type for correctness\"\"\"\n assert is_valid_chunk_type(arr_type) is result\n\n\ndef test_direct_deferral_wrapping_override():\n \"\"\" Directly test Dask defering to an upcast type and the ability to still wrap it.\"\"\"\n a = da.from_array(np.arange(4))\n b = WrappedArray(np.arange(4))\n assert a.__add__(b) is NotImplemented\n # Note: remove dask_graph to be able to wrap b in a dask array\n setattr(b, \"__dask_graph__\", None)\n res = a + da.from_array(b)\n assert isinstance(res, da.Array)\n assert_eq(res, 2 * np.arange(4))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_UnknownScalarThatUnderstandsArrayOps_UnknownScalarThatUnderstandsArrayOps.__array_ufunc__.return.UnknownScalarThatUndersta": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_UnknownScalarThatUnderstandsArrayOps_UnknownScalarThatUnderstandsArrayOps.__array_ufunc__.return.UnknownScalarThatUndersta", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_dispatch.py", "file_name": "test_dispatch.py", "file_type": "text/x-python", "category": "test", "start_line": 237, "end_line": 246, "span_ids": ["UnknownScalarThatUnderstandsArrayOps", "UnknownScalarThatUnderstandsArrayOps.__array_ufunc__"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class 
UnknownScalarThatUnderstandsArrayOps(np.lib.mixins.NDArrayOperatorsMixin):\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n outputs = kwargs.get(\"out\", ())\n for item in inputs + outputs:\n if hasattr(item, \"__array_ufunc__\") and not isinstance(\n item, (np.ndarray, Array, UnknownScalarThatUnderstandsArrayOps)\n ):\n return NotImplemented\n # This is a dummy scalar that just returns a new object for every op\n return UnknownScalarThatUnderstandsArrayOps()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_test_delegation_unknown_scalar_that_understands_arr_ops_test_delegation_unknown_scalar_that_understands_arr_ops.assert_type_np_multiply_a": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_test_delegation_unknown_scalar_that_understands_arr_ops_test_delegation_unknown_scalar_that_understands_arr_ops.assert_type_np_multiply_a", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_dispatch.py", "file_name": "test_dispatch.py", "file_type": "text/x-python", "category": "test", "start_line": 249, "end_line": 256, "span_ids": ["test_delegation_unknown_scalar_that_understands_arr_ops"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"arr\", [da.from_array([1, 2]), np.asarray([1, 2])])\ndef test_delegation_unknown_scalar_that_understands_arr_ops(arr):\n s = UnknownScalarThatUnderstandsArrayOps()\n assert type(arr * s) == UnknownScalarThatUnderstandsArrayOps\n assert type(s * arr) == UnknownScalarThatUnderstandsArrayOps\n # Explicit tests of numpy NEP-13 dispatching\n assert type(np.multiply(s, arr)) == UnknownScalarThatUnderstandsArrayOps\n assert type(np.multiply(arr, s)) == UnknownScalarThatUnderstandsArrayOps", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_UnknownScalar_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_UnknownScalar_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_dispatch.py", "file_name": "test_dispatch.py", "file_type": "text/x-python", "category": "test", "start_line": 259, "end_line": 286, "span_ids": ["UnknownScalar", "test_delegation_unknown_scalar", "test_delegation_specific_cases", "UnknownScalar:4", "UnknownScalar.__mul__"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class 
UnknownScalar:\n __array_ufunc__ = None\n\n def __mul__(self, other):\n return 42\n\n __rmul__ = __mul__\n\n\n@pytest.mark.parametrize(\"arr\", [da.from_array([1, 2]), np.asarray([1, 2])])\ndef test_delegation_unknown_scalar(arr):\n s = UnknownScalar()\n assert arr * s == 42\n assert s * arr == 42\n with pytest.raises(\n TypeError, match=\"operand 'UnknownScalar' does not support ufuncs\"\n ):\n np.multiply(s, arr)\n\n\ndef test_delegation_specific_cases():\n a = da.from_array([\"a\", \"b\", \".\", \"d\"])\n # Fixes GH6631\n assert_eq(a == \".\", [False, False, True, False])\n assert_eq(\".\" == a, [False, False, True, False])\n # Fixes GH6611\n assert \"b\" in a", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_dtype_preservation_test_svd_compressed_deterministic.assert_all_da_compute_u_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_dtype_preservation_test_svd_compressed_deterministic.assert_all_da_compute_u_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 483, "end_line": 497, "span_ids": ["test_svd_compressed_deterministic", "test_svd_dtype_preservation"], "tokens": 215}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"chunks\", [(10, 50), (50, 10), (-1, -1)])\n@pytest.mark.parametrize(\"dtype\", [np.float32, np.float64])\ndef test_svd_dtype_preservation(chunks, dtype):\n x = da.random.random((50, 50), chunks=chunks).astype(dtype)\n u, s, v = svd(x)\n assert u.dtype == s.dtype == v.dtype == dtype\n\n\ndef test_svd_compressed_deterministic():\n m, n = 30, 25\n x = da.random.RandomState(1234).random_sample(size=(m, n), chunks=(5, 5))\n u, s, vt = svd_compressed(x, 3, seed=1234)\n u2, s2, vt2 = svd_compressed(x, 3, seed=1234)\n\n assert all(da.compute((u == u2).all(), (s == s2).all(), (vt == vt2).all()))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_lstsq_test_lstsq.None_6": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_lstsq_test_lstsq.None_6", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 793, "end_line": 832, "span_ids": ["test_lstsq"], "tokens": 463}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize((\"nrow\", \"ncol\", \"chunk\"), [(20, 10, 5), (100, 10, 10)])\ndef test_lstsq(nrow, ncol, chunk):\n np.random.seed(1)\n A = np.random.randint(1, 20, (nrow, ncol))\n b = np.random.randint(1, 20, nrow)\n\n dA = da.from_array(A, (chunk, ncol))\n db = da.from_array(b, chunk)\n\n x, r, rank, s = np.linalg.lstsq(A, b, rcond=-1)\n dx, dr, drank, ds = da.linalg.lstsq(dA, db)\n\n assert_eq(dx, x)\n assert_eq(dr, r)\n assert drank.compute() == rank\n assert_eq(ds, s)\n\n # reduce rank causes multicollinearity, only compare rank\n A[:, 1] = A[:, 2]\n dA = da.from_array(A, (chunk, ncol))\n db = da.from_array(b, chunk)\n x, r, rank, s = np.linalg.lstsq(\n A, b, rcond=np.finfo(np.double).eps * max(nrow, ncol)\n )\n assert rank == ncol - 1\n dx, dr, drank, ds = da.linalg.lstsq(dA, db)\n assert drank.compute() == rank\n\n # 2D case\n A = np.random.randint(1, 20, (nrow, ncol))\n b2D = np.random.randint(1, 20, (nrow, ncol // 2))\n dA = da.from_array(A, (chunk, ncol))\n db2D = da.from_array(b2D, (chunk, ncol // 2))\n x, r, rank, s = np.linalg.lstsq(A, b2D, rcond=-1)\n dx, dr, drank, ds = da.linalg.lstsq(dA, db2D)\n\n assert_eq(dx, x)\n assert_eq(dr, r)\n assert drank.compute() == rank\n assert_eq(ds, s)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_flip_correction_test_svd_flip_correction.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_flip_correction_test_svd_flip_correction.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 856, "end_line": 878, "span_ids": ["test_svd_flip_correction"], "tokens": 279}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"shape\", [(10, 20), (10, 10), (20, 10)])\n@pytest.mark.parametrize(\"chunks\", [(-1, -1), (10, -1), (-1, 10)])\n@pytest.mark.parametrize(\"dtype\", [\"f4\", \"f8\"])\ndef test_svd_flip_correction(shape, chunks, dtype):\n # Verify that sign-corrected SVD results can still\n # be used to reconstruct inputs\n x = da.random.random(size=shape, chunks=chunks).astype(dtype)\n u, s, v = da.linalg.svd(x)\n\n # Choose precision in evaluation based on float precision\n decimal = 9 if np.dtype(dtype).itemsize > 4 else 6\n\n # Validate w/ dask inputs\n uf, vf = svd_flip(u, v)\n assert uf.dtype == u.dtype\n assert vf.dtype == v.dtype\n np.testing.assert_almost_equal(np.asarray(np.dot(uf * s, vf)), x, decimal=decimal)\n\n # Validate w/ numpy inputs\n uc, vc = svd_flip(*da.compute(u, v))\n assert uc.dtype == u.dtype\n assert vc.dtype == v.dtype\n np.testing.assert_almost_equal(np.asarray(np.dot(uc * s, vc)), x, decimal=decimal)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": 
"{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_flip_sign_test_svd_flip_sign.assert_eq_v_y_T_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_flip_sign_test_svd_flip_sign.assert_eq_v_y_T_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 881, "end_line": 899, "span_ids": ["test_svd_flip_sign"], "tokens": 233}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"dtype\", [\"f2\", \"f4\", \"f8\", \"f16\", \"c8\", \"c16\", \"c32\"])\n@pytest.mark.parametrize(\"u_based\", [True, False])\ndef test_svd_flip_sign(dtype, u_based):\n try:\n x = np.array(\n [[1, -1, 1, -1], [1, -1, 1, -1], [-1, 1, 1, -1], [-1, 1, 1, -1]],\n dtype=dtype,\n )\n except TypeError:\n pytest.skip(\"128-bit floats not supported by NumPy\")\n u, v = svd_flip(x, x.T, u_based_decision=u_based)\n assert u.dtype == x.dtype\n assert v.dtype == x.dtype\n # Verify that all singular vectors have same\n # sign except for the last one (i.e. last column)\n y = x.copy()\n y[:, -1] *= y.dtype.type(-1)\n assert_eq(u, y)\n assert_eq(v, y.T)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_disable_lowlevel_fusion_test_disable_lowlevel_fusion.with_dask_config_set_op.assert_eq_y_1_3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_disable_lowlevel_fusion_test_disable_lowlevel_fusion.with_dask_config_set_op.assert_eq_y_1_3_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 375, "end_line": 388, "span_ids": ["test_disable_lowlevel_fusion"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_disable_lowlevel_fusion():\n \"\"\"Check that by disabling fusion, the HLG survives through optimizations\"\"\"\n\n with dask.config.set({\"optimization.fuse.active\": False}):\n y = da.ones(3, chunks=(3,), dtype=\"int\")\n optimize = y.__dask_optimize__\n dsk1 = y.__dask_graph__()\n dsk2 = optimize(dsk1, y.__dask_keys__())\n assert isinstance(dsk1, HighLevelGraph)\n assert isinstance(dsk2, HighLevelGraph)\n assert dsk1 == dsk2\n y = y.persist()\n assert isinstance(y.__dask_graph__(), HighLevelGraph)\n assert_eq(y, [1] * 3)", "start_char_idx": null, 
"end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_assumes_shape_matches_first_array_if_trim_is_false_test_map_overlap_assumes_shape_matches_first_array_if_trim_is_false.assert_z2_shape_10_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_assumes_shape_matches_first_array_if_trim_is_false_test_map_overlap_assumes_shape_matches_first_array_if_trim_is_false.assert_z2_shape_10_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 444, "end_line": 456, "span_ids": ["test_map_overlap_assumes_shape_matches_first_array_if_trim_is_false"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_overlap_assumes_shape_matches_first_array_if_trim_is_false():\n # https://github.com/dask/dask/issues/6681\n x1 = da.ones((10,), chunks=(5, 5))\n x2 = x1.rechunk(10)\n\n def oversum(x):\n return x[2:-2]\n\n z1 = da.map_overlap(oversum, x1, depth=2, trim=False)\n assert z1.shape == (10,)\n\n z2 = da.map_overlap(oversum, x2, depth=2, trim=False)\n assert z2.shape == (10,)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_deprecated_signature_test_map_overlap_deprecated_signature.None_2.assert_y_shape_3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_deprecated_signature_test_map_overlap_deprecated_signature.None_2.assert_y_shape_3_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 459, "end_line": 479, "span_ids": ["test_map_overlap_deprecated_signature"], "tokens": 176}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_overlap_deprecated_signature():\n def func(x):\n return np.array(x.sum())\n\n x = da.ones(3)\n\n # Old positional signature: func, depth, boundary, trim\n with pytest.warns(FutureWarning):\n y = da.map_overlap(x, func, 0, \"reflect\", True)\n assert y.compute() == 3\n assert y.shape == (3,)\n\n with pytest.warns(FutureWarning):\n y = da.map_overlap(x, func, 1, \"reflect\", True)\n assert y.compute() == 5\n assert y.shape == (3,)\n\n with pytest.warns(FutureWarning):\n y = da.map_overlap(x, func, 1, 
\"reflect\", False)\n assert y.compute() == 5\n assert y.shape == (3,)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_plan_rechunk_heterogeneous_test_plan_rechunk_heterogeneous.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_plan_rechunk_heterogeneous_test_plan_rechunk_heterogeneous.None_4", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 403, "end_line": 430, "span_ids": ["test_plan_rechunk_heterogeneous"], "tokens": 333}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_plan_rechunk_heterogeneous():\n c = (10,) * 1 # coarse\n f = (1,) * 10 # fine\n cf = c + f\n cc = c + c\n ff = f + f\n fc = f + c\n\n # No intermediate required\n steps = _plan((cc, cf), (ff, ff))\n _assert_steps(steps, [(ff, ff)])\n steps = _plan((cf, fc), (ff, cf))\n _assert_steps(steps, [(ff, cf)])\n\n # An intermediate is used to reduce graph size\n steps = _plan((cc, cf), (ff, cc))\n _assert_steps(steps, [(cc, cc), (ff, cc)])\n\n steps = _plan((cc, cf, cc), (ff, cc, cf))\n _assert_steps(steps, [(cc, cc, cc), (ff, cc, cf)])\n\n # Imposing a memory limit => the first intermediate is constrained:\n # * cc -> ff would increase the graph size: no\n # * ff -> cf would increase the block size too much: no\n # * cf -> cc fits the bill (graph size /= 10, block size neutral)\n # * cf -> fc also fits the bill (graph size and block size neutral)\n steps = _plan((cc, ff, cf), (ff, cf, cc), block_size_limit=100)\n _assert_steps(steps, [(cc, ff, cc), (ff, cf, cc)])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_zero_test_rechunk_bad_keys.assert_100_in_str_info": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_zero_test_rechunk_bad_keys.assert_100_in_str_info", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 754, "end_line": 779, "span_ids": ["test_rechunk_bad_keys", "test_rechunk_zero"], "tokens": 241}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rechunk_zero():\n with dask.config.set({\"array.chunk-size\": \"1B\"}):\n x = da.ones(10, 
chunks=(5,))\n y = x.rechunk(\"auto\")\n assert y.chunks == ((1,) * 10,)\n\n\ndef test_rechunk_bad_keys():\n x = da.zeros((2, 3, 4), chunks=1)\n assert x.rechunk({-1: 4}).chunks == ((1, 1), (1, 1, 1), (4,))\n assert x.rechunk({-x.ndim: 2}).chunks == ((2,), (1, 1, 1), (1, 1, 1, 1))\n\n with pytest.raises(TypeError) as info:\n x.rechunk({\"blah\": 4})\n\n assert \"blah\" in str(info.value)\n\n with pytest.raises(ValueError) as info:\n x.rechunk({100: 4})\n\n assert \"100\" in str(info.value)\n\n with pytest.raises(ValueError) as info:\n x.rechunk({-100: 4})\n\n assert \"-100\" in str(info.value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_balance_basics_test_balance_chunks_unchanged.assert_balanced_chunks_0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_balance_basics_test_balance_chunks_unchanged.assert_balanced_chunks_0_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 782, "end_line": 799, "span_ids": ["test_balance_chunks_unchanged", "test_balance_basics"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_balance_basics():\n arr_len = 220\n\n x = da.from_array(np.arange(arr_len), chunks=100)\n balanced = x.rechunk(chunks=100, balance=True)\n unbalanced = x.rechunk(chunks=100, balance=False)\n assert unbalanced.chunks[0] == (100, 100, 20)\n assert balanced.chunks[0] == (110, 110)\n\n\ndef test_balance_chunks_unchanged():\n arr_len = 220\n\n x = da.from_array(np.arange(arr_len))\n balanced = x.rechunk(chunks=100, balance=True)\n unbalanced = x.rechunk(chunks=100, balance=False)\n assert unbalanced.chunks[0] == (100, 100, 20)\n assert balanced.chunks[0] == (110, 110)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_balance_small_test_balance_small.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_balance_small_test_balance_small.None_3", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 802, "end_line": 817, "span_ids": ["test_balance_small"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def 
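`test_balance_basics` (and its `*_chunks_unchanged` variant) document what `balance=True` buys: instead of a uniform chunk size with a small remainder chunk, rechunk evens the chunks out. The same numbers, restated as a standalone snippet:

```python
# balance=False keeps the requested size and leaves a 20-element tail;
# balance=True redistributes into two equal chunks instead.
import numpy as np
import dask.array as da

x = da.from_array(np.arange(220), chunks=100)
assert x.rechunk(chunks=100, balance=False).chunks[0] == (100, 100, 20)
assert x.rechunk(chunks=100, balance=True).chunks[0] == (110, 110)
```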
test_balance_small():\n arr_len = 13\n\n x = da.from_array(np.arange(arr_len))\n balanced = x.rechunk(chunks=4, balance=True)\n unbalanced = x.rechunk(chunks=4, balance=False)\n assert balanced.chunks[0] == (5, 5, 3)\n assert unbalanced.chunks[0] == (4, 4, 4, 1)\n\n arr_len = 7\n\n x = da.from_array(np.arange(arr_len))\n balanced = x.rechunk(chunks=3, balance=True)\n unbalanced = x.rechunk(chunks=3, balance=False)\n assert balanced.chunks[0] == (4, 3)\n assert unbalanced.chunks[0] == (3, 3, 1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_balance_n_chunks_size_test_balance_raises.x_rechunk_chunks_arr_len_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_balance_n_chunks_size_test_balance_raises.x_rechunk_chunks_arr_len_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 820, "end_line": 842, "span_ids": ["test_balance_n_chunks_size", "test_balance_raises"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_balance_n_chunks_size():\n arr_len = 100\n n_chunks = 8\n\n x = da.from_array(np.arange(arr_len))\n balanced = x.rechunk(chunks=arr_len // n_chunks, balance=True)\n unbalanced = x.rechunk(chunks=arr_len // n_chunks, balance=False)\n assert balanced.chunks[0] == (13,) * 7 + (9,)\n assert unbalanced.chunks[0] == (12,) * 8 + (4,)\n\n\ndef test_balance_raises():\n arr_len = 100\n n_chunks = 11\n\n x = da.from_array(np.arange(arr_len))\n with pytest.warns(UserWarning, match=\"Try increasing the chunk size\"):\n balanced = x.rechunk(chunks=arr_len // n_chunks, balance=True)\n unbalanced = x.rechunk(chunks=arr_len // n_chunks, balance=False)\n assert balanced.chunks == unbalanced.chunks\n\n n_chunks = 10\n x.rechunk(chunks=arr_len // n_chunks, balance=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_balance_basics_2d_test_balance_different_inputs.assert_balanced_chunks_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_balance_basics_2d_test_balance_different_inputs.assert_balanced_chunks_1_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 845, "end_line": 872, "span_ids": ["test_balance_different_inputs", "test_balance_2d_negative_dimension", "test_balance_basics_2d"], "tokens": 295}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_balance_basics_2d():\n N = 210\n\n x = da.from_array(np.random.uniform(size=(N, N)))\n balanced = x.rechunk(chunks=(100, 100), balance=True)\n unbalanced = x.rechunk(chunks=(100, 100), balance=False)\n assert unbalanced.chunks == ((100, 100, 10), (100, 100, 10))\n assert balanced.chunks == ((105, 105), (105, 105))\n\n\ndef test_balance_2d_negative_dimension():\n N = 210\n\n x = da.from_array(np.random.uniform(size=(N, N)))\n balanced = x.rechunk(chunks=(100, -1), balance=True)\n unbalanced = x.rechunk(chunks=(100, -1), balance=False)\n assert unbalanced.chunks == ((100, 100, 10), (N,))\n assert balanced.chunks == ((105, 105), (N,))\n\n\ndef test_balance_different_inputs():\n N = 210\n\n x = da.from_array(np.random.uniform(size=(N, N)))\n balanced = x.rechunk(chunks=(\"10MB\", -1), balance=True)\n unbalanced = x.rechunk(chunks=(\"10MB\", -1), balance=False)\n assert balanced.chunks == unbalanced.chunks\n assert balanced.chunks[1] == (N,)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_balance_split_into_n_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_balance_split_into_n_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 875, "end_line": 899, "span_ids": ["test_balance_split_into_n_chunks"], "tokens": 154}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_balance_split_into_n_chunks():\n # Some prime numbers around 1000\n array_lens = [\n 991,\n 997,\n 1009,\n 1013,\n 1019,\n 1021,\n 1031,\n 1033,\n 1039,\n 1049,\n 1051,\n 1061,\n 1063,\n 1069,\n ]\n\n for N in array_lens:\n for nchunks in range(1, 20):\n x = da.from_array(np.random.uniform(size=N))\n y = x.rechunk(chunks=len(x) // nchunks, balance=True)\n assert len(y.chunks[0]) == nchunks", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_median_test_median.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_median_test_median.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 688, "end_line": 697, "span_ids": ["test_median"], "tokens": 128}, "excluded_embed_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"func\", [\"median\", \"nanmedian\"])\n@pytest.mark.parametrize(\"axis\", [0, [0, 1], 1, -1])\n@pytest.mark.parametrize(\"keepdims\", [True, False])\ndef test_median(axis, keepdims, func):\n x = np.arange(100).reshape((2, 5, 10))\n d = da.from_array(x, chunks=2)\n assert_eq(\n getattr(da, func)(d, axis=axis, keepdims=keepdims),\n getattr(np, func)(x, axis=axis, keepdims=keepdims),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_object_reduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_object_reduction_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 700, "end_line": 717, "span_ids": ["test_nan_func_does_not_warn", "test_object_reduction"], "tokens": 156}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"method\", [\"sum\", \"mean\", \"prod\"])\ndef test_object_reduction(method):\n arr = da.ones(1).astype(object)\n result = getattr(arr, method)().compute()\n assert result == 1\n\n\n@pytest.mark.parametrize(\"func\", [\"nanvar\", \"nanstd\"])\ndef test_nan_func_does_not_warn(func):\n # non-regression test for #6105\n x = np.ones((10,)) * np.nan\n x[0] = 1\n x[1] = 2\n d = da.from_array(x, chunks=2)\n with pytest.warns(None) as rec:\n getattr(da, func)(d).compute()\n assert not rec # did not warn", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_test_reshape_unknown_sizes_test_reshape_unknown_sizes.None_1.A_reshape_60_1_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_test_reshape_unknown_sizes_test_reshape_unknown_sizes.None_1.A_reshape_60_1_1_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reshape.py", "file_name": "test_reshape.py", "file_type": "text/x-python", "category": "test", "start_line": 68, "end_line": 81, "span_ids": ["test_reshape_unknown_sizes"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, 
"text": "def test_reshape_unknown_sizes():\n a = np.random.random((10, 6, 6))\n A = da.from_array(a, chunks=(5, 2, 3))\n\n a2 = a.reshape((60, -1))\n A2 = A.reshape((60, -1))\n\n assert A2.shape == (60, 6)\n assert_eq(A2, a2)\n\n with pytest.raises(ValueError):\n a.reshape((60, -1, -1))\n with pytest.raises(ValueError):\n A.reshape((60, -1, -1))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_test_reshape_all_chunked_no_merge_test_reshape_all_chunked_no_merge.assert_eq_result_base_re": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_test_reshape_all_chunked_no_merge_test_reshape_all_chunked_no_merge.assert_eq_result_base_re", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reshape.py", "file_name": "test_reshape.py", "file_type": "text/x-python", "category": "test", "start_line": 84, "end_line": 127, "span_ids": ["test_reshape_all_chunked_no_merge"], "tokens": 702}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"inshape, inchunks, outshape, outchunks\",\n [\n # (2, 3, 4) -> (6, 4)\n ((2, 3, 4), ((1, 1), (1, 2), (2, 2)), (6, 4), ((1, 2, 1, 2), (2, 2))),\n # (1, 2, 3, 4) -> (12, 4)\n ((1, 2, 3, 4), ((1,), (1, 1), (1, 2), (2, 2)), (6, 4), ((1, 2, 1, 2), (2, 2))),\n # (2, 2, 3, 4) -> (12, 4)\n (\n (2, 2, 3, 4),\n ((1, 1), (1, 1), (1, 2), (2, 2)),\n (12, 4),\n ((1, 2, 1, 2, 1, 2, 1, 2), (2, 2)),\n ),\n # (2, 2, 3, 4) -> (4, 3, 4)\n (\n (2, 2, 3, 4),\n ((1, 1), (1, 1), (1, 2), (2, 2)),\n (4, 3, 4),\n ((1, 1, 1, 1), (1, 2), (2, 2)),\n ),\n # (2, 2, 3, 4) -> (4, 3, 4)\n ((2, 2, 3, 4), ((1, 1), (2,), (1, 2), (4,)), (4, 3, 4), ((2, 2), (1, 2), (4,))),\n # (2, 3, 4) -> (24,).\n ((2, 3, 4), ((1, 1), (1, 1, 1), (2, 2)), (24,), ((2,) * 12,)),\n ],\n)\ndef test_reshape_all_chunked_no_merge(inshape, inchunks, outshape, outchunks):\n # https://github.com/dask/dask/issues/5544#issuecomment-712280433\n # When the early axes are completely chunked then we are just moving blocks\n # and can avoid any rechunking. 
The result inchunks are the same as the\n # input chunks.\n base = np.arange(np.prod(inshape)).reshape(inshape)\n a = da.from_array(base, chunks=inchunks)\n\n # test directly\n inchunks2, outchunks2 = reshape_rechunk(a.shape, outshape, inchunks)\n assert inchunks2 == inchunks\n assert outchunks2 == outchunks\n\n # and via reshape\n result = a.reshape(outshape)\n assert result.chunks == outchunks\n assert_eq(result, base.reshape(outshape))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_test_reshape_all_not_chunked_merge_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_test_reshape_all_not_chunked_merge_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reshape.py", "file_name": "test_reshape.py", "file_type": "text/x-python", "category": "test", "start_line": 130, "end_line": 152, "span_ids": ["test_reshape_all_not_chunked_merge"], "tokens": 241}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"inshape, inchunks, expected_inchunks, outshape, outchunks\",\n [\n # (2, 3, 4) -> (24,). This does merge, since the second dim isn't fully chunked!\n ((2, 3, 4), ((1, 1), (1, 2), (2, 2)), ((1, 1), (3,), (4,)), (24,), ((12, 12),)),\n ],\n)\ndef test_reshape_all_not_chunked_merge(\n inshape, inchunks, expected_inchunks, outshape, outchunks\n):\n base = np.arange(np.prod(inshape)).reshape(inshape)\n a = da.from_array(base, chunks=inchunks)\n\n # test directly\n inchunks2, outchunks2 = reshape_rechunk(a.shape, outshape, inchunks)\n assert inchunks2 == expected_inchunks\n assert outchunks2 == outchunks\n\n # and via reshape\n result = a.reshape(outshape)\n assert result.chunks == outchunks\n assert_eq(result, base.reshape(outshape))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_getitem_avoids_large_chunks_test_getitem_avoids_large_chunks.with_dask_config_set_ar.None_2.assert_result_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_getitem_avoids_large_chunks_test_getitem_avoids_large_chunks.with_dask_config_set_ar.None_2.assert_result_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 884, "end_line": 912, "span_ids": ["test_getitem_avoids_large_chunks"], "tokens": 287}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
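Both reshape tests call `reshape_rechunk` directly; it takes the input shape, the target shape, and the input chunks, and returns the (possibly merged) intermediate input chunks plus the output chunks. The merge case from the parametrization above, as a standalone call:

```python
from dask.array.reshape import reshape_rechunk

inchunks, outchunks = reshape_rechunk(
    (2, 3, 4),                 # input shape
    (24,),                     # output shape
    ((1, 1), (1, 2), (2, 2)),  # input chunks
)
# The partially chunked middle axis forces a rechunk-merge first:
assert inchunks == ((1, 1), (3,), (4,))
assert outchunks == ((12, 12),)
```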
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_getitem_avoids_large_chunks():\n with dask.config.set({\"array.chunk-size\": \"0.1Mb\"}):\n a = np.arange(2 * 128 * 128, dtype=\"int64\").reshape(2, 128, 128)\n arr = da.from_array(a, chunks=(1, 128, 128))\n indexer = [0] + [1] * 11\n expected = a[indexer]\n\n # By default, we warn\n with pytest.warns(da.PerformanceWarning):\n result = arr[indexer]\n\n assert_eq(result, expected)\n assert result.chunks == ((1, 11), (128,), (128,))\n\n # Users can silence the warning\n with dask.config.set({\"array.slicing.split-large-chunks\": False}):\n with pytest.warns(None) as e:\n result = arr[indexer]\n assert len(e) == 0\n assert_eq(result, expected)\n\n # Users can silence the warning\n with dask.config.set({\"array.slicing.split-large-chunks\": True}):\n with pytest.warns(None) as e:\n result = arr[indexer]\n assert len(e) == 0 # no\n assert_eq(result, expected)\n\n assert result.chunks == ((1,) * 12, (128,), (128,))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_getitem_avoids_large_chunks_missing_test_getitem_avoids_large_chunks_missing.with_dask_config_set_ar.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_getitem_avoids_large_chunks_missing_test_getitem_avoids_large_chunks_missing.with_dask_config_set_ar.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 915, "end_line": 935, "span_ids": ["test_getitem_avoids_large_chunks_missing"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"chunks\",\n [\n ((1, 1, 1, 1), (np.nan,), (np.nan,)),\n pytest.param(\n ((np.nan, np.nan, np.nan, np.nan), (500,), (500,)),\n marks=pytest.mark.xfail(reason=\"https://github.com/dask/dask/issues/6586\"),\n ),\n ],\n)\ndef test_getitem_avoids_large_chunks_missing(chunks):\n # We cannot apply the \"avoid large chunks\" optimization when\n # the chunks have unknown sizes.\n with dask.config.set({\"array.slicing.split-large-chunks\": True}):\n a = np.arange(4 * 500 * 500).reshape(4, 500, 500)\n arr = da.from_array(a, chunks=(1, 500, 500))\n arr._chunks = chunks\n indexer = [0, 1] + [2] * 100 + [3]\n expected = a[indexer]\n result = arr[indexer]\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_take_avoids_large_chunks_test_take_avoids_large_chunks.with_dask_config_set_ar.None_11": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_take_avoids_large_chunks_test_take_avoids_large_chunks.with_dask_config_set_ar.None_11", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 938, "end_line": 962, "span_ids": ["test_take_avoids_large_chunks"], "tokens": 411}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_take_avoids_large_chunks():\n # unit test for https://github.com/dask/dask/issues/6270\n with dask.config.set({\"array.slicing.split-large-chunks\": True}):\n chunks = ((1, 1, 1, 1), (500,), (500,))\n itemsize = 8\n index = np.array([0, 1] + [2] * 101 + [3])\n chunks2, dsk = take(\"a\", \"b\", chunks, index, itemsize)\n assert chunks2 == ((1, 1, 51, 50, 1), (500,), (500,))\n assert len(dsk) == 5\n\n index = np.array([0] * 101 + [1, 2, 3])\n chunks2, dsk = take(\"a\", \"b\", chunks, index, itemsize)\n assert chunks2 == ((51, 50, 1, 1, 1), (500,), (500,))\n assert len(dsk) == 5\n\n index = np.array([0, 1, 2] + [3] * 101)\n chunks2, dsk = take(\"a\", \"b\", chunks, index, itemsize)\n assert chunks2 == ((1, 1, 1, 51, 50), (500,), (500,))\n assert len(dsk) == 5\n\n chunks = ((500,), (1, 1, 1, 1), (500,))\n index = np.array([0, 1, 2] + [3] * 101)\n chunks2, dsk = take(\"a\", \"b\", chunks, index, itemsize, axis=1)\n assert chunks2 == ((500,), (1, 1, 1, 51, 50), (500,))\n assert len(dsk) == 5", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_take_uses_config_test_take_uses_config.with_dask_config_set_ar.assert_len_dsk_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_take_uses_config_test_take_uses_config.with_dask_config_set_ar.assert_len_dsk_4", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 965, "end_line": 973, "span_ids": ["test_take_uses_config"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_take_uses_config():\n with dask.config.set({\"array.slicing.split-large-chunks\": True}):\n chunks = ((1, 1, 1, 1), (500,), (500,))\n index = np.array([0, 1] + [2] * 101 + [3])\n itemsize = 8\n with config.set({\"array.chunk-size\": \"10GB\"}):\n chunks2, dsk = take(\"a\", \"b\", chunks, index, itemsize)\n assert chunks2 == ((1, 1, 101, 1), (500,), (500,))\n assert len(dsk) == 4", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", 
"metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_svg.py_test_draw_sizes_test_draw_sizes.assert_b_c_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_svg.py_test_draw_sizes_test_draw_sizes.assert_b_c_5", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_svg.py", "file_name": "test_svg.py", "file_type": "text/x-python", "category": "test", "start_line": 70, "end_line": 79, "span_ids": ["test_draw_sizes"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_draw_sizes():\n assert draw_sizes((10, 10), size=100) == (100, 100) # respect symmetry\n assert draw_sizes((10, 10), size=200) == (200, 200) # respect size keyword\n assert draw_sizes((10, 5), size=100) == (100, 50) # respect small ratios\n\n a, b, c = draw_sizes((1000, 100, 10))\n assert a > b\n assert b > c\n assert a < b * 5\n assert b < c * 5", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_svg.py_test_too_many_lines_fills_sides_darker_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_svg.py_test_too_many_lines_fills_sides_darker_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_svg.py", "file_name": "test_svg.py", "file_type": "text/x-python", "category": "test", "start_line": 82, "end_line": 92, "span_ids": ["test_too_many_lines_fills_sides_darker", "test_3d"], "tokens": 110}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_too_many_lines_fills_sides_darker():\n data = da.ones((16000, 2400, 3600), chunks=(1, 2400, 3600))\n text = data.to_svg()\n assert \"8B4903\" in text\n assert text.count(\"\\n\") < 300\n\n\ndef test_3d():\n text = da.ones((10, 10, 10, 10, 10)).to_svg()\n assert text.count(\" {}>\".format(self.indices, self.output)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise._dict_Blockwise._dict.return.self__cached_dict_dsk_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise._dict_Blockwise._dict.return.self__cached_dict_dsk_", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 261, "end_line": 305, 
"span_ids": ["Blockwise._dict"], "tokens": 384}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Blockwise(Layer):\n\n @property\n def _dict(self):\n if hasattr(self, \"_cached_dict\"):\n return self._cached_dict[\"dsk\"]\n else:\n keys = tuple(map(blockwise_token, range(len(self.indices))))\n dsk, _ = fuse(self.dsk, [self.output])\n func = SubgraphCallable(dsk, self.output, keys)\n\n key_deps = {}\n non_blockwise_keys = set()\n dsk = make_blockwise_graph(\n func,\n self.output,\n self.output_indices,\n *list(toolz.concat(self.indices)),\n new_axes=self.new_axes,\n numblocks=self.numblocks,\n concatenate=self.concatenate,\n key_deps=key_deps,\n non_blockwise_keys=non_blockwise_keys,\n )\n\n if self.io_subgraph:\n # This is an IO layer.\n for k in dsk:\n io_key = (self.io_name,) + tuple([k[i] for i in range(1, len(k))])\n if io_key in dsk[k]:\n # Inject IO-function arguments into the blockwise graph\n # as a single (packed) tuple.\n io_item = self.io_subgraph.get(io_key)\n io_item = list(io_item[1:]) if len(io_item) > 1 else []\n new_task = [io_item if v == io_key else v for v in dsk[k]]\n dsk[k] = tuple(new_task)\n\n # Clear IO \"placeholder\" dependencies\n for k in key_deps:\n if k[0] == self.output:\n key_deps[k] = set()\n\n self._cached_dict = {\n \"dsk\": dsk,\n \"basic_layer\": BasicLayer(dsk, key_deps, non_blockwise_keys),\n }\n return self._cached_dict[\"dsk\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise.get_output_keys_Blockwise.get_output_keys.return._self_output_p_for_p_i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise.get_output_keys_Blockwise.get_output_keys.return._self_output_p_for_p_i", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 307, "end_line": 333, "span_ids": ["Blockwise.get_output_keys"], "tokens": 225}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Blockwise(Layer):\n\n def get_output_keys(self):\n\n # TODO: Handle N-D Collections and more-complex\n # tensor operations.\n\n # Only deal with 1-D collections (for now).\n # Otherwise, we allow dict materialization.\n if len(self.output_indices) != 1:\n return super().get_output_keys()\n\n # Check inputs.\n # Only deal with 1-to-1 input-output collection\n # mapping (for now).\n input_cnt = 0\n in_name = None\n for _name, _ind in self.indices:\n if _ind is not None:\n if len(_ind) != 1 or input_cnt:\n return super().get_output_keys()\n in_name = _name\n input_cnt += 1\n\n # At this point, we can assume:\n # - Input and output indices are aligned\n # - Collection is 1-D\n # - 
Only one input collection\n return {(self.output, p) for p in range(self.numblocks[in_name][0])}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_find_all_possible_keys_find_all_possible_keys.return.ret": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_find_all_possible_keys_find_all_possible_keys.return.ret", "embedding": null, "metadata": {"file_path": "dask/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 194, "end_line": 219, "span_ids": ["find_all_possible_keys"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def find_all_possible_keys(tasks) -> set:\n \"\"\"Returns all possible keys in `tasks` including hashable literals.\n\n The definition of a key in a Dask graph is any hashable object\n that is not a task. This function returns all such objects in\n `tasks` even if the object is in fact a literal.\n\n \"\"\"\n ret = set()\n while tasks:\n work = []\n for w in tasks:\n typ = type(w)\n if typ is tuple and w and callable(w[0]): # istask(w)\n work.extend(w[1:])\n elif typ is list:\n work.extend(w)\n elif typ is dict:\n work.extend(w.values())\n else:\n try:\n ret.add(w)\n except TypeError: # not hashable\n pass\n tasks = work\n return ret", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.index__Frame.index_24.self._meta.result__meta": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.index__Frame.index_24.self._meta.result__meta", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 444, "end_line": 463, "span_ids": ["_Frame.index", "_Frame.index_24"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @property\n def index(self):\n \"\"\"Return dask Index instance\"\"\"\n return self.map_partitions(\n getattr,\n \"index\",\n token=self._name + \"-index\",\n meta=self._meta.index,\n enforce_metadata=False,\n )\n\n @index.setter\n def index(self, value):\n self.divisions = value.divisions\n result = map_partitions(\n methods.assign_index, self, value, enforce_metadata=False\n )\n self.dask = result.dask\n self._name = result._name\n self._meta = result._meta", "start_char_idx": null, "end_char_idx": null, 
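A worked example of `find_all_possible_keys` from the snapshot above: it walks tasks (tuples headed by a callable), lists, and dict values, and collects every hashable leaf, literals included, since any of them could be a key:

```python
from operator import add
from dask.core import find_all_possible_keys

# dict *values* are traversed, so the mapping key "k" is not collected
tasks = [(add, "x", 1), [(add, "y", "x"), 2], {"k": (add, "z", 3.5)}]
assert find_all_possible_keys(tasks) == {"x", 1, "y", 2, "z", 3.5}
```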
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.nlargest_Series.isin.return.super_isin_values_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.nlargest_Series.isin.return.super_isin_values_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3013, "end_line": 3040, "span_ids": ["Series.nsmallest", "Series.isin", "Series.nlargest"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Series(_Frame):\n\n @derived_from(pd.Series)\n def nlargest(self, n=5, split_every=None):\n return aca(\n self,\n chunk=M.nlargest,\n aggregate=M.nlargest,\n meta=self._meta,\n token=\"series-nlargest\",\n split_every=split_every,\n n=n,\n )\n\n @derived_from(pd.Series)\n def nsmallest(self, n=5, split_every=None):\n return aca(\n self,\n chunk=M.nsmallest,\n aggregate=M.nsmallest,\n meta=self._meta,\n token=\"series-nsmallest\",\n split_every=split_every,\n n=n,\n )\n\n @derived_from(pd.Series)\n def isin(self, values):\n # Added just to get the different docstring for Series\n return super().isin(values)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args_list__build_agg_args_list.return.dict_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args_list__build_agg_args_list.return.dict_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 826, "end_line": 850, "span_ids": ["_build_agg_args_list"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _build_agg_args_list(result_column, func, input_column):\n intermediate = _make_agg_id(\"list\", input_column)\n\n return dict(\n chunk_funcs=[\n (\n intermediate,\n _apply_func_to_column,\n dict(column=input_column, func=lambda s: s.apply(list)),\n )\n ],\n aggregate_funcs=[\n (\n intermediate,\n _apply_func_to_column,\n dict(\n column=intermediate,\n func=lambda s0: s0.apply(\n lambda chunks: list(it.chain.from_iterable(chunks))\n ),\n ),\n )\n ],\n finalizer=(result_column, itemgetter(intermediate), dict()),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": 
"\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_SeriesGroupBy_SeriesGroupBy.__init__.super___init___df_by_b": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_SeriesGroupBy_SeriesGroupBy.__init__.super___init___df_by_b", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1796, "end_line": 1817, "span_ids": ["SeriesGroupBy.__init__", "SeriesGroupBy"], "tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class SeriesGroupBy(_GroupBy):\n\n _token_prefix = \"series-groupby-\"\n\n def __init__(self, df, by=None, slice=None, **kwargs):\n # for any non series object, raise pandas-compat error message\n\n if isinstance(df, Series):\n if isinstance(by, Series):\n pass\n elif isinstance(by, list):\n if len(by) == 0:\n raise ValueError(\"No group keys passed!\")\n\n non_series_items = [item for item in by if not isinstance(item, Series)]\n # raise error from pandas, if applicable\n df._meta.groupby(non_series_items)\n else:\n # raise error from pandas, if applicable\n df._meta.groupby(by)\n\n super().__init__(df, by=by, slice=slice, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/hyperloglog.py__Implementation_of_Hype_compute_first_bit.return.33_bits_sum_axis_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/hyperloglog.py__Implementation_of_Hype_compute_first_bit.return.33_bits_sum_axis_1_", "embedding": null, "metadata": {"file_path": "dask/dataframe/hyperloglog.py", "file_name": "hyperloglog.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 22, "span_ids": ["compute_first_bit", "docstring"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"Implementation of HyperLogLog\n\nThis implements the HyperLogLog algorithm for cardinality estimation, found\nin\n\n Philippe Flajolet, \u00c9ric Fusy, Olivier Gandouet and Fr\u00e9d\u00e9ric Meunier.\n \"HyperLogLog: the analysis of a near-optimal cardinality estimation\n algorithm\". 2007 Conference on Analysis of Algorithms. 
Nice, France\n (2007)\n\n\"\"\"\nimport numpy as np\nimport pandas as pd\nfrom pandas.util import hash_pandas_object\n\n\ndef compute_first_bit(a):\n \"Compute the position of the first nonzero bit for each int in an array.\"\n # TODO: consider making this less memory-hungry\n bits = np.bitwise_and.outer(a, 1 << np.arange(32))\n bits = bits.cumsum(axis=1).astype(bool)\n return 33 - bits.sum(axis=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_BlockwiseReadCSV_BlockwiseReadCSV.__repr__.return._BlockwiseReadCSV_name_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_BlockwiseReadCSV_BlockwiseReadCSV.__repr__.return._BlockwiseReadCSV_name_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/csv.py", "file_name": "csv.py", "file_type": "text/x-python", "category": "implementation", "start_line": 114, "end_line": 163, "span_ids": ["BlockwiseReadCSV.__repr__", "BlockwiseReadCSV", "BlockwiseReadCSV.__init__"], "tokens": 242}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class BlockwiseReadCSV(Blockwise):\n \"\"\"\n Specialized Blockwise Layer for read_csv.\n\n Enables HighLevelGraph optimizations.\n \"\"\"\n\n def __init__(\n self,\n name,\n reader,\n blocks,\n is_first,\n head,\n header,\n kwargs,\n dtypes,\n columns,\n enforce,\n path,\n ):\n self.name = name\n self.blocks = blocks\n self.io_name = \"blockwise-io-\" + name\n dsk_io = CSVSubgraph(\n self.io_name,\n reader,\n blocks,\n is_first,\n head,\n header,\n kwargs,\n dtypes,\n columns,\n enforce,\n path,\n )\n super().__init__(\n self.name,\n \"i\",\n None,\n [(self.io_name, \"i\")],\n {self.io_name: (len(self.blocks),)},\n io_subgraph=(self.io_name, dsk_io),\n )\n\n def __repr__(self):\n return \"BlockwiseReadCSV<name='{}', n_blocks={}, columns={}>\".format(\n self.name, len(self.blocks), list(self.columns)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_math_import_ceil_lock.Lock_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_math_import_ceil_lock.Lock_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/io.py", "file_name": "io.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 21, "span_ids": ["imports"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from math import ceil\nfrom operator import getitem\nimport os\nfrom threading 
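A worked check of `compute_first_bit` above: `bitwise_and.outer` tests each of the 32 low bits, the boolean cumsum marks every position at or above the first set bit, so `33 - sum` is the 1-indexed position of that bit (and 33 for an all-zero input):

```python
import numpy as np

def compute_first_bit(a):
    # copied from the hyperloglog.py snapshot above
    bits = np.bitwise_and.outer(a, 1 << np.arange(32))
    bits = bits.cumsum(axis=1).astype(bool)
    return 33 - bits.sum(axis=1)

# 1 -> position 1, 2 -> 2, 8 (0b1000) -> 4, 1 << 31 -> 32
print(compute_first_bit(np.array([1, 2, 8, 1 << 31])))  # [ 1  2  4 32]
```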
import Lock\n\nimport pandas as pd\nimport numpy as np\nfrom tlz import merge\n\nfrom ...base import tokenize\nfrom ... import array as da\nfrom ...dataframe.core import new_dd_object\nfrom ...delayed import delayed\n\nfrom ..core import DataFrame, Series, Index, new_dd_object, has_parallel_type\nfrom ..shuffle import set_partition\nfrom ..utils import insert_meta_param_description, check_meta, make_meta, is_series_like\n\nfrom ...utils import M, ensure_dict\n\nlock = Lock()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py__meta_from_array__meta_from_array.return.meta__constructor_data_c": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py__meta_from_array__meta_from_array.return.meta__constructor_data_c", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/io.py", "file_name": "io.py", "file_type": "text/x-python", "category": "implementation", "start_line": 24, "end_line": 78, "span_ids": ["_meta_from_array"], "tokens": 520}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _meta_from_array(x, columns=None, index=None, meta=None):\n \"\"\" Create empty DataFrame or Series which has correct dtype \"\"\"\n\n if x.ndim > 2:\n raise ValueError(\n \"from_array does not input more than 2D array, got\"\n \" array with shape %r\" % (x.shape,)\n )\n\n if index is not None:\n if not isinstance(index, Index):\n raise ValueError(\"'index' must be an instance of dask.dataframe.Index\")\n index = index._meta\n\n if meta is None:\n meta = pd.DataFrame()\n\n if getattr(x.dtype, \"names\", None) is not None:\n # record array has named columns\n if columns is None:\n columns = list(x.dtype.names)\n elif np.isscalar(columns):\n raise ValueError(\"For a struct dtype, columns must be a list.\")\n elif not all(i in x.dtype.names for i in columns):\n extra = sorted(set(columns).difference(x.dtype.names))\n raise ValueError(\"dtype {0} doesn't have fields {1}\".format(x.dtype, extra))\n fields = x.dtype.fields\n dtypes = [fields[n][0] if n in fields else \"f8\" for n in columns]\n elif x.ndim == 1:\n if np.isscalar(columns) or columns is None:\n return meta._constructor_sliced(\n [], name=columns, dtype=x.dtype, index=index\n )\n elif len(columns) == 1:\n return meta._constructor(\n np.array([], dtype=x.dtype), columns=columns, index=index\n )\n raise ValueError(\n \"For a 1d array, columns must be a scalar or single element list\"\n )\n else:\n if np.isnan(x.shape[1]):\n raise ValueError(\"Shape along axis 1 must be known\")\n if columns is None:\n columns = list(range(x.shape[1])) if x.ndim == 2 else [0]\n elif len(columns) != x.shape[1]:\n raise ValueError(\n \"Number of column names must match width of the \"\n \"array. 
Got {0} names for {1} \"\n \"columns\".format(len(columns), x.shape[1])\n )\n dtypes = [x.dtype] * len(columns)\n\n data = {c: np.array([], dtype=dt) for (c, dt) in zip(columns, dtypes)}\n return meta._constructor(data, columns=columns, index=index)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_pandas_from_pandas.nrows.len_data_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_pandas_from_pandas.nrows.len_data_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/io.py", "file_name": "io.py", "file_type": "text/x-python", "category": "implementation", "start_line": 134, "end_line": 205, "span_ids": ["from_pandas"], "tokens": 749}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def from_pandas(data, npartitions=None, chunksize=None, sort=True, name=None):\n \"\"\"\n Construct a Dask DataFrame from a Pandas DataFrame\n\n This splits an in-memory Pandas dataframe into several parts and constructs\n a dask.dataframe from those parts on which Dask.dataframe can operate in\n parallel.\n\n Note that, despite parallelism, Dask.dataframe may not always be faster\n than Pandas. We recommend that you stay with Pandas for as long as\n possible before switching to Dask.dataframe.\n\n Parameters\n ----------\n data : pandas.DataFrame or pandas.Series\n The DataFrame/Series with which to construct a Dask DataFrame/Series\n npartitions : int, optional\n The number of partitions of the index to create. Note that depending on\n the size and index of the dataframe, the output may have fewer\n partitions than requested.\n chunksize : int, optional\n The number of rows per index partition to use.\n sort: bool\n Sort input first to obtain cleanly divided partitions or don't sort and\n don't get cleanly divided partitions\n name: string, optional\n An optional keyname for the dataframe. Defaults to hashing the input\n\n Returns\n -------\n dask.DataFrame or dask.Series\n A dask DataFrame/Series partitioned along the index\n\n Examples\n --------\n >>> from dask.dataframe import from_pandas\n >>> df = pd.DataFrame(dict(a=list('aabbcc'), b=list(range(6))),\n ... 
index=pd.date_range(start='20100101', periods=6))\n >>> ddf = from_pandas(df, npartitions=3)\n >>> ddf.divisions # doctest: +NORMALIZE_WHITESPACE\n (Timestamp('2010-01-01 00:00:00', freq='D'),\n Timestamp('2010-01-03 00:00:00', freq='D'),\n Timestamp('2010-01-05 00:00:00', freq='D'),\n Timestamp('2010-01-06 00:00:00', freq='D'))\n >>> ddf = from_pandas(df.a, npartitions=3) # Works with Series too!\n >>> ddf.divisions # doctest: +NORMALIZE_WHITESPACE\n (Timestamp('2010-01-01 00:00:00', freq='D'),\n Timestamp('2010-01-03 00:00:00', freq='D'),\n Timestamp('2010-01-05 00:00:00', freq='D'),\n Timestamp('2010-01-06 00:00:00', freq='D'))\n\n Raises\n ------\n TypeError\n If something other than a ``pandas.DataFrame`` or ``pandas.Series`` is\n passed in.\n\n See Also\n --------\n from_array : Construct a dask.DataFrame from an array that has record dtype\n read_csv : Construct a dask.DataFrame from a CSV file\n \"\"\"\n if isinstance(getattr(data, \"index\", None), pd.MultiIndex):\n raise NotImplementedError(\"Dask does not support MultiIndex Dataframes.\")\n\n if not has_parallel_type(data):\n raise TypeError(\"Input must be a pandas DataFrame or Series\")\n\n if (npartitions is None) == (chunksize is None):\n raise ValueError(\"Exactly one of npartitions and chunksize must be specified.\")\n\n nrows = len(data)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_pandas.if_chunksize_is_None__from_pandas.return.new_dd_object_dsk_name_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_pandas.if_chunksize_is_None__from_pandas.return.new_dd_object_dsk_name_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/io.py", "file_name": "io.py", "file_type": "text/x-python", "category": "implementation", "start_line": 207, "end_line": 229, "span_ids": ["from_pandas"], "tokens": 228}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def from_pandas(data, npartitions=None, chunksize=None, sort=True, name=None):\n # ... 
other code\n\n if chunksize is None:\n chunksize = int(ceil(nrows / npartitions))\n\n name = name or (\"from_pandas-\" + tokenize(data, chunksize))\n\n if not nrows:\n return new_dd_object({(name, 0): data}, name, data, [None, None])\n\n if sort and not data.index.is_monotonic_increasing:\n data = data.sort_index(ascending=True)\n if sort:\n divisions, locations = sorted_division_locations(\n data.index, chunksize=chunksize\n )\n else:\n locations = list(range(0, nrows, chunksize)) + [len(data)]\n divisions = [None] * len(locations)\n\n dsk = {\n (name, i): data.iloc[start:stop]\n for i, (start, stop) in enumerate(zip(locations[:-1], locations[1:]))\n }\n return new_dd_object(dsk, name, data, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowEngine._pandas_to_arrow_table_ArrowEngine.write_partition.if_return_metadata_.else_.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowEngine._pandas_to_arrow_table_ArrowEngine.write_partition.if_return_metadata_.else_.return._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 960, "end_line": 1024, "span_ids": ["ArrowEngine._pandas_to_arrow_table", "ArrowEngine.write_partition"], "tokens": 388}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowEngine(Engine):\n\n @classmethod\n def _pandas_to_arrow_table(\n cls, df: pd.DataFrame, preserve_index=False, schema=None\n ) -> pa.Table:\n table = pa.Table.from_pandas(df, preserve_index=preserve_index, schema=schema)\n return table\n\n @classmethod\n def write_partition(\n cls,\n df,\n path,\n fs,\n filename,\n partition_on,\n return_metadata,\n fmd=None,\n compression=None,\n index_cols=None,\n schema=None,\n **kwargs,\n ):\n _meta = None\n preserve_index = False\n if _index_in_schema(index_cols, schema):\n df.set_index(index_cols, inplace=True)\n preserve_index = True\n else:\n index_cols = []\n\n t = cls._pandas_to_arrow_table(df, preserve_index=preserve_index, schema=schema)\n\n if partition_on:\n md_list = _write_partitioned(\n t,\n path,\n filename,\n partition_on,\n fs,\n index_cols=index_cols,\n compression=compression,\n **kwargs,\n )\n if md_list:\n _meta = md_list[0]\n for i in range(1, len(md_list)):\n _append_row_groups(_meta, md_list[i])\n else:\n md_list = []\n with fs.open(fs.sep.join([path, filename]), \"wb\") as fil:\n pq.write_table(\n t,\n fil,\n compression=compression,\n metadata_collector=md_list,\n **kwargs,\n )\n if md_list:\n _meta = md_list[0]\n _meta.set_file_path(filename)\n # Return the schema needed to write the metadata\n if return_metadata:\n return [{\"schema\": t.schema, \"meta\": _meta}]\n else:\n return []", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", 
"class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_ParquetSubgraph.__getitem___ParquetSubgraph.__getitem__.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_ParquetSubgraph.__getitem___ParquetSubgraph.__getitem__.return._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 58, "end_line": 84, "span_ids": ["ParquetSubgraph.__getitem__"], "tokens": 154}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ParquetSubgraph(Layer):\n\n def __getitem__(self, key):\n try:\n name, i = key\n except ValueError:\n # too many / few values to unpack\n raise KeyError(key) from None\n\n if name != self.name:\n raise KeyError(key)\n\n if i not in self.part_ids:\n raise KeyError(key)\n\n part = self.parts[i]\n if not isinstance(part, list):\n part = [part]\n\n return (\n read_parquet_part,\n self.fs,\n self.engine.read_partition,\n self.meta,\n [p[\"piece\"] for p in part],\n self.columns,\n self.index,\n toolz.merge(part[0][\"kwargs\"], self.kwargs or {}),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_ParquetSubgraph.__len___ParquetSubgraph.map_tasks.return.self": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_ParquetSubgraph.__len___ParquetSubgraph.map_tasks.return.self", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 86, "end_line": 115, "span_ids": ["ParquetSubgraph.__len__", "ParquetSubgraph.cull", "ParquetSubgraph.get_dependencies", "ParquetSubgraph.map_tasks", "ParquetSubgraph.__iter__", "ParquetSubgraph.is_materialized"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ParquetSubgraph(Layer):\n\n def __len__(self):\n return len(self.part_ids)\n\n def __iter__(self):\n for i in self.part_ids:\n yield (self.name, i)\n\n def is_materialized(self):\n return False # Never materialized\n\n def get_dependencies(self, all_hlg_keys):\n return {k: set() for k in self}\n\n def cull(self, keys, all_hlg_keys):\n ret = ParquetSubgraph(\n name=self.name,\n engine=self.engine,\n fs=self.fs,\n meta=self.meta,\n columns=self.columns,\n index=self.index,\n parts=self.parts,\n kwargs=self.kwargs,\n part_ids={i for i in self.part_ids if (self.name, i) in keys},\n )\n return ret, 
ret.get_dependencies(all_hlg_keys)\n\n def map_tasks(self, func):\n # ParquetSubgraph has no input tasks\n return self", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_BlockwiseParquet_BlockwiseParquet.__repr__.return._BlockwiseParquet_name_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_BlockwiseParquet_BlockwiseParquet.__repr__.return._BlockwiseParquet_name_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 118, "end_line": 163, "span_ids": ["BlockwiseParquet.__repr__", "BlockwiseParquet", "BlockwiseParquet.__init__"], "tokens": 289}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class BlockwiseParquet(Blockwise):\n \"\"\"\n Specialized Blockwise Layer for read_parquet.\n\n Enables HighLevelGraph optimizations.\n \"\"\"\n\n def __init__(\n self, name, engine, fs, meta, columns, index, parts, kwargs, part_ids=None\n ):\n self.name = name\n self.engine = engine\n self.fs = fs\n self.meta = meta\n self.columns = columns\n self.index = index\n self.parts = parts\n self.kwargs = kwargs\n self.part_ids = list(range(len(parts))) if part_ids is None else part_ids\n\n self.io_name = \"blockwise-io-\" + name\n dsk_io = ParquetSubgraph(\n self.io_name,\n self.engine,\n self.fs,\n self.meta,\n self.columns,\n self.index,\n self.parts,\n self.kwargs,\n part_ids=self.part_ids,\n )\n\n super().__init__(\n self.name,\n \"i\",\n None,\n [(self.io_name, \"i\")],\n {self.io_name: (len(self.part_ids),)},\n io_subgraph=(self.io_name, dsk_io),\n )\n\n def __repr__(self):\n return \"BlockwiseParquet<name='{}', n_parts={}, columns={}>\".format(\n self.name, len(self.part_ids), list(self.columns)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py_read_sql_table.sa_read_sql_table.if_head_rows_0_.else_.if_divisions_is_None_and_.raise_ValueError_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py_read_sql_table.sa_read_sql_table.if_head_rows_0_.else_.if_divisions_is_None_and_.raise_ValueError_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/sql.py", "file_name": "sql.py", "file_type": "text/x-python", "category": "implementation", "start_line": 102, "end_line": 160, "span_ids": ["read_sql_table"], "tokens": 552}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": 
{}, "text": "def read_sql_table(\n table,\n uri,\n index_col,\n divisions=None,\n npartitions=None,\n limits=None,\n columns=None,\n bytes_per_chunk=\"256 MiB\",\n head_rows=5,\n schema=None,\n meta=None,\n engine_kwargs=None,\n **kwargs,\n):\n import sqlalchemy as sa\n from sqlalchemy import sql\n from sqlalchemy.sql import elements\n\n if index_col is None:\n raise ValueError(\"Must specify index column to partition on\")\n\n engine_kwargs = {} if engine_kwargs is None else engine_kwargs\n engine = sa.create_engine(uri, **engine_kwargs)\n m = sa.MetaData()\n if isinstance(table, str):\n table = sa.Table(table, m, autoload=True, autoload_with=engine, schema=schema)\n\n index = table.columns[index_col] if isinstance(index_col, str) else index_col\n if not isinstance(index_col, (str, elements.Label)):\n raise ValueError(\n \"Use label when passing an SQLAlchemy instance as the index (%s)\" % index\n )\n if divisions and npartitions:\n raise TypeError(\"Must supply either divisions or npartitions, not both\")\n\n columns = (\n [(table.columns[c] if isinstance(c, str) else c) for c in columns]\n if columns\n else list(table.columns)\n )\n if index_col not in columns:\n columns.append(\n table.columns[index_col] if isinstance(index_col, str) else index_col\n )\n\n if isinstance(index_col, str):\n kwargs[\"index_col\"] = index_col\n else:\n # function names get pandas auto-named\n kwargs[\"index_col\"] = index_col.name\n\n if head_rows > 0:\n # derive metadata from first few rows\n q = sql.select(columns).limit(head_rows).select_from(table)\n head = pd.read_sql(q, engine, **kwargs)\n\n if head.empty:\n # no results at all\n name = table.name\n schema = table.schema\n head = pd.read_sql_table(name, uri, schema=schema, index_col=index_col)\n return from_pandas(head, npartitions=1)\n\n bytes_per_row = (head.memory_usage(deep=True, index=True)).sum() / head_rows\n if meta is None:\n meta = head.iloc[:0]\n elif meta is None:\n raise ValueError(\"Must provide meta if head_rows is 0\")\n else:\n if divisions is None and npartitions is None:\n raise ValueError(\n \"Must provide divisions or npartitions when using explicit meta.\"\n )\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py_to_sql.if_not_isinstance_uri_st_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py_to_sql.if_not_isinstance_uri_st_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/sql.py", "file_name": "sql.py", "file_type": "text/x-python", "category": "implementation", "start_line": 354, "end_line": 422, "span_ids": ["_extra_deps", "to_sql"], "tokens": 497}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_sql(\n df,\n name: str,\n uri: str,\n schema=None,\n if_exists: str = \"fail\",\n index: bool = True,\n index_label=None,\n chunksize=None,\n dtype=None,\n method=None,\n compute=True,\n parallel=False,\n):\n if not isinstance(uri, str):\n raise ValueError(f\"Expected URI to be a string, got {type(uri)}.\")\n\n # This is the only argument we add on top of what Pandas supports\n kwargs = dict(\n name=name,\n con=uri,\n schema=schema,\n if_exists=if_exists,\n index=index,\n index_label=index_label,\n chunksize=chunksize,\n dtype=dtype,\n )\n\n if method:\n if not PANDAS_GT_0240:\n raise NotImplementedError(\n \"'method' requires pandas>=0.24.0. You have version %s\" % PANDAS_VERSION\n )\n else:\n kwargs[\"method\"] = method\n\n def make_meta(meta):\n return meta.to_sql(**kwargs)\n\n make_meta = delayed(make_meta)\n meta_task = make_meta(df._meta)\n\n # Partitions should always append to the empty table created from `meta` above\n worker_kwargs = dict(kwargs, if_exists=\"append\")\n\n if parallel:\n # Perform the meta insert, then one task that inserts all blocks concurrently:\n result = [\n _extra_deps(\n d.to_sql,\n extras=meta_task,\n **worker_kwargs,\n dask_key_name=\"to_sql-%s\" % tokenize(d, **worker_kwargs),\n )\n for d in df.to_delayed()\n ]\n else:\n # Chain the \"meta\" insert and each block's insert\n result = []\n last = meta_task\n for d in df.to_delayed():\n result.append(\n _extra_deps(\n d.to_sql,\n extras=last,\n **worker_kwargs,\n dask_key_name=\"to_sql-%s\" % tokenize(d, **worker_kwargs),\n )\n )\n last = result[-1]\n result = dask.delayed(result)\n\n if compute:\n dask.compute(result)\n else:\n return result\n\n\n@delayed\ndef _extra_deps(func, *args, extras=None, **kwargs):\n return func(*args, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_optimize_blockwise_parquet_test_optimize_blockwise_parquet.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_optimize_blockwise_parquet_test_optimize_blockwise_parquet.None_2", "embedding": null, "metadata": {"file_path": 
"dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2324, "end_line": 2365, "span_ids": ["test_optimize_blockwise_parquet"], "tokens": 338}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_optimize_blockwise_parquet(tmpdir):\n\n size = 40\n npartitions = 2\n tmp = str(tmpdir)\n df = pd.DataFrame({\"a\": np.arange(size, dtype=np.int32)})\n expect = dd.from_pandas(df, npartitions=npartitions)\n expect.to_parquet(tmp)\n ddf = dd.read_parquet(tmp)\n\n # `ddf` should now have ONE Blockwise layer\n layers = ddf.__dask_graph__().layers\n assert len(layers) == 1\n assert isinstance(list(layers.values())[0], Blockwise)\n\n # Check single-layer result\n assert_eq(ddf, expect)\n\n # Increment by 1\n ddf += 1\n expect += 1\n\n # Increment by 10\n ddf += 10\n expect += 10\n\n # `ddf` should now have THREE Blockwise layers\n layers = ddf.__dask_graph__().layers\n assert len(layers) == 3\n assert all(isinstance(layer, Blockwise) for layer in layers.values())\n\n # Check that `optimize_blockwise` fuses all three\n # `Blockwise` layers together\n keys = [(ddf._name, i) for i in range(npartitions)]\n graph = optimize_blockwise(ddf.__dask_graph__(), keys)\n layers = graph.layers\n name = list(layers.keys())[0]\n assert len(layers) == 1\n assert isinstance(layers[name], Blockwise)\n\n # Check final result\n assert_eq(ddf, expect)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_divisions_with_null_partition_test_divisions_with_null_partition.assert_ddf_read_divisions": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_divisions_with_null_partition_test_divisions_with_null_partition.assert_ddf_read_divisions", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2815, "end_line": 2821, "span_ids": ["test_divisions_with_null_partition"], "tokens": 115}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_divisions_with_null_partition(tmpdir, engine):\n df = pd.DataFrame({\"a\": [1, 2, None, None], \"b\": [1, 2, 3, 4]})\n ddf = dd.from_pandas(df, npartitions=2)\n ddf.to_parquet(str(tmpdir), engine=engine, write_index=False)\n\n ddf_read = dd.read_parquet(str(tmpdir), engine=engine, index=\"a\")\n assert ddf_read.divisions == (None, None, None)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parquet_pyarrow_write_empty_metadata_test_parquet_pyarrow_write_empty_metadata.try_.except_AttributeError_.pytest_fail_Unexpected_A": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parquet_pyarrow_write_empty_metadata_test_parquet_pyarrow_write_empty_metadata.try_.except_AttributeError_.pytest_fail_Unexpected_A", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2824, "end_line": 2849, "span_ids": ["test_parquet_pyarrow_write_empty_metadata"], "tokens": 236}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_parquet_pyarrow_write_empty_metadata(tmpdir):\n # https://github.com/dask/dask/issues/6600\n tmpdir = str(tmpdir)\n\n df_a = dask.delayed(pd.DataFrame.from_dict)(\n {\"x\": [], \"y\": []}, dtype=(\"int\", \"int\")\n )\n df_b = dask.delayed(pd.DataFrame.from_dict)(\n {\"x\": [1, 1, 2, 2], \"y\": [1, 0, 1, 0]}, dtype=(\"int64\", \"int64\")\n )\n df_c = dask.delayed(pd.DataFrame.from_dict)(\n {\"x\": [1, 2, 1, 2], \"y\": [1, 0, 1, 0]}, dtype=(\"int64\", \"int64\")\n )\n\n df = dd.from_delayed([df_a, df_b, df_c])\n\n try:\n df.to_parquet(\n tmpdir,\n engine=\"pyarrow\",\n partition_on=[\"x\"],\n append=False,\n )\n\n except AttributeError:\n pytest.fail(\"Unexpected AttributeError\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parquet_pyarrow_write_empty_metadata_append_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parquet_pyarrow_write_empty_metadata_append_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2852, "end_line": 2886, "span_ids": ["test_parquet_pyarrow_write_empty_metadata_append"], "tokens": 329}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_parquet_pyarrow_write_empty_metadata_append(tmpdir):\n # https://github.com/dask/dask/issues/6600\n tmpdir = str(tmpdir)\n\n df_a = dask.delayed(pd.DataFrame.from_dict)(\n {\"x\": [1, 1, 2, 2], \"y\": [1, 0, 1, 0]}, dtype=(\"int64\", \"int64\")\n )\n df_b = dask.delayed(pd.DataFrame.from_dict)(\n {\"x\": [1, 2, 1, 2], \"y\": [2, 0, 2, 0]}, dtype=(\"int64\", \"int64\")\n )\n\n df1 = dd.from_delayed([df_a, df_b])\n df1.to_parquet(\n tmpdir,\n engine=\"pyarrow\",\n partition_on=[\"x\"],\n 
append=False,\n )\n\n df_c = dask.delayed(pd.DataFrame.from_dict)(\n {\"x\": [], \"y\": []}, dtype=(\"int64\", \"int64\")\n )\n df_d = dask.delayed(pd.DataFrame.from_dict)(\n {\"x\": [3, 3, 4, 4], \"y\": [1, 0, 1, 0]}, dtype=(\"int64\", \"int64\")\n )\n\n df2 = dd.from_delayed([df_c, df_d])\n df2.to_parquet(\n tmpdir,\n engine=\"pyarrow\",\n partition_on=[\"x\"],\n append=True,\n ignore_divisions=True,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_passing_engine_as_uri_raises_helpful_error_test_passing_engine_as_uri_raises_helpful_error.with_tmpfile_as_f_.with_pytest_raises_ValueE.ddf_to_sql_test_engine": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_passing_engine_as_uri_raises_helpful_error_test_passing_engine_as_uri_raises_helpful_error.with_tmpfile_as_f_.with_pytest_raises_ValueE.ddf_to_sql_test_engine", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_sql.py", "file_name": "test_sql.py", "file_type": "text/x-python", "category": "test", "start_line": 62, "end_line": 73, "span_ids": ["test_passing_engine_as_uri_raises_helpful_error"], "tokens": 136}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_passing_engine_as_uri_raises_helpful_error(db):\n # https://github.com/dask/dask/issues/6473\n from sqlalchemy import create_engine\n\n df = pd.DataFrame([{\"i\": i, \"s\": str(i) * 2} for i in range(4)])\n ddf = dd.from_pandas(df, npartitions=2)\n\n with tmpfile() as f:\n db = \"sqlite:///%s\" % f\n engine = create_engine(db)\n with pytest.raises(ValueError, match=\"Expected URI to be a string\"):\n ddf.to_sql(\"test\", engine, if_exists=\"replace\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_meta_test_meta_no_head_rows.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_meta_test_meta_no_head_rows.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_sql.py", "file_name": "test_sql.py", "file_type": "text/x-python", "category": "test", "start_line": 249, "end_line": 285, "span_ids": ["test_meta_no_head_rows", "test_meta"], "tokens": 268}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_meta(db):\n data = read_sql_table(\n \"test\", db, index_col=\"number\", meta=dd.from_pandas(df, npartitions=1)\n 
).compute()\n assert (data.name == df.name).all()\n assert data.index.name == \"number\"\n assert_eq(data, df)\n\n\ndef test_meta_no_head_rows(db):\n data = read_sql_table(\n \"test\",\n db,\n index_col=\"number\",\n meta=dd.from_pandas(df, npartitions=1),\n npartitions=2,\n head_rows=0,\n )\n assert len(data.divisions) == 3\n data = data.compute()\n assert (data.name == df.name).all()\n assert data.index.name == \"number\"\n assert_eq(data, df)\n\n data = read_sql_table(\n \"test\",\n db,\n index_col=\"number\",\n meta=dd.from_pandas(df, npartitions=1),\n divisions=[0, 3, 6],\n head_rows=0,\n )\n assert len(data.divisions) == 3\n data = data.compute()\n assert (data.name == df.name).all()\n assert data.index.name == \"number\"\n assert_eq(data, df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_no_meta_no_head_rows_test_datetimes.with_tmpfile_as_f_.assert_eq_data_map_partit": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_no_meta_no_head_rows_test_datetimes.with_tmpfile_as_f_.assert_eq_data_map_partit", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_sql.py", "file_name": "test_sql.py", "file_type": "text/x-python", "category": "test", "start_line": 288, "end_line": 314, "span_ids": ["test_range", "test_no_meta_no_head_rows", "test_datetimes"], "tokens": 274}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_no_meta_no_head_rows(db):\n with pytest.raises(ValueError):\n read_sql_table(\"test\", db, index_col=\"number\", head_rows=0, npartitions=1)\n\n\ndef test_range(db):\n data = read_sql_table(\"test\", db, npartitions=2, index_col=\"number\", limits=[1, 4])\n assert data.index.min().compute() == 1\n assert data.index.max().compute() == 4\n\n\ndef test_datetimes():\n import datetime\n\n now = datetime.datetime.now()\n d = datetime.timedelta(seconds=1)\n df = pd.DataFrame(\n {\"a\": list(\"ghjkl\"), \"b\": [now + i * d for i in range(2, -3, -1)]}\n )\n with tmpfile() as f:\n uri = \"sqlite:///%s\" % f\n df.to_sql(\"test\", uri, index=False, if_exists=\"replace\")\n data = read_sql_table(\"test\", uri, npartitions=2, index_col=\"b\")\n assert data.index.dtype.kind == \"M\"\n assert data.divisions[0] == df.b.min()\n df2 = df.set_index(\"b\")\n assert_eq(data.map_partitions(lambda x: x.sort_index()), df2.sort_index())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_key_stringify_key_stringify.return.task": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_key_stringify_key_stringify.return.task", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", 
"file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 29, "end_line": 48, "span_ids": ["key_stringify"], "tokens": 199}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def key_stringify(task):\n \"\"\"Convert all keys in `task` to strings.\n\n This is a fast version of distributed.utils.str_graph()\n that only handles keys of the from: `(\"a string\", ...)`\n \"\"\"\n from distributed.utils import tokey\n\n typ = type(task)\n if typ is tuple and task and callable(task[0]):\n return (task[0],) + tuple(key_stringify(x) for x in task[1:])\n if typ is list:\n return [key_stringify(v) for v in task]\n if typ is dict:\n return {k: key_stringify(v) for k, v in task.items()}\n if typ is tuple and task and type(task[0]) is str:\n return tokey(task)\n elif typ is tuple: # If the tuple itself isn't a key, check its elements\n return tuple(key_stringify(v) for v in task)\n return task", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_SimpleShuffleLayer_SimpleShuffleLayer.__dask_distributed_pack__.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_SimpleShuffleLayer_SimpleShuffleLayer.__dask_distributed_pack__.return._", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 51, "end_line": 153, "span_ids": ["SimpleShuffleLayer._dict", "SimpleShuffleLayer.get_output_keys", "SimpleShuffleLayer.__iter__", "SimpleShuffleLayer.__getitem__", "SimpleShuffleLayer.__len__", "SimpleShuffleLayer.__dask_distributed_pack__", "SimpleShuffleLayer.__repr__", "SimpleShuffleLayer.__reduce__", "SimpleShuffleLayer", "SimpleShuffleLayer.__init__", "SimpleShuffleLayer.is_materialized"], "tokens": 674}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class SimpleShuffleLayer(Layer):\n \"\"\"Simple HighLevelGraph Shuffle layer\n\n High-level graph layer for a simple shuffle operation in which\n each output partition depends on all input partitions.\n\n Parameters\n ----------\n name : str\n Name of new shuffled output collection.\n column : str or list of str\n Column(s) to be used to map rows to output partitions (by hashing).\n npartitions : int\n Number of output partitions.\n npartitions_input : int\n Number of partitions in the original (un-shuffled) DataFrame.\n ignore_index: bool, default False\n Ignore index during shuffle. 
If ``True``, performance may improve,\n but index values will not be preserved.\n name_input : str\n Name of input collection.\n meta_input : pd.DataFrame-like object\n Empty metadata of input collection.\n parts_out : list of int (optional)\n List of required output-partition indices.\n \"\"\"\n\n def __init__(\n self,\n name,\n column,\n npartitions,\n npartitions_input,\n ignore_index,\n name_input,\n meta_input,\n parts_out=None,\n ):\n self.name = name\n self.column = column\n self.npartitions = npartitions\n self.npartitions_input = npartitions_input\n self.ignore_index = ignore_index\n self.name_input = name_input\n self.meta_input = meta_input\n self.parts_out = parts_out or range(npartitions)\n\n def get_output_keys(self):\n return {(self.name, part) for part in self.parts_out}\n\n def __repr__(self):\n return \"SimpleShuffleLayer<name='{}', npartitions={}>\".format(\n self.name, self.npartitions\n )\n\n def is_materialized(self):\n return hasattr(self, \"_cached_dict\")\n\n @property\n def _dict(self):\n \"\"\"Materialize full dict representation\"\"\"\n if hasattr(self, \"_cached_dict\"):\n return self._cached_dict\n else:\n dsk = self._construct_graph()\n self._cached_dict = dsk\n return self._cached_dict\n\n def __getitem__(self, key):\n return self._dict[key]\n\n def __iter__(self):\n return iter(self._dict)\n\n def __len__(self):\n return len(self._dict)\n\n def __reduce__(self):\n attrs = [\n \"name\",\n \"column\",\n \"npartitions\",\n \"npartitions_input\",\n \"ignore_index\",\n \"name_input\",\n \"meta_input\",\n \"parts_out\",\n ]\n return (SimpleShuffleLayer, tuple(getattr(self, attr) for attr in attrs))\n\n def __dask_distributed_pack__(self):\n from distributed.protocol.serialize import to_serialize\n\n return {\n \"name\": self.name,\n \"column\": self.column,\n \"npartitions\": self.npartitions,\n \"npartitions_input\": self.npartitions_input,\n \"ignore_index\": self.ignore_index,\n \"name_input\": self.name_input,\n \"meta_input\": to_serialize(self.meta_input),\n \"parts_out\": list(self.parts_out),\n }", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_SimpleShuffleLayer.__dask_distributed_unpack___SimpleShuffleLayer.__dask_distributed_unpack__.dependencies_update_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_SimpleShuffleLayer.__dask_distributed_unpack___SimpleShuffleLayer.__dask_distributed_unpack__.dependencies_update_", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 155, "end_line": 177, "span_ids": ["SimpleShuffleLayer.__dask_distributed_unpack__"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class SimpleShuffleLayer(Layer):\n\n @classmethod\n def __dask_distributed_unpack__(cls, state, dsk, dependencies):\n from distributed.worker import dumps_task\n from distributed.utils import tokey\n\n # msgpack will convert 
lists into tuples, here\n # we convert them back to lists\n if isinstance(state[\"column\"], tuple):\n state[\"column\"] = list(state[\"column\"])\n if \"inputs\" in state:\n state[\"inputs\"] = list(state[\"inputs\"])\n\n # Materialize the layer\n raw = dict(cls(**state))\n\n # Convert all keys to strings and dump tasks\n raw = {tokey(k): key_stringify(v) for k, v in raw.items()}\n dsk.update(toolz.valmap(dumps_task, raw))\n\n # TODO: use shuffle-knowledge to calculate dependencies more efficiently\n dependencies.update(\n {k: keys_in_tasks(dsk, [v], as_list=True) for k, v in raw.items()}\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_SimpleShuffleLayer._keys_to_parts_SimpleShuffleLayer._cull_dependencies.return.deps": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_SimpleShuffleLayer._keys_to_parts_SimpleShuffleLayer._cull_dependencies.return.deps", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 179, "end_line": 205, "span_ids": ["SimpleShuffleLayer._cull_dependencies", "SimpleShuffleLayer._keys_to_parts"], "tokens": 196}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class SimpleShuffleLayer(Layer):\n\n def _keys_to_parts(self, keys):\n \"\"\"Simple utility to convert keys to partition indices.\"\"\"\n parts = set()\n for key in keys:\n try:\n _name, _part = key\n except ValueError:\n continue\n if _name != self.name:\n continue\n parts.add(_part)\n return parts\n\n def _cull_dependencies(self, keys, parts_out=None):\n \"\"\"Determine the necessary dependencies to produce `keys`.\n\n For a simple shuffle, output partitions always depend on\n all input partitions. 
This method does not require graph\n materialization.\n \"\"\"\n deps = defaultdict(set)\n parts_out = parts_out or self._keys_to_parts(keys)\n for part in parts_out:\n deps[(self.name, part)] |= {\n (self.name_input, i) for i in range(self.npartitions_input)\n }\n return deps", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_SimpleShuffleLayer._cull_SimpleShuffleLayer.cull.if_parts_out_self_part.else_.return.self_culled_deps": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_SimpleShuffleLayer._cull_SimpleShuffleLayer.cull.if_parts_out_self_part.else_.return.self_culled_deps", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 207, "end_line": 233, "span_ids": ["SimpleShuffleLayer._cull", "SimpleShuffleLayer.cull"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class SimpleShuffleLayer(Layer):\n\n def _cull(self, parts_out):\n return SimpleShuffleLayer(\n self.name,\n self.column,\n self.npartitions,\n self.npartitions_input,\n self.ignore_index,\n self.name_input,\n self.meta_input,\n parts_out=parts_out,\n )\n\n def cull(self, keys, all_keys):\n \"\"\"Cull a SimpleShuffleLayer HighLevelGraph layer.\n\n The underlying graph will only include the necessary\n tasks to produce the keys (indices) included in `parts_out`.\n Therefore, \"culling\" the layer only requires us to reset this\n parameter.\n \"\"\"\n parts_out = self._keys_to_parts(keys)\n culled_deps = self._cull_dependencies(keys, parts_out=parts_out)\n if parts_out != self.parts_out:\n culled_layer = self._cull(parts_out)\n return culled_layer, culled_deps\n else:\n return self, culled_deps", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_SimpleShuffleLayer._construct_graph_SimpleShuffleLayer._construct_graph.return.dsk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_SimpleShuffleLayer._construct_graph_SimpleShuffleLayer._construct_graph.return.dsk", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 235, "end_line": 266, "span_ids": ["SimpleShuffleLayer._construct_graph"], "tokens": 252}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, 
"text": "class SimpleShuffleLayer(Layer):\n\n def _construct_graph(self):\n \"\"\"Construct graph for a simple shuffle operation.\"\"\"\n\n shuffle_group_name = \"group-\" + self.name\n shuffle_split_name = \"split-\" + self.name\n\n dsk = {}\n for part_out in self.parts_out:\n _concat_list = [\n (shuffle_split_name, part_out, part_in)\n for part_in in range(self.npartitions_input)\n ]\n dsk[(self.name, part_out)] = (_concat, _concat_list, self.ignore_index)\n for _, _part_out, _part_in in _concat_list:\n dsk[(shuffle_split_name, _part_out, _part_in)] = (\n getitem,\n (shuffle_group_name, _part_in),\n _part_out,\n )\n if (shuffle_group_name, _part_in) not in dsk:\n dsk[(shuffle_group_name, _part_in)] = (\n shuffle_group,\n (self.name_input, _part_in),\n self.column,\n 0,\n self.npartitions,\n self.npartitions,\n self.ignore_index,\n self.npartitions,\n )\n\n return dsk", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_ShuffleLayer_ShuffleLayer.__dask_distributed_pack__.return.ret": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_ShuffleLayer_ShuffleLayer.__dask_distributed_pack__.return.ret", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 269, "end_line": 356, "span_ids": ["ShuffleLayer.__init__", "ShuffleLayer", "ShuffleLayer.__repr__", "ShuffleLayer.__dask_distributed_pack__", "ShuffleLayer.__reduce__"], "tokens": 614}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ShuffleLayer(SimpleShuffleLayer):\n \"\"\"Shuffle-stage HighLevelGraph layer\n\n High-level graph layer corresponding to a single stage of\n a multi-stage inter-partition shuffle operation.\n\n Stage: (shuffle-group) -> (shuffle-split) -> (shuffle-join)\n\n Parameters\n ----------\n name : str\n Name of new (partially) shuffled collection.\n column : str or list of str\n Column(s) to be used to map rows to output partitions (by hashing).\n inputs : list of tuples\n Each tuple dictates the data movement for a specific partition.\n stage : int\n Index of the current shuffle stage.\n npartitions : int\n Number of output partitions for the full (multi-stage) shuffle.\n npartitions_input : int\n Number of partitions in the original (un-shuffled) DataFrame.\n k : int\n A partition is split into this many groups during each stage.\n ignore_index: bool, default False\n Ignore index during shuffle. 
If ``True``, performance may improve,\n but index values will not be preserved.\n name_input : str\n Name of input collection.\n meta_input : pd.DataFrame-like object\n Empty metadata of input collection.\n parts_out : list of int (optional)\n List of required output-partition indices.\n \"\"\"\n\n def __init__(\n self,\n name,\n column,\n inputs,\n stage,\n npartitions,\n npartitions_input,\n nsplits,\n ignore_index,\n name_input,\n meta_input,\n parts_out=None,\n ):\n self.column = column\n self.name = name\n self.inputs = inputs\n self.stage = stage\n self.npartitions = npartitions\n self.npartitions_input = npartitions_input\n self.nsplits = nsplits\n self.ignore_index = ignore_index\n self.name_input = name_input\n self.meta_input = meta_input\n self.parts_out = parts_out or range(len(inputs))\n\n def __repr__(self):\n return \"ShuffleLayer<name='{}', stage={}, nsplits={}, npartitions={}>\".format(\n self.name, self.stage, self.nsplits, self.npartitions\n )\n\n def __reduce__(self):\n attrs = [\n \"name\",\n \"column\",\n \"inputs\",\n \"stage\",\n \"npartitions\",\n \"npartitions_input\",\n \"nsplits\",\n \"ignore_index\",\n \"name_input\",\n \"meta_input\",\n \"parts_out\",\n ]\n return (ShuffleLayer, tuple(getattr(self, attr) for attr in attrs))\n\n def __dask_distributed_pack__(self):\n ret = super().__dask_distributed_pack__()\n ret[\"inputs\"] = self.inputs\n ret[\"stage\"] = self.stage\n ret[\"nsplits\"] = self.nsplits\n return ret", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_ShuffleLayer._cull_dependencies_ShuffleLayer._cull.return.ShuffleLayer_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_ShuffleLayer._cull_dependencies_ShuffleLayer._cull.return.ShuffleLayer_", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 358, "end_line": 386, "span_ids": ["ShuffleLayer._cull_dependencies", "ShuffleLayer._cull"], "tokens": 217}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ShuffleLayer(SimpleShuffleLayer):\n\n def _cull_dependencies(self, keys, parts_out=None):\n \"\"\"Determine the necessary dependencies to produce `keys`.\n\n Does not require graph materialization.\n \"\"\"\n deps = defaultdict(set)\n parts_out = parts_out or self._keys_to_parts(keys)\n inp_part_map = {inp: i for i, inp in enumerate(self.inputs)}\n for part in parts_out:\n out = self.inputs[part]\n for k in range(self.nsplits):\n _part = inp_part_map[insert(out, self.stage, k)]\n deps[(self.name, part)].add((self.name_input, _part))\n return deps\n\n def _cull(self, parts_out):\n return ShuffleLayer(\n self.name,\n self.column,\n self.inputs,\n self.stage,\n self.npartitions,\n self.npartitions_input,\n self.nsplits,\n self.ignore_index,\n self.name_input,\n self.meta_input,\n parts_out=parts_out,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_ShuffleLayer._construct_graph_ShuffleLayer._construct_graph.return.dsk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_ShuffleLayer._construct_graph_ShuffleLayer._construct_graph.return.dsk", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 388, "end_line": 444, "span_ids": ["ShuffleLayer._construct_graph"], "tokens": 454}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ShuffleLayer(SimpleShuffleLayer):\n\n def _construct_graph(self):\n \"\"\"Construct graph for a \"rearrange-by-column\" stage.\"\"\"\n\n shuffle_group_name = \"group-\" + self.name\n shuffle_split_name = \"split-\" + self.name\n\n dsk = {}\n inp_part_map = {inp: i for i, inp in enumerate(self.inputs)}\n for part in self.parts_out:\n\n out = self.inputs[part]\n\n _concat_list = [] # get_item tasks to concat for this output partition\n for i in range(self.nsplits):\n # Get out each individual dataframe piece from the dicts\n _inp = insert(out, self.stage, i)\n _idx = out[self.stage]\n _concat_list.append((shuffle_split_name, _idx, _inp))\n\n # concatenate those pieces together, with their friends\n dsk[(self.name, part)] = (_concat, _concat_list, self.ignore_index)\n\n for _, _idx, _inp in _concat_list:\n dsk[(shuffle_split_name, _idx, _inp)] = (\n getitem,\n (shuffle_group_name, _inp),\n _idx,\n )\n\n if (shuffle_group_name, _inp) not in dsk:\n\n # Initial partitions (output of previous stage)\n _part = inp_part_map[_inp]\n if self.stage == 0:\n if _part < self.npartitions_input:\n input_key = (self.name_input, _part)\n else:\n # In order to make sure that to_serialize() serialize the\n # empty dataframe input, we add it as a key.\n input_key = (shuffle_group_name, _inp, \"empty\")\n dsk[input_key] = self.meta_input\n else:\n input_key = (self.name_input, _part)\n\n # Convert partition into dict of dataframe pieces\n dsk[(shuffle_group_name, _inp)] = (\n shuffle_group,\n input_key,\n self.column,\n self.stage,\n self.nsplits,\n self.npartitions_input,\n self.ignore_index,\n self.npartitions,\n )\n\n return dsk", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_rearrange_by_column_disk__noop.return.x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_rearrange_by_column_disk__noop.return.x", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 826, "end_line": 891, "span_ids": ["rearrange_by_column_disk", "_noop"], "tokens": 638}, "excluded_embed_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def rearrange_by_column_disk(df, column, npartitions=None, compute=False):\n \"\"\"Shuffle using local disk\n\n See Also\n --------\n rearrange_by_column_tasks:\n Same function, but using tasks rather than partd\n Has a more informative docstring\n \"\"\"\n if npartitions is None:\n npartitions = df.npartitions\n\n token = tokenize(df, column, npartitions)\n always_new_token = uuid.uuid1().hex\n\n p = (\"zpartd-\" + always_new_token,)\n dsk1 = {p: (maybe_buffered_partd(),)}\n\n # Partition data on disk\n name = \"shuffle-partition-\" + always_new_token\n dsk2 = {\n (name, i): (shuffle_group_3, key, column, npartitions, p)\n for i, key in enumerate(df.__dask_keys__())\n }\n\n dependencies = []\n layer = {}\n if compute:\n graph = HighLevelGraph.merge(df.dask, dsk1, dsk2)\n keys = [p, sorted(dsk2)]\n pp, values = compute_as_if_collection(DataFrame, graph, keys)\n dsk1 = {p: pp}\n dsk2 = dict(zip(sorted(dsk2), values))\n else:\n dependencies.append(df)\n\n # Barrier\n barrier_token = \"barrier-\" + always_new_token\n dsk3 = {barrier_token: (barrier, list(dsk2))}\n\n # Collect groups\n name1 = \"shuffle-collect-1\" + token\n dsk4 = {\n (name1, i): (collect, p, i, df._meta, barrier_token) for i in range(npartitions)\n }\n cleanup_token = \"cleanup-\" + always_new_token\n barrier_token2 = \"barrier2-\" + always_new_token\n # A task that depends on `cleanup-`, but has a small output\n dsk5 = {(barrier_token2, i): (barrier, part) for i, part in enumerate(dsk4)}\n # This indirectly depends on `cleanup-` and so runs after we're done using the disk\n dsk6 = {cleanup_token: (cleanup_partd_files, p, list(dsk5))}\n\n name = \"shuffle-collect-2\" + token\n dsk7 = {(name, i): (_noop, (name1, i), cleanup_token) for i in range(npartitions)}\n divisions = (None,) * (npartitions + 1)\n\n layer = toolz.merge(dsk1, dsk2, dsk3, dsk4, dsk5, dsk6, dsk7)\n graph = HighLevelGraph.from_collections(name, layer, dependencies=dependencies)\n return DataFrame(graph, name, df._meta, divisions)\n\n\ndef _noop(x, cleanup_token):\n \"\"\"\n A task that does nothing.\n \"\"\"\n return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_rearrange_by_column_tasks_rearrange_by_column_tasks.max_branch.max_branch_or_32": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_rearrange_by_column_tasks_rearrange_by_column_tasks.max_branch.max_branch_or_32", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 894, "end_line": 953, "span_ids": ["rearrange_by_column_tasks"], "tokens": 703}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": "def rearrange_by_column_tasks(\n df, column, max_branch=32, npartitions=None, ignore_index=False\n):\n \"\"\"Order divisions of DataFrame so that all values within column(s) align\n\n This enacts a task-based shuffle. It contains most of the tricky logic\n around the complex network of tasks. Typically before this function is\n called a new column, ``\"_partitions\"`` has been added to the dataframe,\n containing the output partition number of every row. This function\n produces a new dataframe where every row is in the proper partition. It\n accomplishes this by splitting each input partition into several pieces,\n and then concatenating pieces from different input partitions into output\n partitions. If there are enough partitions then it does this work in\n stages to avoid scheduling overhead.\n\n Lets explain the motivation for this further. Imagine that we have 1000\n input partitions and 1000 output partitions. In theory we could split each\n input into 1000 pieces, and then move the 1 000 000 resulting pieces\n around, and then concatenate them all into 1000 output groups. This would\n be fine, but the central scheduling overhead of 1 000 000 tasks would\n become a bottleneck. Instead we do this in stages so that we split each of\n the 1000 inputs into 30 pieces (we now have 30 000 pieces) move those\n around, concatenate back down to 1000, and then do the same process again.\n This has the same result as the full transfer, but now we've moved data\n twice (expensive) but done so with only 60 000 tasks (cheap).\n\n Note that the `column` input may correspond to a list of columns (rather\n than just a single column name). In this case, the `shuffle_group` and\n `shuffle_group_2` functions will use hashing to map each row to an output\n partition. This approach may require the same rows to be hased multiple\n times, but avoids the need to assign a new \"_partitions\" column.\n\n Parameters\n ----------\n df: dask.dataframe.DataFrame\n column: str or list\n A column name on which we want to split, commonly ``\"_partitions\"``\n which is assigned by functions upstream. This could also be a list of\n columns (in which case shuffle_group will create a hash array/column).\n max_branch: int\n The maximum number of splits per input partition. Defaults to 32.\n If there are more partitions than this then the shuffling will occur in\n stages in order to avoid creating npartitions**2 tasks\n Increasing this number increases scheduling overhead but decreases the\n number of full-dataset transfers that we have to make.\n npartitions: Optional[int]\n The desired number of output partitions\n\n Returns\n -------\n df3: dask.dataframe.DataFrame\n\n See also\n --------\n rearrange_by_column_disk: same operation, but uses partd\n rearrange_by_column: parent function that calls this or rearrange_by_column_disk\n shuffle_group: does the actual splitting per-partition\n \"\"\"\n\n max_branch = max_branch or 32\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_rearrange_by_column_tasks.if_npartitions_or_df_npa_rearrange_by_column_tasks.return.df2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_rearrange_by_column_tasks.if_npartitions_or_df_npa_rearrange_by_column_tasks.return.df2", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 955, "end_line": 1040, "span_ids": ["rearrange_by_column_tasks"], "tokens": 682}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def rearrange_by_column_tasks(\n df, column, max_branch=32, npartitions=None, ignore_index=False\n):\n # ... other code\n\n if (npartitions or df.npartitions) <= max_branch:\n # We are creating a small number of output partitions.\n # No need for staged shuffling. Staged shuffling will\n # sometimes require extra work/communication in this case.\n token = tokenize(df, column, npartitions)\n shuffle_name = f\"simple-shuffle-{token}\"\n npartitions = npartitions or df.npartitions\n shuffle_layer = SimpleShuffleLayer(\n shuffle_name,\n column,\n npartitions,\n df.npartitions,\n ignore_index,\n df._name,\n df._meta,\n )\n graph = HighLevelGraph.from_collections(\n shuffle_name, shuffle_layer, dependencies=[df]\n )\n return new_dd_object(graph, shuffle_name, df._meta, [None] * (npartitions + 1))\n\n n = df.npartitions\n stages = int(math.ceil(math.log(n) / math.log(max_branch)))\n if stages > 1:\n k = int(math.ceil(n ** (1 / stages)))\n else:\n k = n\n\n inputs = [tuple(digit(i, j, k) for j in range(stages)) for i in range(k ** stages)]\n\n npartitions_orig = df.npartitions\n token = tokenize(df, stages, column, n, k)\n for stage in range(stages):\n stage_name = f\"shuffle-{stage}-{token}\"\n stage_layer = ShuffleLayer(\n stage_name,\n column,\n inputs,\n stage,\n npartitions,\n n,\n k,\n ignore_index,\n df._name,\n df._meta,\n )\n graph = HighLevelGraph.from_collections(\n stage_name, stage_layer, dependencies=[df]\n )\n df = new_dd_object(graph, stage_name, df._meta, df.divisions)\n\n if npartitions is not None and npartitions != npartitions_orig:\n token = tokenize(df, npartitions)\n repartition_group_token = \"repartition-group-\" + token\n\n dsk = {\n (repartition_group_token, i): (\n shuffle_group_2,\n k,\n column,\n ignore_index,\n npartitions,\n )\n for i, k in enumerate(df.__dask_keys__())\n }\n\n repartition_get_name = \"repartition-get-\" + token\n\n for p in range(npartitions):\n dsk[(repartition_get_name, p)] = (\n shuffle_group_get,\n (repartition_group_token, p % npartitions_orig),\n p,\n )\n\n graph2 = HighLevelGraph.from_collections(\n repartition_get_name, dsk, dependencies=[df]\n )\n df2 = new_dd_object(\n graph2, repartition_get_name, df._meta, [None] * (npartitions + 1)\n )\n else:\n df2 = df\n df2.divisions = (None,) * (npartitions_orig + 1)\n\n return df2", 
"start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_fuse_roots_test_fuse_roots.hlg_validate_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_fuse_roots_test_fuse_roots.hlg_validate_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 4421, "end_line": 4431, "span_ids": ["test_fuse_roots"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_roots():\n pdf1 = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5, 6, 7, 8, 9], \"b\": [3, 5, 2, 5, 7, 2, 4, 2, 4]}\n )\n ddf1 = dd.from_pandas(pdf1, 2)\n pdf2 = pd.DataFrame({\"a\": [True, False, True] * 3, \"b\": [False, False, True] * 3})\n ddf2 = dd.from_pandas(pdf2, 2)\n\n res = ddf1.where(ddf2)\n hlg = fuse_roots(res.__dask_graph__(), keys=res.__dask_keys__())\n hlg.validate()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_attrs_dataframe_test_attrs_series.assert_s_fillna_1_attrs_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_attrs_dataframe_test_attrs_series.assert_s_fillna_1_attrs_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 4434, "end_line": 4451, "span_ids": ["test_attrs_series", "test_attrs_dataframe"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(not dd._compat.PANDAS_GT_100, reason=\"attrs introduced in 1.0.0\")\ndef test_attrs_dataframe():\n df = pd.DataFrame({\"A\": [1, 2], \"B\": [3, 4], \"C\": [5, 6]})\n df.attrs = {\"date\": \"2020-10-16\"}\n ddf = dd.from_pandas(df, 2)\n\n assert df.attrs == ddf.attrs\n assert df.abs().attrs == ddf.abs().attrs\n\n\n@pytest.mark.skipif(not dd._compat.PANDAS_GT_100, reason=\"attrs introduced in 1.0.0\")\ndef test_attrs_series():\n s = pd.Series([1, 2], name=\"A\")\n s.attrs[\"unit\"] = \"kg\"\n ds = dd.from_pandas(s, 2)\n\n assert s.attrs == ds.attrs\n assert s.fillna(1).attrs == ds.fillna(1).attrs", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", 
"class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_attrs_series_in_dataframes_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_attrs_series_in_dataframes_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 4454, "end_line": 4466, "span_ids": ["test_attrs_series_in_dataframes"], "tokens": 190}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(not dd._compat.PANDAS_GT_100, reason=\"attrs introduced in 1.0.0\")\n@pytest.mark.xfail(reason=\"df.iloc[:0] does not keep the series attrs\")\ndef test_attrs_series_in_dataframes():\n df = pd.DataFrame({\"A\": [1, 2], \"B\": [3, 4], \"C\": [5, 6]})\n df.A.attrs[\"unit\"] = \"kg\"\n ddf = dd.from_pandas(df, 2)\n\n # Fails because the pandas iloc method doesn't currently persist\n # the attrs dict for series in a dataframee. Dask uses df.iloc[:0]\n # when creating the _meta dataframe in make_meta_pandas(x, index=None).\n # Should start xpassing when df.iloc works. Remove the xfail then.\n assert df.A.attrs == ddf.A.attrs", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_groupby_concat_cudf_test_groupby_concat_cudf.assert_eq_res_dd_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_groupby_concat_cudf_test_groupby_concat_cudf.assert_eq_res_dd_compute_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 2098, "end_line": 2143, "span_ids": ["test_groupby_concat_cudf"], "tokens": 415}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"engine\", [\"pandas\", \"cudf\"])\ndef test_groupby_concat_cudf(engine):\n\n # NOTE: Issue #5643 Reproducer\n\n size = 6\n npartitions = 3\n d1 = pd.DataFrame(\n {\n \"a\": np.random.permutation(np.arange(size)),\n \"b\": np.random.randint(100, size=size),\n }\n )\n d2 = pd.DataFrame(\n {\n \"c\": np.random.permutation(np.arange(size)),\n \"d\": np.random.randint(100, size=size),\n }\n )\n\n if engine == \"cudf\":\n # NOTE: engine == \"cudf\" requires cudf/dask_cudf,\n # will be skipped by non-GPU CI.\n\n cudf = pytest.importorskip(\"cudf\")\n dask_cudf = pytest.importorskip(\"dask_cudf\")\n\n d1 = cudf.from_pandas(d1)\n d2 = cudf.from_pandas(d2)\n dd1 = dask_cudf.from_cudf(d1, 
npartitions)\n dd2 = dask_cudf.from_cudf(d2, npartitions)\n else:\n dd1 = dd.from_pandas(d1, npartitions)\n dd2 = dd.from_pandas(d2, npartitions)\n\n grouped_d1 = d1.groupby([\"a\"]).sum()\n grouped_d2 = d2.groupby([\"c\"]).sum()\n res = concat([grouped_d1, grouped_d2], axis=1)\n\n grouped_dd1 = dd1.groupby([\"a\"]).sum()\n grouped_dd2 = dd2.groupby([\"c\"]).sum()\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", UserWarning)\n res_dd = dd.concat([grouped_dd1, grouped_dd2], axis=1)\n\n assert_eq(res_dd.compute().sort_index(), res.sort_index())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_categorical_join_test_categorical_join.if_dd__compat_PANDAS_GT_1.else_.assert_expected_values_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_categorical_join_test_categorical_join.if_dd__compat_PANDAS_GT_1.else_.assert_expected_values_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 2146, "end_line": 2172, "span_ids": ["test_categorical_join"], "tokens": 282}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_categorical_join():\n # https://github.com/dask/dask/issues/6134\n df = pd.DataFrame(\n {\n \"join_col\": [\"a\", \"a\", \"b\", \"b\"],\n \"a\": [0, 0, 10, 10],\n }\n )\n df2 = pd.DataFrame({\"b\": [1, 2, 1, 2]}, index=[\"a\", \"a\", \"b\", \"b\"])\n\n ddf = dd.from_pandas(df, npartitions=2)\n ddf2 = dd.from_pandas(df2, npartitions=1)\n ddf[\"join_col\"] = ddf[\"join_col\"].astype(\"category\")\n ddf2.index = ddf2.index.astype(\"category\")\n\n expected = ddf.compute().join(ddf2.compute(), on=\"join_col\", how=\"left\")\n\n actual_dask = ddf.join(ddf2, on=\"join_col\", how=\"left\")\n assert actual_dask.join_col.dtype == \"category\"\n\n actual = actual_dask.compute()\n if dd._compat.PANDAS_GT_100:\n assert actual.join_col.dtype == \"category\"\n assert assert_eq(expected, actual)\n else:\n assert actual.join_col.dtype == \"object\"\n assert (expected.values == actual.values).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_categorical_merge_with_columns_missing_from_left_test_categorical_merge_with_columns_missing_from_left.assert_assert_eq_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_categorical_merge_with_columns_missing_from_left_test_categorical_merge_with_columns_missing_from_left.assert_assert_eq_expected", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", 
"file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 2175, "end_line": 2189, "span_ids": ["test_categorical_merge_with_columns_missing_from_left"], "tokens": 183}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_categorical_merge_with_columns_missing_from_left():\n df1 = pd.DataFrame({\"A\": [0, 1], \"B\": pd.Categorical([\"a\", \"b\"])})\n df2 = pd.DataFrame({\"C\": pd.Categorical([\"a\", \"b\"])})\n\n expected = pd.merge(df2, df1, left_index=True, right_on=\"A\")\n\n ddf1 = dd.from_pandas(df1, npartitions=2)\n ddf2 = dd.from_pandas(df2, npartitions=2)\n\n actual = dd.merge(ddf2, ddf1, left_index=True, right_on=\"A\").compute()\n assert actual.C.dtype == \"category\"\n assert actual.B.dtype == \"category\"\n assert actual.A.dtype == \"int64\"\n assert actual.index.dtype == \"int64\"\n assert assert_eq(expected, actual)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_categorical_merge_with_merge_column_cat_in_one_and_not_other_upcasts_test_categorical_merge_with_merge_column_cat_in_one_and_not_other_upcasts.assert_assert_eq_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_categorical_merge_with_merge_column_cat_in_one_and_not_other_upcasts_test_categorical_merge_with_merge_column_cat_in_one_and_not_other_upcasts.assert_assert_eq_expected", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 2192, "end_line": 2207, "span_ids": ["test_categorical_merge_with_merge_column_cat_in_one_and_not_other_upcasts"], "tokens": 215}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(not dd._compat.PANDAS_GT_0250, reason=\"Changes in categoricals\")\ndef test_categorical_merge_with_merge_column_cat_in_one_and_not_other_upcasts():\n df1 = pd.DataFrame({\"A\": pd.Categorical([0, 1]), \"B\": pd.Categorical([\"a\", \"b\"])})\n df2 = pd.DataFrame({\"C\": pd.Categorical([\"a\", \"b\"])})\n\n expected = pd.merge(df2, df1, left_index=True, right_on=\"A\")\n\n ddf1 = dd.from_pandas(df1, npartitions=2)\n ddf2 = dd.from_pandas(df2, npartitions=2)\n\n actual = dd.merge(ddf2, ddf1, left_index=True, right_on=\"A\").compute()\n assert actual.C.dtype == \"category\"\n assert actual.B.dtype == \"category\"\n assert actual.A.dtype == \"int64\"\n assert actual.index.dtype == \"int64\"\n assert assert_eq(expected, actual)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": 
"TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_categorical_merge_retains_category_dtype_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_categorical_merge_retains_category_dtype_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 2210, "end_line": 2227, "span_ids": ["test_categorical_merge_retains_category_dtype"], "tokens": 204}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_categorical_merge_retains_category_dtype():\n # https://github.com/dask/dask/issues/6142\n a = pd.DataFrame({\"A\": [0, 1, 2, 3], \"B\": [4, 5, 6, 7]})\n b = pd.DataFrame({\"A\": [0, 1, 2, 4], \"C\": [4, 5, 7, 7]})\n\n df1 = dd.from_pandas(a, 2)\n df1[\"A\"] = df1.A.astype(\"category\")\n\n df2 = dd.from_pandas(b, 2)\n df2[\"A\"] = df2.A.astype(\"category\")\n\n actual_dask = df1.merge(df2, on=\"A\")\n assert actual_dask.A.dtype == \"category\"\n\n if dd._compat.PANDAS_GT_100:\n actual = actual_dask.compute()\n assert actual.A.dtype == \"category\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_dataframe_shuffle_on_tasks_api_test_set_index_overlap.assert_eq_a_b_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_dataframe_shuffle_on_tasks_api_test_set_index_overlap.assert_eq_a_b_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 971, "end_line": 1010, "span_ids": ["test_dataframe_shuffle_on_tasks_api", "test_set_index_overlap"], "tokens": 401}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"ignore_index\", [None, True, False])\n@pytest.mark.parametrize(\n \"on\", [\"id\", \"name\", [\"id\", \"name\"], pd.Series([\"id\", \"name\"])]\n)\n@pytest.mark.parametrize(\"max_branch\", [None, 4])\ndef test_dataframe_shuffle_on_tasks_api(on, ignore_index, max_branch):\n # Make sure DataFrame.shuffle API returns the same result\n # whether the ``on`` argument is a list of column names,\n # or a separate DataFrame with equivalent values...\n df_in = dask.datasets.timeseries(\n \"2000\",\n \"2001\",\n types={\"value\": float, \"name\": str, \"id\": int},\n freq=\"2H\",\n partition_freq=\"1M\",\n seed=1,\n )\n if isinstance(on, str):\n ext_on = df_in[[on]].copy()\n else:\n ext_on = df_in[on].copy()\n df_out_1 = 
df_in.shuffle(\n on, shuffle=\"tasks\", ignore_index=ignore_index, max_branch=max_branch\n )\n df_out_2 = df_in.shuffle(ext_on, shuffle=\"tasks\", ignore_index=ignore_index)\n\n assert_eq(df_out_1, df_out_2, check_index=(not ignore_index))\n\n if ignore_index:\n assert df_out_1.index.dtype != df_in.index.dtype\n else:\n assert df_out_1.index.dtype == df_in.index.dtype\n\n\ndef test_set_index_overlap():\n A = pd.DataFrame({\"key\": [1, 2, 3, 4, 4, 5, 6, 7], \"value\": list(\"abcd\" * 2)})\n a = dd.from_pandas(A, npartitions=2)\n a = a.set_index(\"key\", sorted=True)\n b = a.repartition(divisions=a.divisions)\n assert_eq(a, b)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_shuffle_hlg_layer_test_shuffle_hlg_layer.assert_dsk_dict_culled_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_shuffle_hlg_layer_test_shuffle_hlg_layer.assert_dsk_dict_culled_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 1013, "end_line": 1049, "span_ids": ["test_shuffle_hlg_layer"], "tokens": 368}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_shuffle_hlg_layer():\n # This test checks that the `ShuffleLayer` HLG Layer\n # is used (as expected) for a multi-stage shuffle.\n ddf = dd.from_pandas(\n pd.DataFrame({\"a\": np.random.randint(0, 10, 100)}), npartitions=10\n )\n ddf_shuffled = ddf.shuffle(\"a\", max_branch=3, shuffle=\"tasks\")\n keys = [(ddf_shuffled._name, i) for i in range(ddf_shuffled.npartitions)]\n\n # Cull the HLG\n dsk = ddf_shuffled.__dask_graph__()\n dsk_culled = dsk.cull(set(keys))\n assert isinstance(dsk_culled, dask.highlevelgraph.HighLevelGraph)\n\n # Ensure we have ShuffleLayers\n assert any(\n isinstance(layer, dd.shuffle.ShuffleLayer) for layer in dsk.layers.values()\n )\n\n # Check that the ShuffleLayers are non-materialized\n for layer in dsk.layers.values():\n if isinstance(layer, dd.shuffle.ShuffleLayer):\n assert not hasattr(layer, \"_cached_dict\")\n\n # Make sure HLG culling reduces the graph size\n assert len(dsk_culled) < len(dsk)\n\n # Check ShuffleLayer names\n for name, layer in dsk.layers.items():\n if isinstance(layer, dd.shuffle.ShuffleLayer):\n assert name.startswith(\"shuffle-\")\n\n # Since we already culled the HLG,\n # culling the dictionary should not change the graph\n dsk_dict = dict(dsk_culled)\n dsk_dict_culled, _ = cull(dsk_dict, keys)\n assert dsk_dict_culled == dsk_dict", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_shuffle_hlg_layer_serialize_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_shuffle_hlg_layer_serialize_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 1052, "end_line": 1076, "span_ids": ["test_shuffle_hlg_layer_serialize"], "tokens": 223}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"npartitions\",\n [\n 10, # ShuffleLayer\n 1, # SimpleShuffleLayer\n ],\n)\ndef test_shuffle_hlg_layer_serialize(npartitions):\n ddf = dd.from_pandas(\n pd.DataFrame({\"a\": np.random.randint(0, 10, 100)}), npartitions=npartitions\n )\n ddf_shuffled = ddf.shuffle(\"a\", max_branch=3, shuffle=\"tasks\")\n\n # Ensure shuffle layers can be serialized and don't result in\n # the underlying low-level graph being materialized\n dsk = ddf_shuffled.__dask_graph__()\n for layer in dsk.layers.values():\n if not isinstance(layer, dd.shuffle.SimpleShuffleLayer):\n continue\n assert not hasattr(layer, \"_cached_dict\")\n layer_roundtrip = pickle.loads(pickle.dumps(layer))\n assert type(layer_roundtrip) == type(layer)\n assert not hasattr(layer_roundtrip, \"_cached_dict\")\n assert layer_roundtrip.keys() == layer.keys()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_abc_compute_layer_dependencies.return.ret": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_abc_compute_layer_dependencies.return.ret", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 38, "span_ids": ["imports", "compute_layer_dependencies"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import abc\nimport collections.abc\nfrom typing import (\n Any,\n Callable,\n Dict,\n Hashable,\n Optional,\n Set,\n Mapping,\n Iterable,\n Tuple,\n)\nimport copy\n\nimport tlz as toolz\n\nfrom .utils import ignoring\nfrom .base import is_dask_collection\nfrom .core import reverse_dict, keys_in_tasks\nfrom .utils_test import add, inc # noqa: F401\n\n\ndef compute_layer_dependencies(layers):\n \"\"\"Returns the dependencies between layers\"\"\"\n\n def _find_layer_containing_key(key):\n for k, v in layers.items():\n if key in v:\n return k\n raise RuntimeError(f\"{repr(key)} not found\")\n\n all_keys = set(key for layer in layers.values() for key in layer)\n ret = {k: set() for k in layers.keys()}\n for k, v in layers.items():\n for key in keys_in_tasks(all_keys.difference(v.keys()), v.values()):\n 
ret[k].add(_find_layer_containing_key(key))\n return ret", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer_Layer.get_output_keys.return.self_keys_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer_Layer.get_output_keys.return.self_keys_", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 41, "end_line": 76, "span_ids": ["Layer", "Layer.is_materialized", "Layer.get_output_keys"], "tokens": 243}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Layer(collections.abc.Mapping):\n \"\"\"High level graph layer\n\n This abstract class establishes a protocol for high level graph layers.\n\n The main motivation of a layer is to represent a collection of tasks\n symbolically in order to speed up a series of operations significantly.\n Ideally, a layer should stay in this symbolic state until execution\n but in practice some operations will force the layer to generate all\n its internal tasks. We say that the layer has been materialized.\n\n Most of the default implementations in this class will materialize the\n layer. 
It is up to derived classes to implement non-materializing\n implementations.\n \"\"\"\n\n @abc.abstractmethod\n def is_materialized(self) -> bool:\n \"\"\"Return whether the layer is materialized or not\"\"\"\n return True\n\n def get_output_keys(self) -> Set:\n \"\"\"Return a set of all output keys\n\n Output keys are all keys in the layer that might be referenced by\n other layers.\n\n A layer overriding this implementation should not materialize the\n layer.\n\n Returns\n -------\n keys: Set\n All output keys\n \"\"\"\n return self.keys()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.cull_Layer.cull.return.BasicLayer_out_ret_deps": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.cull_Layer.cull.return.BasicLayer_out_ret_deps", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 78, "end_line": 120, "span_ids": ["Layer.cull"], "tokens": 328}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Layer(collections.abc.Mapping):\n\n def cull(\n self, keys: Set, all_hlg_keys: Iterable\n ) -> Tuple[\"Layer\", Mapping[Hashable, Set]]:\n \"\"\"Return a new Layer with only the tasks required to calculate `keys` and\n a map of external key dependencies.\n\n In other words, remove unnecessary tasks from the layer.\n\n Examples\n --------\n >>> d = Layer({'x': 1, 'y': (inc, 'x'), 'out': (add, 'x', 10)})  # doctest: +SKIP\n >>> d.cull({'out'})  # doctest: +SKIP\n {'x': 1, 'out': (add, 'x', 10)}\n\n Returns\n -------\n layer: Layer\n Culled layer\n deps: Map\n Map of external key dependencies\n \"\"\"\n\n if len(keys) == len(self):\n # Nothing to cull if preserving all existing keys\n return self, {\n k: self.get_dependencies(k, all_hlg_keys) for k in self.keys()\n }\n\n ret_deps = {}\n seen = set()\n out = {}\n work = keys.copy()\n while work:\n k = work.pop()\n out[k] = self[k]\n ret_deps[k] = self.get_dependencies(k, all_hlg_keys)\n for d in ret_deps[k]:\n if d not in seen:\n if d in self:\n seen.add(d)\n work.add(d)\n\n return BasicLayer(out), ret_deps", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.get_dependencies_Layer.map_tasks.return.BasicLayer_k_func_v_fo": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.get_dependencies_Layer.map_tasks.return.BasicLayer_k_func_v_fo", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 122, "end_line": 161, "span_ids": 
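The culling code above is a plain reachability walk from the requested keys that records external dependencies along the way. A short demonstration against `BasicLayer`, using the `add`/`inc` helpers this module imports from `dask.utils_test` (the un-skipped analogue of the doctest in the docstring):

```python
from dask.highlevelgraph import BasicLayer
from dask.utils_test import add, inc

layer = BasicLayer({"x": 1, "y": (inc, "x"), "out": (add, "x", 10)})
culled, deps = layer.cull({"out"}, set(layer.keys()))

print(sorted(culled))  # ['out', 'x'] -- 'y' is unreachable from 'out', so dropped
print(deps)            # {'out': {'x'}, 'x': set()}
```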
["Layer.get_dependencies", "Layer.map_tasks"], "tokens": 274}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Layer(collections.abc.Mapping):\n\n def get_dependencies(self, key: Hashable, all_hlg_keys: Iterable) -> Set:\n \"\"\"Get dependencies of `key` in the layer\n\n Parameters\n ----------\n key: Hashable\n The key to find dependencies of\n all_hlg_keys: Iterable\n All keys in the high level graph.\n\n Returns\n -------\n deps: set\n A set of dependencies\n \"\"\"\n return keys_in_tasks(all_hlg_keys, [self[key]])\n\n def map_tasks(self, func: Callable[[Iterable], Iterable]) -> \"Layer\":\n \"\"\"Map `func` on tasks in the layer and returns a new layer.\n\n `func` should take an iterable of the tasks as input and return a new\n iterable as output and **cannot** change the dependencies between Layers.\n\n Warning\n -------\n A layer is allowed to ignore the map on tasks that are part of its internals.\n For instance, Blockwise will only invoke `func` on the input literals.\n\n Parameters\n ----------\n func: callable\n The function to call on tasks\n\n Returns\n -------\n layer: Layer\n A new layer containing the transformed tasks\n \"\"\"\n\n return BasicLayer({k: func(v) for k, v in self.items()})", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.__dask_distributed_pack___Layer.__dask_distributed_pack__.return.None": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.__dask_distributed_pack___Layer.__dask_distributed_pack__.return.None", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 163, "end_line": 186, "span_ids": ["Layer.__dask_distributed_pack__"], "tokens": 229}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Layer(collections.abc.Mapping):\n\n def __dask_distributed_pack__(self) -> Optional[Any]:\n \"\"\"Pack the layer for scheduler communication in Distributed\n\n This method should pack its current state and is called by the Client when\n communicating with the Scheduler.\n The Scheduler will then use .__dask_distributed_unpack__(data, ...) 
to unpack\n the state, materialize the layer, and merge it into the global task graph.\n\n The returned state must be compatible with Distributed's scheduler, which\n means it must obey the following:\n - Serializable by msgpack (notice, msgpack converts lists to tuples)\n - All remote data must be unpacked (see unpack_remotedata())\n - All keys must be converted to strings now or when unpacking\n - All tasks must be serialized (see dumps_task())\n\n Alternatively, the method can return None, which will make Distributed\n materialize the layer and use a default packing method.\n\n Returns\n -------\n state: Object serializable by msgpack\n Scheduler compatible state of the layer\n \"\"\"\n return None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.__dask_distributed_unpack___Layer.__dask_distributed_unpack__.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.__dask_distributed_unpack___Layer.__dask_distributed_unpack__.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 188, "end_line": 213, "span_ids": ["Layer.__dask_distributed_unpack__"], "tokens": 247}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Layer(collections.abc.Mapping):\n\n @classmethod\n def __dask_distributed_unpack__(\n cls, state: Any, dsk: Dict[str, Any], dependencies: Mapping[Hashable, Set]\n ) -> None:\n \"\"\"Unpack the state of a layer previously packed by __dask_distributed_pack__()\n\n This method is called by the scheduler in Distributed in order to unpack\n the state of a layer and merge it into its global task graph. The method\n should update `dsk` and `dependencies`, which are the already materialized\n state of the preceding layers in the high level graph. 
The layers of the\n high level graph are unpacked in topological order.\n\n See Layer.__dask_distributed_pack__() for packing detail.\n\n Parameters\n ----------\n state: Any\n The state returned by Layer.__dask_distributed_pack__()\n dsk: dict\n The materialized low level graph of the already unpacked layers\n dependencies: Mapping\n The dependencies of each key in `dsk`\n \"\"\"\n raise NotImplementedError(\n f\"{type(cls)} doesn't implement __dask_distributed_unpack__()\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.__reduce___Layer.__copy__.return.obj": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.__reduce___Layer.__copy__.return.obj", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 215, "end_line": 231, "span_ids": ["Layer.__copy__", "Layer.__reduce__"], "tokens": 152}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Layer(collections.abc.Mapping):\n\n def __reduce__(self):\n \"\"\"Default serialization implementation, which materializes the Layer\n\n This should follow the standard pickle protocol[1] but must always return\n a tuple and the arguments for the callable object must be compatible with\n msgpack. 
This is because Distributed uses msgpack to send Layers to the\n scheduler.\n\n [1] \n \"\"\"\n return (BasicLayer, (dict(self),))\n\n def __copy__(self):\n \"\"\"Default shallow copy implementation\"\"\"\n obj = type(self).__new__(self.__class__)\n obj.__dict__.update(self.__dict__)\n return obj", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_BasicLayer_BasicLayer.get_dependencies.return.self_dependencies_key_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_BasicLayer_BasicLayer.get_dependencies.return.self_dependencies_key_", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 234, "end_line": 277, "span_ids": ["BasicLayer.__init__", "BasicLayer.get_dependencies", "BasicLayer.__iter__", "BasicLayer.__getitem__", "BasicLayer.is_materialized", "BasicLayer", "BasicLayer.__len__", "BasicLayer.__contains__"], "tokens": 298}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class BasicLayer(Layer):\n \"\"\"Basic implementation of `Layer`\n\n Parameters\n ----------\n mapping: Mapping\n The mapping between keys and tasks, typically a dask graph.\n dependencies: Mapping[Hashable, Set], optional\n Mapping between keys and their dependencies\n global_dependencies: Set, optional\n Set of dependencies that all keys in the layer depend on. 
Notice,\n the set might also contain literals that will be ignored.\n \"\"\"\n\n def __init__(self, mapping, dependencies=None, global_dependencies=None):\n self.mapping = mapping\n self.dependencies = dependencies\n self.global_dependencies = global_dependencies\n self.global_dependencies_has_been_trimmed = False\n\n def __contains__(self, k):\n return k in self.mapping\n\n def __getitem__(self, k):\n return self.mapping[k]\n\n def __iter__(self):\n return iter(self.mapping)\n\n def __len__(self):\n return len(self.mapping)\n\n def is_materialized(self):\n return True\n\n def get_dependencies(self, key, all_hlg_keys):\n if self.dependencies is None or self.global_dependencies is None:\n return super().get_dependencies(key, all_hlg_keys)\n\n if not self.global_dependencies_has_been_trimmed:\n self.global_dependencies = self.global_dependencies & all_hlg_keys\n self.global_dependencies_has_been_trimmed = True\n\n return self.dependencies[key] | self.global_dependencies", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph_HighLevelGraph._Task_graph_composed_of": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph_HighLevelGraph._Task_graph_composed_of", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 280, "end_line": 344, "span_ids": ["HighLevelGraph"], "tokens": 732}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class HighLevelGraph(Mapping):\n \"\"\"Task graph composed of layers of dependent subgraphs\n\n This object encodes a Dask task graph that is composed of layers of\n dependent subgraphs, such as commonly occurs when building task graphs\n using high level collections like Dask array, bag, or dataframe.\n\n Typically each high level array, bag, or dataframe operation takes the task\n graphs of the input collections, merges them, and then adds one or more new\n layers of tasks for the new operation. These layers typically have at\n least as many tasks as there are partitions or chunks in the collection.\n The HighLevelGraph object stores the subgraphs for each operation\n separately in sub-graphs, and also stores the dependency structure between\n them.\n\n Parameters\n ----------\n layers : Mapping[str, Mapping]\n The subgraph layers, keyed by a unique name\n dependencies : Mapping[str, Set[str]]\n The set of layers on which each layer depends\n key_dependencies : Mapping[Hashable, Set], optional\n Mapping (some) keys in the high level graph to their dependencies. 
If\n a key is missing, its dependencies will be calculated on-the-fly.\n\n Examples\n --------\n Here is an idealized example that shows the internal state of a\n HighLevelGraph\n\n >>> import dask.dataframe as dd\n\n >>> df = dd.read_csv('myfile.*.csv') # doctest: +SKIP\n >>> df = df + 100 # doctest: +SKIP\n >>> df = df[df.name == 'Alice'] # doctest: +SKIP\n\n >>> graph = df.__dask_graph__() # doctest: +SKIP\n >>> graph.layers # doctest: +SKIP\n {\n 'read-csv': {('read-csv', 0): (pandas.read_csv, 'myfile.0.csv'),\n ('read-csv', 1): (pandas.read_csv, 'myfile.1.csv'),\n ('read-csv', 2): (pandas.read_csv, 'myfile.2.csv'),\n ('read-csv', 3): (pandas.read_csv, 'myfile.3.csv')},\n 'add': {('add', 0): (operator.add, ('read-csv', 0), 100),\n ('add', 1): (operator.add, ('read-csv', 1), 100),\n ('add', 2): (operator.add, ('read-csv', 2), 100),\n ('add', 3): (operator.add, ('read-csv', 3), 100)}\n 'filter': {('filter', 0): (lambda part: part[part.name == 'Alice'], ('add', 0)),\n ('filter', 1): (lambda part: part[part.name == 'Alice'], ('add', 1)),\n ('filter', 2): (lambda part: part[part.name == 'Alice'], ('add', 2)),\n ('filter', 3): (lambda part: part[part.name == 'Alice'], ('add', 3))}\n }\n\n >>> graph.dependencies # doctest: +SKIP\n {\n 'read-csv': set(),\n 'add': {'read-csv'},\n 'filter': {'add'}\n }\n\n See Also\n --------\n HighLevelGraph.from_collections :\n typically used by developers to make new HighLevelGraphs\n \"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.__init___HighLevelGraph.__init__.self.layers_5._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.__init___HighLevelGraph.__init__.self.layers_5._", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 346, "end_line": 362, "span_ids": ["HighLevelGraph.__init__"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class HighLevelGraph(Mapping):\n\n def __init__(\n self,\n layers: Mapping[str, Layer],\n dependencies: Mapping[str, Set],\n key_dependencies: Optional[Mapping[Hashable, Set]] = None,\n ):\n self._keys = None\n self._all_external_keys = None\n self.layers = layers\n self.dependencies = dependencies\n self.key_dependencies = key_dependencies if key_dependencies else {}\n\n # Makes sure that all layers are `Layer`\n self.layers = {\n k: v if isinstance(v, Layer) else BasicLayer(v)\n for k, v in self.layers.items()\n }", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.__getitem___HighLevelGraph.keyset.return.self__keys": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.__getitem___HighLevelGraph.keyset.return.self__keys", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 447, "end_line": 475, "span_ids": ["HighLevelGraph.__len__", "HighLevelGraph.keyset", "HighLevelGraph.__getitem__", "HighLevelGraph.__iter__"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class HighLevelGraph(Mapping):\n\n def __getitem__(self, key):\n for d in self.layers.values():\n if key in d:\n return d[key]\n raise KeyError(key)\n\n def __len__(self):\n return len(self.keyset())\n\n def __iter__(self):\n return toolz.unique(toolz.concat(self.layers.values()))\n\n def keyset(self) -> Set:\n \"\"\"Get all keys of all the layers\n\n This will in many cases materialize layers, which makes it\n a relative cheap operation. See `get_all_external_keys()`\n for a faster alternative.\n\n Returns\n -------\n keys: Set\n A set of all keys\n \"\"\"\n if self._keys is None:\n self._keys = set()\n for layer in self.layers.values():\n self._keys.update(layer.keys())\n return self._keys", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.get_all_external_keys_HighLevelGraph.get_all_external_keys.return.self__all_external_keys": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.get_all_external_keys_HighLevelGraph.get_all_external_keys.return.self__all_external_keys", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 477, "end_line": 492, "span_ids": ["HighLevelGraph.get_all_external_keys"], "tokens": 118}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class HighLevelGraph(Mapping):\n\n def get_all_external_keys(self) -> Set:\n \"\"\"Get all output keys of all layers\n\n This will in most cases _not_ materialize any layers, which makes\n it a relative cheap operation.\n\n Returns\n -------\n keys: Set\n A set of all external keys\n \"\"\"\n if self._all_external_keys is None:\n self._all_external_keys = set()\n for layer in self.layers.values():\n self._all_external_keys.update(layer.get_output_keys())\n return self._all_external_keys", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.get_all_dependencies_HighLevelGraph.get_all_dependencies.return.self_key_dependencies": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.get_all_dependencies_HighLevelGraph.get_all_dependencies.return.self_key_dependencies", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 494, "end_line": 511, "span_ids": ["HighLevelGraph.get_all_dependencies"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class HighLevelGraph(Mapping):\n\n def get_all_dependencies(self) -> Mapping[Hashable, Set]:\n \"\"\"Get dependencies of all keys\n\n This will in most cases materialize all layers, which makes\n it an expensive operation.\n\n Returns\n -------\n map: Mapping\n A map that maps each key to its dependencies\n \"\"\"\n all_keys = self.keyset()\n missing_keys = all_keys.difference(self.key_dependencies.keys())\n if missing_keys:\n for layer in self.layers.values():\n for k in missing_keys.intersection(layer.keys()):\n self.key_dependencies[k] = layer.get_dependencies(k, all_keys)\n return self.key_dependencies", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.dependents_HighLevelGraph.visualize.return.graphviz_to_file_g_filen": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.dependents_HighLevelGraph.visualize.return.graphviz_to_file_g_filen", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 513, "end_line": 560, "span_ids": ["HighLevelGraph.visualize", "HighLevelGraph.merge", "HighLevelGraph.dependents", "HighLevelGraph.items", "HighLevelGraph.copy", "HighLevelGraph.keys", "HighLevelGraph.dicts", "HighLevelGraph.values"], "tokens": 285}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class HighLevelGraph(Mapping):\n\n @property\n def dependents(self):\n return reverse_dict(self.dependencies)\n\n @property\n def dicts(self):\n # Backwards compatibility for now\n return self.layers\n\n def items(self):\n items = []\n seen = set()\n for d in self.layers.values():\n for key in d:\n if key not in seen:\n seen.add(key)\n items.append((key, d[key]))\n return items\n\n def keys(self):\n return [key for key, _ in self.items()]\n\n def values(self):\n return [value for _, value in self.items()]\n\n def copy(self):\n 
return HighLevelGraph(self.layers.copy(), self.dependencies.copy())\n\n @classmethod\n def merge(cls, *graphs):\n layers = {}\n dependencies = {}\n for g in graphs:\n if isinstance(g, HighLevelGraph):\n layers.update(g.layers)\n dependencies.update(g.dependencies)\n elif isinstance(g, Mapping):\n layers[id(g)] = g\n dependencies[id(g)] = set()\n else:\n raise TypeError(g)\n return cls(layers, dependencies)\n\n def visualize(self, filename=\"dask.pdf\", format=None, **kwargs):\n from .dot import graphviz_to_file\n\n g = to_graphviz(self, **kwargs)\n return graphviz_to_file(g, filename, format)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph._toposort_layers_HighLevelGraph._toposort_layers.return.ret": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph._toposort_layers_HighLevelGraph._toposort_layers.return.ret", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 562, "end_line": 586, "span_ids": ["HighLevelGraph._toposort_layers"], "tokens": 164}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class HighLevelGraph(Mapping):\n\n def _toposort_layers(self):\n \"\"\"Sort the layers in a high level graph topologically\n\n Returns\n -------\n sorted: list\n List of layer names sorted topologically\n \"\"\"\n dependencies = copy.deepcopy(self.dependencies)\n ready = {k for k, v in dependencies.items() if len(v) == 0}\n ret = []\n while len(ready) > 0:\n layer = ready.pop()\n ret.append(layer)\n del dependencies[layer]\n for k, v in dependencies.items():\n v.discard(layer)\n if len(v) == 0:\n ready.add(k)\n return ret", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.cull_HighLevelGraph.cull.return.HighLevelGraph_ret_layers": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.cull_HighLevelGraph.cull.return.HighLevelGraph_ret_layers", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 588, "end_line": 628, "span_ids": ["HighLevelGraph.cull"], "tokens": 352}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
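The _toposort_layers method above is a Kahn-style topological sort over the layer-level dependencies: repeatedly emit a layer with no unmet dependencies, then release it from every remaining layer's dependency set. A self-contained sketch of the same loop (the function name toposort_layers is illustrative, not part of the source):

import copy

def toposort_layers(dependencies):
    # dependencies maps layer name -> set of layer names it depends on
    deps = copy.deepcopy(dependencies)
    ready = {k for k, v in deps.items() if not v}
    order = []
    while ready:
        layer = ready.pop()        # emit a layer with no unmet dependencies
        order.append(layer)
        del deps[layer]
        for k, v in deps.items():
            v.discard(layer)       # release this layer from its dependents
            if not v:
                ready.add(k)
    return order

assert toposort_layers({"a": set(), "b": {"a"}, "c": {"b"}}) == ["a", "b", "c"]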
"relationships": {}, "text": "class HighLevelGraph(Mapping):\n\n def cull(self, keys: Set) -> \"HighLevelGraph\":\n \"\"\"Return new high level graph with only the tasks required to calculate keys.\n\n In other words, remove unnecessary tasks from dask.\n ``keys`` may be a single key or list of keys.\n\n Returns\n -------\n hlg: HighLevelGraph\n Culled high level graph\n \"\"\"\n\n all_ext_keys = self.get_all_external_keys()\n ret_layers = {}\n ret_key_deps = {}\n for layer_name in reversed(self._toposort_layers()):\n layer = self.layers[layer_name]\n # Let's cull the layer to produce its part of `keys`\n output_keys = keys.intersection(layer.get_output_keys())\n if len(output_keys) > 0:\n culled_layer, culled_deps = layer.cull(output_keys, all_ext_keys)\n # Update `keys` with all layer's external key dependencies, which\n # are all the layer's dependencies (`culled_deps`) excluding\n # the layer's output keys.\n external_deps = set()\n for d in culled_deps.values():\n external_deps |= d\n external_deps.difference_update(culled_layer.get_output_keys())\n keys.update(external_deps)\n\n # Save the culled layer and its key dependencies\n ret_layers[layer_name] = culled_layer\n ret_key_deps.update(culled_deps)\n\n ret_dependencies = {}\n for layer_name in ret_layers:\n ret_dependencies[layer_name] = {\n d for d in self.dependencies[layer_name] if d in ret_layers\n }\n\n return HighLevelGraph(ret_layers, ret_dependencies, ret_key_deps)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.map_basic_layers_HighLevelGraph.map_basic_layers.return.HighLevelGraph_layers_se": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.map_basic_layers_HighLevelGraph.map_basic_layers.return.HighLevelGraph_layers_se", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 630, "end_line": 656, "span_ids": ["HighLevelGraph.map_basic_layers"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class HighLevelGraph(Mapping):\n\n def map_basic_layers(\n self, func: Callable[[BasicLayer], Mapping]\n ) -> \"HighLevelGraph\":\n \"\"\"Map `func` on each basic layer and returns a new high level graph.\n\n `func` should take a BasicLayer as input and return a new Mapping as output\n and **cannot** change the dependencies between Layers.\n\n If `func` returns a non-Layer type, it will be wrapped in a `BasicLayer`\n object automatically.\n\n Parameters\n ----------\n func : callable\n The function to call on each BasicLayer\n\n Returns\n -------\n hlg : HighLevelGraph\n A high level graph containing the transformed BasicLayers and the other\n Layers untouched\n \"\"\"\n layers = {\n k: func(v) if isinstance(v, BasicLayer) else v\n for k, v in self.layers.items()\n }\n return HighLevelGraph(layers, self.dependencies)", "start_char_idx": null, 
"end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.map_tasks_HighLevelGraph.map_tasks.return.HighLevelGraph_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.map_tasks_HighLevelGraph.map_tasks.return.HighLevelGraph_", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 658, "end_line": 684, "span_ids": ["HighLevelGraph.map_tasks"], "tokens": 194}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class HighLevelGraph(Mapping):\n\n def map_tasks(self, func: Callable[[Iterable], Iterable]) -> \"HighLevelGraph\":\n \"\"\"Map `func` on all tasks and returns a new high level graph.\n\n `func` should take an iterable of the tasks as input and return a new\n iterable as output and **cannot** change the dependencies between Layers.\n\n Warning\n -------\n A layer is allowed to ignore the map on tasks that are part of its internals.\n For instance, Blockwise will only invoke `func` on the input literals.\n\n Parameters\n ----------\n func : callable\n The function to call on tasks\n\n Returns\n -------\n hlg : HighLevelGraph\n A high level graph containing the transformed tasks\n \"\"\"\n\n return HighLevelGraph(\n {k: v.map_tasks(func) for k, v in self.layers.items()},\n self.dependencies,\n self.key_dependencies,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_from_distutils_version_im_f3.pass": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_from_distutils_version_im_f3.pass", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 69, "span_ids": ["impl", "imports", "import_or_none", "f3", "f1", "f2"], "tokens": 364}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from distutils.version import LooseVersion\nimport os\nimport pytest\nfrom operator import add, mul\nimport subprocess\nimport sys\nimport time\nfrom collections import OrderedDict\n\nfrom tlz import merge\n\nimport dask\nfrom dask import delayed\nfrom dask.base import (\n compute,\n tokenize,\n normalize_token,\n normalize_function,\n visualize,\n persist,\n function_cache,\n is_dask_collection,\n DaskMethodsMixin,\n optimize,\n unpack_collections,\n 
named_schedulers,\n get_scheduler,\n)\nfrom dask.core import literal\nfrom dask.delayed import Delayed\nfrom dask.utils import tmpdir, tmpfile, ignoring\nfrom dask.utils_test import inc, dec\nfrom dask.diagnostics import Profiler\n\n\ndef import_or_none(path):\n with ignoring(BaseException):\n return pytest.importorskip(path)\n return None\n\n\ntz = pytest.importorskip(\"tlz\")\nda = import_or_none(\"dask.array\")\ndb = import_or_none(\"dask.bag\")\ndd = import_or_none(\"dask.dataframe\")\nnp = import_or_none(\"numpy\")\nsp = import_or_none(\"scipy.sparse\")\npd = import_or_none(\"pandas\")\n\nif pd:\n PANDAS_VERSION = LooseVersion(pd.__version__)\n PANDAS_GT_100 = PANDAS_VERSION >= LooseVersion(\"1.0.0\")\n\n if PANDAS_GT_100:\n import pandas.testing as tm # noqa: F401\n else:\n import pandas.util.testing as tm # noqa: F401\n\n\ndef f1(a, b, c=1):\n pass\n\n\ndef f2(a, b=1, c=2):\n pass\n\n\ndef f3(a):\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_from_functools_import_par_test_basic.assert_all_isinstance_lay": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_from_functools_import_par_test_basic.assert_all_isinstance_lay", "embedding": null, "metadata": {"file_path": "dask/tests/test_highgraph.py", "file_name": "test_highgraph.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 32, "span_ids": ["imports", "test_visualize", "test_basic"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from functools import partial\nimport os\n\nimport pytest\n\nimport dask.array as da\nfrom dask.utils_test import inc\nfrom dask.highlevelgraph import HighLevelGraph, BasicLayer, Layer\nfrom dask.blockwise import Blockwise\nfrom dask.array.utils import assert_eq\n\n\ndef test_visualize(tmpdir):\n pytest.importorskip(\"graphviz\")\n fn = str(tmpdir)\n a = da.ones(10, chunks=(5,))\n b = a + 1\n c = a + 2\n d = b + c\n d.dask.visualize(fn)\n assert os.path.exists(fn)\n\n\ndef test_basic():\n a = {\"x\": 1}\n b = {\"y\": (inc, \"x\")}\n layers = {\"a\": a, \"b\": b}\n dependencies = {\"a\": set(), \"b\": {\"a\"}}\n hg = HighLevelGraph(layers, dependencies)\n\n assert dict(hg) == {\"x\": 1, \"y\": (inc, \"x\")}\n assert all(isinstance(layer, Layer) for layer in hg.layers.values())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_keys_values_items_methods_test_keys_values_items_methods.assert_items_k_v_f": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_keys_values_items_methods_test_keys_values_items_methods.assert_items_k_v_f", "embedding": null, "metadata": {"file_path": 
"dask/tests/test_highgraph.py", "file_name": "test_highgraph.py", "file_type": "text/x-python", "category": "test", "start_line": 35, "end_line": 46, "span_ids": ["test_keys_values_items_methods"], "tokens": 126}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_keys_values_items_methods():\n a = da.ones(10, chunks=(5,))\n b = a + 1\n c = a + 2\n d = b + c\n hg = d.dask\n\n keys, values, items = hg.keys(), hg.values(), hg.items()\n assert all(isinstance(i, list) for i in [keys, values, items])\n assert keys == [i for i in hg]\n assert values == [hg[i] for i in hg]\n assert items == [(k, v) for k, v in zip(keys, values)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_cull_test_cull.assert_dict_culled_by_y_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_cull_test_cull.assert_dict_culled_by_y_", "embedding": null, "metadata": {"file_path": "dask/tests/test_highgraph.py", "file_name": "test_highgraph.py", "file_type": "text/x-python", "category": "test", "start_line": 49, "end_line": 63, "span_ids": ["test_cull"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_cull():\n a = {\"x\": 1, \"y\": (inc, \"x\")}\n layers = {\n \"a\": BasicLayer(\n a, dependencies={\"x\": set(), \"y\": {\"x\"}}, global_dependencies=set()\n )\n }\n dependencies = {\"a\": set()}\n hg = HighLevelGraph(layers, dependencies)\n\n culled_by_x = hg.cull({\"x\"})\n assert dict(culled_by_x) == {\"x\": 1}\n\n culled_by_y = hg.cull({\"y\"})\n assert dict(culled_by_y) == a", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_map_basic_layers_test_map_basic_layers.assert_eq_y_42_3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_map_basic_layers_test_map_basic_layers.assert_eq_y_42_3_", "embedding": null, "metadata": {"file_path": "dask/tests/test_highgraph.py", "file_name": "test_highgraph.py", "file_type": "text/x-python", "category": "test", "start_line": 66, "end_line": 87, "span_ids": ["test_map_basic_layers"], "tokens": 201}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": "@pytest.mark.parametrize(\"inject_dict\", [True, False])\ndef test_map_basic_layers(inject_dict):\n \"\"\"Check map_basic_layers() by injecting an inc() call\"\"\"\n\n y = da.ones(3, chunks=(3,), dtype=\"int\") + 40\n\n def inject_inc(dsk):\n assert isinstance(dsk, BasicLayer)\n dsk = dict(dsk)\n k = next(iter(dsk))\n dsk[k] = (inc, dsk[k])\n if inject_dict:\n return dsk # map_basic_layers() should automatically convert it to a `BasicLayer`\n else:\n return BasicLayer(dsk)\n\n dsk = y.__dask_graph__()\n y.dask = dsk.map_basic_layers(inject_inc)\n layers = list(y.dask.layers.values())\n assert isinstance(layers[0], BasicLayer)\n assert isinstance(layers[1], Blockwise)\n assert_eq(y, [42] * 3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_map_tasks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_map_tasks_", "embedding": null, "metadata": {"file_path": "dask/tests/test_highgraph.py", "file_name": "test_highgraph.py", "file_type": "text/x-python", "category": "test", "start_line": 90, "end_line": 113, "span_ids": ["test_map_tasks"], "tokens": 203}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"use_layer_map_task\", [True, False])\ndef test_map_tasks(use_layer_map_task):\n \"\"\"Check map_tasks() by injecting an +1 to the `40` literal\"\"\"\n y = da.ones(3, chunks=(3,), dtype=\"int\") + 40\n\n def plus_one(tasks):\n ret = []\n for t in tasks:\n if t == 40:\n t += 1\n ret.append(t)\n return tuple(ret)\n\n dsk = y.__dask_graph__()\n\n if use_layer_map_task:\n # In order to test the default map_tasks() implementation on a Blockwise Layer,\n # we overwrite Blockwise.map_tasks with Layer.map_tasks\n blockwise_layer = list(dsk.layers.values())[1]\n blockwise_layer.map_tasks = partial(Layer.map_tasks, blockwise_layer)\n\n y.dask = dsk.map_tasks(plus_one)\n assert_eq(y, [42] * 3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_SubgraphCallable_with_numpy_test_SubgraphCallable_with_numpy.assert_f1_f4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_SubgraphCallable_with_numpy_test_SubgraphCallable_with_numpy.assert_f1_f4", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 1140, "end_line": 1156, "span_ids": ["test_SubgraphCallable_with_numpy"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", 
"end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_SubgraphCallable_with_numpy():\n np = pytest.importorskip(\"numpy\")\n\n # Testing support of numpy arrays in `dsk`, which uses elementwise equalities.\n dsk1 = {\"a\": np.arange(10)}\n f1 = SubgraphCallable(dsk1, \"a\", [None], name=\"test\")\n f2 = SubgraphCallable(dsk1, \"a\", [None], name=\"test\")\n assert f1 == f2\n\n # Notice, even though `dsk1` and `dsk2` are not equal they compare equal because\n # SubgraphCallable.__eq__() only checks name, outkeys, and inkeys.\n dsk2 = {\"a\": np.arange(10) + 1}\n f3 = SubgraphCallable(dsk2, \"a\", [None], name=\"test\")\n assert f1 == f3\n\n f4 = SubgraphCallable(dsk1, \"a\", [None], name=\"test2\")\n assert f1 != f4", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/scripts/scheduling.py_dense_dense.return.d_x_height_1_i_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/scripts/scheduling.py_dense_dense.return.d_x_height_1_i_", "embedding": null, "metadata": {"file_path": "docs/source/scripts/scheduling.py", "file_name": "scheduling.py", "file_type": "text/x-python", "category": "implementation", "start_line": 39, "end_line": 49, "span_ids": ["dense"], "tokens": 107}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def dense(width, height):\n \"\"\" Full barriers between each step \"\"\"\n d = {(\"x\", 0, i): i for i in range(width)}\n for j in range(1, height):\n d.update(\n {\n (\"x\", j, i): (noop, [(\"x\", j - 1, k) for k in range(width)])\n for i in range(width)\n }\n )\n return d, [(\"x\", height - 1, i) for i in range(width)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/scripts/scheduling.py_np_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/scripts/scheduling.py_np_", "embedding": null, "metadata": {"file_path": "docs/source/scripts/scheduling.py", "file_name": "scheduling.py", "file_type": "text/x-python", "category": "implementation", "start_line": 52, "end_line": 125, "span_ids": ["impl:3"], "tokens": 522}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\n\nx = np.logspace(0, 4, 10)\ntrivial_results = dict()\nfor get in [dask.get, threaded.get, local.get_sync, multiprocessing.get]:\n y = list()\n for n in x:\n dsk, keys = trivial(int(n), 
5)\n start = time()\n get(dsk, keys)\n end = time()\n y.append(end - start)\n trivial_results[get] = np.array(y)\n\n\n########\n# Plot #\n########\n\nf, (left, right) = plt.subplots(\n nrows=1, ncols=2, sharex=True, figsize=(12, 5), squeeze=True\n)\n\nfor get in trivial_results:\n left.loglog(x * 5, trivial_results[get], label=get.__module__)\n right.loglog(x * 5, trivial_results[get] / x, label=get.__module__)\n\nleft.set_title(\"Cost for Entire graph\")\nright.set_title(\"Cost per task\")\nleft.set_ylabel(\"Duration (s)\")\nright.set_ylabel(\"Duration (s)\")\nleft.set_xlabel(\"Number of tasks\")\nright.set_xlabel(\"Number of tasks\")\n\nplt.legend()\nplt.savefig(\"images/scaling-nodes.png\")\n\n#####################\n# Crosstalk example #\n#####################\n\nx = np.linspace(1, 100, 10)\ncrosstalk_results = dict()\nfor get in [threaded.get, local.get_sync]:\n y = list()\n for n in x:\n dsk, keys = crosstalk(1000, 5, int(n))\n start = time()\n get(dsk, keys)\n end = time()\n y.append(end - start)\n crosstalk_results[get] = np.array(y)\n\n########\n# Plot #\n########\n\nf, (left, right) = plt.subplots(\n nrows=1, ncols=2, sharex=True, figsize=(12, 5), squeeze=True\n)\n\nfor get in crosstalk_results:\n left.plot(x, crosstalk_results[get], label=get.__module__)\n right.semilogy(x, crosstalk_results[get] / 5000.0 / x, label=get.__module__)\n\nleft.set_title(\"Cost for Entire graph\")\nright.set_title(\"Cost per edge\")\nleft.set_ylabel(\"Duration (s)\")\nright.set_ylabel(\"Duration (s)\")\nleft.set_xlabel(\"Number of edges per task\")\nright.set_xlabel(\"Number of edges per task\")\nplt.legend()\nplt.savefig(\"images/scaling-edges.png\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}}} \ No newline at end of file